author    Michael Marineau <marineam@gentoo.org> 2008-08-31 16:50:29 +0000
committer Michael Marineau <marineam@gentoo.org> 2008-08-31 16:50:29 +0000
commit    2cd57c3db24ec0c91d8c433c69a7176ad2f27720 (patch)
tree      e2c540b1edffff8e87a72408d5572ded4f8fbc7a
parent    Upgrade patches to Xen 3.3.0 and Debian Security 2.6.18.dfsg.1-22etch2 (diff)
download  xen-2cd57c3db24ec0c91d8c433c69a7176ad2f27720.tar.gz
          xen-2cd57c3db24ec0c91d8c433c69a7176ad2f27720.tar.bz2
          xen-2cd57c3db24ec0c91d8c433c69a7176ad2f27720.zip
Releasing 2.6.18-12
svn path=/patches/; revision=82
-rw-r--r--  tags/2.6.18-12/00000_README | 392
-rw-r--r--  tags/2.6.18-12/10002_xen-3.3.0.patch | 210345
-rw-r--r--  tags/2.6.18-12/30001_nfnetlink_log-null-deref.patch | 37
-rw-r--r--  tags/2.6.18-12/30002_nf_conntrack-set-nfctinfo.patch | 35
-rw-r--r--  tags/2.6.18-12/30003_netlink-infinite-recursion.patch | 65
-rw-r--r--  tags/2.6.18-12/30004_nl_fib_lookup-oops.patch | 34
-rw-r--r--  tags/2.6.18-12/30005_core-dump-unreadable-PT_INTERP.patch | 70
-rw-r--r--  tags/2.6.18-12/30006_appletalk-length-mismatch.patch | 93
-rw-r--r--  tags/2.6.18-12/30007_cm4040-buffer-overflow.patch | 44
-rw-r--r--  tags/2.6.18-12/30008_ipv6_fl_socklist-no-share.patch | 32
-rw-r--r--  tags/2.6.18-12/30009_keys-serial-num-collision.patch | 92
-rw-r--r--  tags/2.6.18-12/30010_ipv6_getsockopt_sticky-null-opt.patch | 42
-rw-r--r--  tags/2.6.18-12/30011_ipv6_setsockopt-NULL-deref.patch | 28
-rw-r--r--  tags/2.6.18-12/30012_ipv6-disallow-RH0-by-default.patch | 166
-rw-r--r--  tags/2.6.18-12/30013_listxattr-mem-corruption.patch | 441
-rw-r--r--  tags/2.6.18-12/30014_bluetooth-l2cap-hci-info-leaks.patch | 63
-rw-r--r--  tags/2.6.18-12/30015_usblcd-limit-memory-consumption.patch | 89
-rw-r--r--  tags/2.6.18-12/30016_pppoe-socket-release-mem-leak.patch | 42
-rw-r--r--  tags/2.6.18-12/30017_nf_conntrack_h323-bounds-checking.patch | 42
-rw-r--r--  tags/2.6.18-12/30018_dn_fib-out-of-bounds.patch | 37
-rw-r--r--  tags/2.6.18-12/30019_random-fix-seeding-with-zero-entropy.patch | 97
-rw-r--r--  tags/2.6.18-12/30020_random-fix-error-in-entropy-extraction.patch | 51
-rw-r--r--  tags/2.6.18-12/30021_nf_conntrack_sctp-null-deref.patch | 49
-rw-r--r--  tags/2.6.18-12/30022_i965-secure-batchbuffer.patch | 67
-rw-r--r--  tags/2.6.18-12/30023_appletalk-endianness-annotations.patch | 285
-rw-r--r--  tags/2.6.18-12/30024_drm-i965.patch | 221
-rw-r--r--  tags/2.6.18-12/30025_ipv4-fib_props-out-of-bounds.patch | 42
-rw-r--r--  tags/2.6.18-12/30026_cifs-fix-sign-settings.patch | 179
-rw-r--r--  tags/2.6.18-12/30027_cpuset_tasks-underflow.patch | 61
-rw-r--r--  tags/2.6.18-12/30028_random-bound-check-ordering.patch | 42
-rw-r--r--  tags/2.6.18-12/30030_aacraid-ioctl-perm-check.patch | 40
-rw-r--r--  tags/2.6.18-12/30031_ptrace-handle-bogus-selector.patch | 86
-rw-r--r--  tags/2.6.18-12/30032_fixup-trace_irq-breakage.patch | 64
-rw-r--r--  tags/2.6.18-12/30033_prevent-stack-growth-into-hugetlb-region.patch | 47
-rw-r--r--  tags/2.6.18-12/30034_cifs-honor-umask.patch | 81
-rw-r--r--  tags/2.6.18-12/30035_amd64-zero-extend-32bit-ptrace.patch | 88
-rw-r--r--  tags/2.6.18-12/30036_jffs2-ACL-vs-mode-handling.patch | 355
-rw-r--r--  tags/2.6.18-12/30039_hugetlb-prio_tree-unit-fix.patch | 85
-rw-r--r--  tags/2.6.18-12/30040_usb-pwc-disconnect-block.patch | 124
-rw-r--r--  tags/2.6.18-12/30041_ipv6-disallow-RH0-by-default-2.patch | 30
-rw-r--r--  tags/2.6.18-12/30042_reset-pdeathsig-on-suid-upstream.patch | 51
-rw-r--r--  tags/2.6.18-12/30044_cifs-better-failed-mount-errors.patch | 234
-rw-r--r--  tags/2.6.18-12/30045_cifs-corrupt-server-response-overflow.patch | 694
-rw-r--r--  tags/2.6.18-12/30046_wait_task_stopped-hang.patch | 38
-rw-r--r--  tags/2.6.18-12/30047_ieee80211-underflow.patch | 54
-rw-r--r--  tags/2.6.18-12/30048_sysfs_readdir-NULL-deref-1.patch | 112
-rw-r--r--  tags/2.6.18-12/30049_sysfs_readdir-NULL-deref-2.patch | 128
-rw-r--r--  tags/2.6.18-12/30050_sysfs-fix-condition-check.patch | 29
-rw-r--r--  tags/2.6.18-12/30051_tmpfs-restore-clear_highpage.patch | 44
-rw-r--r--  tags/2.6.18-12/30052_minixfs-printk-hang.patch | 76
-rw-r--r--  tags/2.6.18-12/30053_hrtimer-large-relative-timeouts-overflow.patch | 45
-rw-r--r--  tags/2.6.18-12/30054_coredump-only-to-same-uid.patch | 38
-rw-r--r--  tags/2.6.18-12/30055_isdn-net-overflow.patch | 54
-rw-r--r--  tags/2.6.18-12/30056_proc-snd-page-alloc-mem-leak.patch | 169
-rw-r--r--  tags/2.6.18-12/30057_fat-move-ioctl-compat-code.patch | 167
-rw-r--r--  tags/2.6.18-12/30058_fat-fix-compat-ioctls.patch | 311
-rw-r--r--  tags/2.6.18-12/30059_vfs-use-access-mode-flag.patch | 52
-rw-r--r--  tags/2.6.18-12/30060_i4l-isdn_ioctl-mem-overrun.patch | 56
-rw-r--r--  tags/2.6.18-12/30061_vmsplice-security.patch | 28
-rw-r--r--  tags/2.6.18-12/30062_clear-spurious-irq.patch | 34
-rw-r--r--  tags/2.6.18-12/30063_mmap-VM_DONTEXPAND.patch | 120
-rw-r--r--  tags/2.6.18-12/30064_RLIMIT_CPU-earlier-checking.patch | 80
-rw-r--r--  tags/2.6.18-12/30065_dnotify-race.patch | 22
-rw-r--r--  tags/2.6.18-12/30066_fcntl_setlk-close-race.patch | 76
-rw-r--r--  tags/2.6.18-12/30067_sit-missing-kfree_skb-on-pskb_may_pull.patch | 26
-rw-r--r--  tags/2.6.18-12/30068_hrtimer-prevent-overrun.patch | 38
-rw-r--r--  tags/2.6.18-12/30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch | 29
-rw-r--r--  tags/2.6.18-12/30070_amd64-cs-corruption.patch | 12
-rw-r--r--  tags/2.6.18-12/30071_dccp-feature-length-check.patch | 15
-rw-r--r--  tags/2.6.18-12/30072_asn1-ber-decoding-checks.patch | 103
-rw-r--r--  tags/2.6.18-12/30073_nfs-write-corruption.patch | 76
-rw-r--r--  tags/2.6.18-12/30074_x86-clear-df-before-calling-signal-handler.patch | 57
-rw-r--r--  tags/2.6.18-12/30075_3w-xxxx-bigmem-corruption.patch | 42
-rw-r--r--  tags/2.6.18-12/30076_dnotify-race-locking.patch | 29
-rw-r--r--  tags/2.6.18-12/30077_sctp-make-sure-n-sizeof-does-not-overflow.patch | 29
-rw-r--r--  tags/2.6.18-12/30078_esp-iv-in-linear-part-of-skb.patch | 48
-rw-r--r--  tags/2.6.18-12/30079a_amd64-fix-zeroing-on-exception-in-copy_user-pre.patch | 798
-rw-r--r--  tags/2.6.18-12/30079b_amd64-fix-zeroing-on-exception-in-copy_user.patch | 272
-rw-r--r--  tags/2.6.18-12/30080_tty-fix-for-tty-operations-bugs.patch | 183
-rw-r--r--  tags/2.6.18-12/30081_check-privileges-before-setting-mount-propagation.patch | 28
-rw-r--r--  tags/2.6.18-12/30082a_x86-add-copy_user_handle_tail.patch | 56
-rw-r--r--  tags/2.6.18-12/30082b_x86-fix-copy_user.patch | 537
-rw-r--r--  tags/2.6.18-12/30083_x86-wrong-register-was-used-in-align-macro.patch | 29
-rw-r--r--  tags/2.6.18-12/30084_cifs-fix-compiler-warning.patch | 21
-rw-r--r--  tags/2.6.18-12/30085_netfilter-nf_nat_snmp_basic-fix-range-check.patch | 27
-rw-r--r--  tags/2.6.18-12/30086_sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch | 30
-rw-r--r--  tags/2.6.18-12/30087_vfs-fix-lookup-on-deleted-directory.patch | 71
-rw-r--r--  tags/2.6.18-12/50009_gentooify-tls-warning.patch | 16
88 files changed, 219567 insertions, 0 deletions
diff --git a/tags/2.6.18-12/00000_README b/tags/2.6.18-12/00000_README
new file mode 100644
index 0000000..c9c4c91
--- /dev/null
+++ b/tags/2.6.18-12/00000_README
@@ -0,0 +1,392 @@
+Xen Patches README
+------------------
+
+These patches are intended to be stacked on top of genpatches-base.
+
+Many of the patches included here are swiped from various sources which
+use their own four digit patch numbering scheme, so we are stuck with five
+digits to indicate the source for easier tracking and re-syncing.
+
+Numbering
+---------
+
+0xxxx Gentoo, not related to Xen. (in case we pull something from extras)
+1xxxx XenSource, upstream Xen patch for 2.6.18
+2xxxx Redhat, we use their Xen patch for >=2.6.20
+3xxxx Debian, we use their security fixes for 2.6.18
+5xxxx Gentoo, Xen and other fixes for Redhat and/or Debian patches.
+
+Patches
+-------
+
+10002_xen-3.3.0.patch
+ Upstream 3.3.0 patch
+
+30001_nfnetlink_log-null-deref.patch
+ [SECURITY] Fix remotely exploitable NULL pointer dereference in
+ nfulnl_recv_config()
+ See CVE-2007-1496
+
+30002_nf_conntrack-set-nfctinfo.patch
+ [SECURITY] Fix incorrect classification of IPv6 fragments as ESTABLISHED,
+ which allows remote attackers to bypass certain rulesets
+ See CVE-2007-1497
+
+30003_netlink-infinite-recursion.patch
+ [SECURITY] Fix infinite recursion bug in netlink
+ See CVE-2007-1861
+
+30004_nl_fib_lookup-oops.patch
+ Fix an oops bug introduced by the previous patch
+
+30005_core-dump-unreadable-PT_INTERP.patch
+ [SECURITY] Fix a vulnerability that allows local users to read
+ otherwise unreadable (but executable) files by triggering a core dump.
+ See CVE-2007-0958
+
+30006_appletalk-length-mismatch.patch
+ [SECURITY] Fix a remote DoS (crash) in appletalk
+ Depends upon bugfix/appletalk-endianness-annotations.patch
+ See CVE-2007-1357
+
+30007_cm4040-buffer-overflow.patch
+ [SECURITY] Fix a buffer overflow in the Omnikey CardMan 4040 driver
+ See CVE-2007-0005
+
+30008_ipv6_fl_socklist-no-share.patch
+ [SECURITY] Fix local DoS vulnerability caused by inadvertently sharing
+ ipv6_fl_socklist between the listening socket and the socket created
+ for connection.
+ See CVE-2007-1592
+
+30009_keys-serial-num-collision.patch
+ [SECURITY] Fix the key serial number collision avoidance code in
+ key_alloc_serial() that could lead to a local DoS (oops).
+ (closes: #398470)
+ See CVE-2007-0006
+
+30010_ipv6_getsockopt_sticky-null-opt.patch
+ [SECURITY] Fix kernel memory leak vulnerability in
+ ipv6_getsockopt_sticky() which can be triggered by passing a len < 0.
+ See CVE-2007-1000
+
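+ A minimal userspace sketch of the signed-length pattern above (an
+ illustration with made-up names, not the kernel fix itself): a
+ negative length slips past a "too big" test, then wraps to a huge
+ size_t inside the copy, so the len < 0 check must come first.
+
+   #include <stdio.h>
+   #include <string.h>
+
+   static int copy_opt(char *dst, const char *src, int len)
+   {
+           if (len < 0)    /* the missing check */
+                   return -1;
+           if (len > 64)   /* alone, this lets any negative len through */
+                   return -1;
+           memcpy(dst, src, (size_t)len); /* negative len would wrap huge */
+           return len;
+   }
+
+   int main(void)
+   {
+           char dst[64], src[64] = "option data";
+           printf("%d\n", copy_opt(dst, src, -1)); /* rejected: -1 */
+           printf("%d\n", copy_opt(dst, src, 11)); /* copied: 11 */
+           return 0;
+   }
+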
+30011_ipv6_setsockopt-NULL-deref.patch
+ [SECURITY] Fix NULL dereference in ipv6_setsockopt that could lead
+ to a local DoS (oops).
+ See CVE-2007-1388
+
+30012_ipv6-disallow-RH0-by-default.patch
+ [SECURITY] Avoid a remote DoS (network amplification between two routers)
+ by disabling type 0 IPv6 routing headers by default. Can be re-enabled via
+ a sysctl interface. Thanks to Vlad Yasevich for porting help.
+
+30013_listxattr-mem-corruption.patch
+ [SECURITY] Fix userspace corruption vulnerability caused by
+ incorrectly promoted return values in bad_inode_ops
+ This patch changes the kernel ABI.
+ See CVE-2006-5753
+
+30014_bluetooth-l2cap-hci-info-leaks.patch
+ [SECURITY] Fix information leaks in setsockopt() implementations
+ See CVE-2007-1353
+
+30015_usblcd-limit-memory-consumption.patch
+ [SECURITY] limit memory consumption during write in the usblcd driver
+ See CVE-2007-3513
+
+30016_pppoe-socket-release-mem-leak.patch
+ [SECURITY] fix unprivileged memory leak when a PPPoE socket is released
+ after connect but before PPPIOCGCHAN ioctl is called upon it
+ See CVE-2007-2525
+
+30017_nf_conntrack_h323-bounds-checking.patch
+ [SECURITY] nf_conntrack_h323: add out-of-range checking of choices'
+ index values
+ See CVE-2007-3642
+
+30018_dn_fib-out-of-bounds.patch
+ [SECURITY] Fix out of bounds condition in dn_fib_props[]
+ See CVE-2007-2172
+
+30019_random-fix-seeding-with-zero-entropy.patch,
+30020_random-fix-error-in-entropy-extraction.patch
+ [SECURITY] Avoid seeding with the same values at boot time when a
+ system has no entropy source and fix a casting error in entropy
+ extraction that resulted in slightly less random numbers.
+ See CVE-2007-2453
+
+30021_nf_conntrack_sctp-null-deref.patch
+ [SECURITY] Fix remotely triggerable NULL pointer dereference
+ by sending an unknown chunk type.
+ See CVE-2007-2876
+
+30022_i965-secure-batchbuffer.patch
+ [SECURITY] Fix i965 secured batchbuffer usage
+ See CVE-2007-3851
+
+30023_appletalk-endianness-annotations.patch
+ Dependency for 30006_appletalk-length-mismatch.patch.
+
+30024_drm-i965.patch
+ Dependency for 30022_i965-secure-batchbuffer.patch
+
+30025_ipv4-fib_props-out-of-bounds.patch
+ [SECURITY] Fix a typo which caused fib_props[] to be of the wrong size
+ and check for out of bounds condition in index provided by userspace
+ See CVE-2007-2172
+
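+ A short sketch of the bounds-check pattern above, assuming an index
+ that originates in userspace (illustrative table and names, not the
+ kernel's):
+
+   #include <stdio.h>
+
+   static const int fib_props[] = { 0, 1, 2, 3 };
+   #define NPROPS (sizeof(fib_props) / sizeof(fib_props[0]))
+
+   static int lookup(unsigned int type)
+   {
+           if (type >= NPROPS)     /* reject out-of-range types */
+                   return -1;
+           return fib_props[type];
+   }
+
+   int main(void)
+   {
+           printf("%d\n", lookup(2));  /* 2 */
+           printf("%d\n", lookup(99)); /* -1 */
+           return 0;
+   }
+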
+30026_cifs-fix-sign-settings.patch
+ [SECURITY] Fix overriding the server to force signing on caused by
+ checking the wrong global variable.
+ See CVE-2007-3843
+
+30027_cpuset_tasks-underflow.patch
+ [SECURITY] Fix integer underflow in /dev/cpuset/tasks which could allow
+ local attackers to read sensitive kernel memory if the cpuset filesystem
+ is mounted.
+ See CVE-2007-2875
+
+30028_random-bound-check-ordering.patch
+ [SECURITY] Fix stack-based buffer overflow in the random number
+ generator
+ See CVE-2007-3105
+
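+ The ordering matters: a sketch of check-before-use (hypothetical
+ buffer and names, not the random driver's actual code) showing why
+ the bound test has to run before the write, not after it.
+
+   #include <stdio.h>
+
+   static int buf[4];
+
+   /* broken ordering: writes through i before validating it */
+   static int store_bad(unsigned int i, int v)
+   {
+           buf[i] = v;             /* overflow already happened... */
+           if (i >= 4)             /* ...by the time this runs */
+                   return -1;
+           return 0;
+   }
+
+   /* fixed ordering: validate first */
+   static int store_good(unsigned int i, int v)
+   {
+           if (i >= 4)
+                   return -1;
+           buf[i] = v;
+           return 0;
+   }
+
+   int main(void)
+   {
+           store_bad(0, 1);                  /* safe only because i is valid */
+           printf("%d\n", store_good(3, 7)); /* 0: stored */
+           printf("%d\n", store_good(9, 7)); /* -1: rejected, nothing written */
+           return 0;
+   }
+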
+30030_aacraid-ioctl-perm-check.patch
+ [SECURITY] Require admin capabilities to issue ioctls to aacraid devices
+ See CVE-2007-4308
+
+30031_ptrace-handle-bogus-selector.patch,
+30032_fixup-trace_irq-breakage.patch
+ [SECURITY] Handle an invalid LDT segment selector %cs (the xcs field)
+ during ptrace single-step operations that can be used to trigger a
+ NULL-pointer dereference causing an Oops.
+ See CVE-2007-3731
+
+30033_prevent-stack-growth-into-hugetlb-region.patch
+ [SECURITY] Prevent OOPS during stack expansion when the VMA crosses
+ into address space reserved for hugetlb pages.
+ See CVE-2007-3739
+
+30034_cifs-honor-umask.patch
+ [SECURITY] Make CIFS honor a process' umask
+ See CVE-2007-3740
+
+30035_amd64-zero-extend-32bit-ptrace.patch
+ [SECURITY] Zero extend all registers after ptrace in 32-bit entry path.
+ See CVE-2007-4573
+
+30036_jffs2-ACL-vs-mode-handling.patch
+ [SECURITY] Write correct legacy modes to the medium on inode creation to
+ prevent incorrect permissions upon remount.
+ See CVE-2007-4849
+
+30039_hugetlb-prio_tree-unit-fix.patch
+ [SECURITY] Fix misconversion of hugetlb_vmtruncate_list to prio_tree
+ which could be used to trigger a BUG_ON() call in exit_mmap.
+ See CVE-2007-4133
+
+30040_usb-pwc-disconnect-block.patch
+ [SECURITY] Fix issue with unplugging webcams that use the pwc driver.
+ If userspace still has the device open when it is unplugged, the driver
+ would wait for the device to close, blocking the USB subsystem.
+ See CVE-2007-5093
+
+30041_ipv6-disallow-RH0-by-default-2.patch
+ Fix IPv6 RFC conformance issue introduced in 2.6.18.dfsg.1-13 by the
+ fix for CVE-2007-2242. Thanks to Brian Haley for the patch.
+ (closes: Debian #440127)
+
+/* This is already in Xen 3.2
+30042_reset-pdeathsig-on-suid-upstream.patch
+ Update fix for CVE-2007-3848 with the patch accepted upstream
+ (formerly 30013_reset-pdeathsig-on-suid.patch)
+*/
+
+30043_don-t-leak-nt-bit-into-next-task-xen.patch
+ [SECURITY] Don't leak NT bit into next task (Xen).
+ See CVE-2006-5755
+
+30044_cifs-better-failed-mount-errors.patch,
+30045_cifs-corrupt-server-response-overflow.patch
+ [SECURITY][CIFS] Fix multiple overflows that can be remotely triggered
+ by a server sending a corrupt response.
+ See CVE-2007-5904
+
+30046_wait_task_stopped-hang.patch
+ [SECURITY] wait_task_stopped was incorrectly testing for TASK_TRACED;
+ check p->exit_state instead, avoiding a potential system hang
+ See CVE-2007-5500
+
+30047_ieee80211-underflow.patch
+ [SECURITY] Fix integer underflow in ieee80211 which makes it possible
+ for a malicious frame to crash a system using a driver built on top of
+ the Linux 802.11 wireless code.
+ See CVE-2007-4997
+
+30048_sysfs_readdir-NULL-deref-1.patch,
+30049_sysfs_readdir-NULL-deref-2.patch,
+30050_sysfs-fix-condition-check.patch
+ [SECURITY] Fix potential NULL pointer dereference which can lead to
+ a local DoS (kernel oops)
+ See CVE-2007-3104
+
+30051_tmpfs-restore-clear_highpage.patch
+ [SECURITY] Fix a theoretical kernel memory leak in the tmpfs filesystem
+ See CVE-2007-6417
+
+30052_minixfs-printk-hang.patch
+ [SECURITY] Rate-limit printks caused by accessing a corrupted minixfs
+ filesystem that would otherwise cause a system to hang (printk storm)
+ See CVE-2006-6058
+
+30053_hrtimer-large-relative-timeouts-overflow.patch
+ [SECURITY] Avoid overflow in hrtimers due to large relative timeouts
+ See CVE-2007-5966
+
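+ A sketch of the saturating-add idea behind such a fix, assuming a
+ signed 64-bit nanosecond clock and non-negative operands (the names
+ are illustrative): clamping to the maximum instead of wrapping keeps
+ a huge relative timeout from turning into an already-expired one.
+
+   #include <stdint.h>
+   #include <stdio.h>
+
+   #define KTIME_MAX INT64_MAX
+
+   /* assumes now >= 0 and rel >= 0 */
+   static int64_t ktime_add_safe(int64_t now, int64_t rel)
+   {
+           if (rel > KTIME_MAX - now)      /* would overflow: saturate */
+                   return KTIME_MAX;
+           return now + rel;
+   }
+
+   int main(void)
+   {
+           printf("%lld\n", (long long)ktime_add_safe(1000, 500));       /* 1500 */
+           printf("%lld\n", (long long)ktime_add_safe(1000, INT64_MAX)); /* clamped */
+           return 0;
+   }
+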
+30054_coredump-only-to-same-uid.patch
+ [SECURITY] Fix an issue where core dumping over a file that
+ already exists retains the ownership of the original file
+ See CVE-2007-6206
+
+30055_isdn-net-overflow.patch
+ [SECURITY] Fix potential overflows in the ISDN subsystem
+ See CVE-2007-6063
+
+30056_proc-snd-page-alloc-mem-leak.patch
+ [SECURITY][ABI Changer] Fix an issue in the alsa subsystem that allows a
+ local user to read potentially sensitive kernel memory from the proc
+ filesystem
+ See CVE-2007-4571
+
+30057_fat-move-ioctl-compat-code.patch
+30058_fat-fix-compat-ioctls.patch
+ [SECURITY][ABI Changer] Fix kernel_dirent corruption in the compat layer
+ for fat ioctls
+ See CVE-2007-2878
+
+30059_vfs-use-access-mode-flag.patch
+ [SECURITY] Use the access mode flag instead of the open flag when
+ testing access mode for a directory. Modify
+ features/all/vserver/vs2.0.2.2-rc9.patch to apply on top of this
+ See CVE-2008-0001
+
+30060_i4l-isdn_ioctl-mem-overrun.patch
+ [SECURITY] Fix potential isdn ioctl memory overrun
+ See CVE-2007-6151
+
+30061_vmsplice-security.patch
+ [SECURITY] Fix missing access check in vmsplice.
+ See CVE-2008-0010, CVE-2008-0600
+
+30062_clear-spurious-irq.patch
+ Fix a minor denial of service issue that allows local users to disable
+ an interrupt by causing an interrupt handler to be quickly inserted/removed.
+ This has only been shown to happen with certain serial devices, so it can
+ only be triggered by a user who already has additional privileges (dialout
+ group). (closes: Debian #404815)
+
+30063_mmap-VM_DONTEXPAND.patch
+ [SECURITY] Add VM_DONTEXPAND to vm_flags in drivers that register
+ a fault handler but do not bounds check the offset argument
+ See CVE-2008-0007
+
+30064_RLIMIT_CPU-earlier-checking.patch
+ [SECURITY] Move the check for an RLIMIT_CPU value of 0 earlier
+ to prevent users from escaping the limit (closes: #419706)
+ See CVE-2008-1294
+
+30065_dnotify-race.patch
+ [SECURITY] Fix a race in the directory notify
+ See CVE-2008-1375
+
+30066_fcntl_setlk-close-race.patch
+ [SECURITY] Fix an SMP race to prevent reordering of flock updates
+ and accesses to the descriptor table on close().
+ See CVE-2008-1669
+
+30067_sit-missing-kfree_skb-on-pskb_may_pull.patch
+ [SECURITY] Fix remotely-triggerable memory leak in the Simple
+ Internet Transition (SIT) code used for IPv6 over IPv4 tunnels
+ See CVE-2008-2136
+
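+ A minimal sketch of the early-exit leak pattern (free() standing in
+ for the kernel's kfree_skb, names hypothetical): once the handler
+ owns the buffer, every bail-out path has to release it.
+
+   #include <stdlib.h>
+   #include <stdio.h>
+
+   static int handle(unsigned char *pkt, size_t len)
+   {
+           if (len < 20) {         /* header too short: bail out... */
+                   free(pkt);      /* ...but release the buffer (the fix) */
+                   return -1;
+           }
+           /* ... process the packet ... */
+           free(pkt);
+           return 0;
+   }
+
+   int main(void)
+   {
+           unsigned char *pkt = calloc(1, 8);
+           if (!pkt)
+                   return 1;
+           printf("%d\n", handle(pkt, 8)); /* -1, and no leak */
+           return 0;
+   }
+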
+30068_hrtimer-prevent-overrun.patch
+30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch
+ [SECURITY] Fix potential infinite loop in hrtimer_forward on
+ 64-bit systems
+ See CVE-2007-6712
+
+30070_amd64-cs-corruption.patch
+ [SECURITY] Fix local ptrace denial of service for amd64 flavor
+ kernels, bug #480390
+ See CVE-2008-1615
+
+30071_dccp-feature-length-check.patch
+ [SECURITY] Validate feature length to avoid heap overflow
+ See CVE-2008-2358
+
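+ A sketch of the length validation, assuming a simple type/length
+ option layout (illustrative, not DCCP's actual parser): the length
+ byte from the wire is checked against both the format minimum and
+ the bytes actually remaining before anything is read.
+
+   #include <stdio.h>
+
+   /* returns bytes consumed, or -1 if the option is malformed */
+   static int parse_opt(const unsigned char *buf, unsigned int buflen)
+   {
+           unsigned int len;
+
+           if (buflen < 2)
+                   return -1;
+           len = buf[1];
+           if (len < 2 || len > buflen)    /* the crucial check */
+                   return -1;
+           /* ... buf[0..len-1] is now safe to read ... */
+           return (int)len;
+   }
+
+   int main(void)
+   {
+           unsigned char opt[] = { 32, 4, 1, 2 };       /* type, len, data */
+           printf("%d\n", parse_opt(opt, sizeof(opt))); /* 4 */
+           printf("%d\n", parse_opt(opt, 3));           /* -1: runs past end */
+           return 0;
+   }
+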
+30072_asn1-ber-decoding-checks.patch
+ [SECURITY] Validate lengths in ASN.1 decoding code to avoid
+ heap overflow
+ See CVE-2008-1673
+
+30073_nfs-write-corruption.patch
+ Fix potential nfs write corruption (closes: #470719)
+
+30074_x86-clear-df-before-calling-signal-handler.patch
+ [i386/amd64] Clear DF before calling signal handler. (closes: #469058)
+ See CVE-2008-1367
+
+30075_3w-xxxx-bigmem-corruption.patch
+ 3w-xxxx: Fix data corruption on em64t systems w/ > 2GB of memory
+ (closes: #464923).
+
+30076_dnotify-race-locking.patch
+ Add missing locking for the dnotify-race fix that was included in
+ the upstream commit
+
+30077_sctp-make-sure-n-sizeof-does-not-overflow.patch
+ [SECURITY] Fix potential overflow condition in
+ sctp_getsockopt_local_addrs_old
+ See CVE-2008-2826
+
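+ A sketch of the multiplication-overflow check, assuming a
+ user-supplied element count (illustrative struct and names): on a
+ 32-bit size_t, n * sizeof(elem) can wrap to a small allocation, so
+ n is validated against SIZE_MAX / sizeof(elem) first.
+
+   #include <stdint.h>
+   #include <stdio.h>
+   #include <stdlib.h>
+
+   struct addr { unsigned char bytes[16]; };
+
+   static void *alloc_addrs(size_t n)
+   {
+           if (n > SIZE_MAX / sizeof(struct addr)) /* n * size would wrap */
+                   return NULL;
+           return malloc(n * sizeof(struct addr));
+   }
+
+   int main(void)
+   {
+           void *p = alloc_addrs(4);
+           printf("%s\n", p ? "ok" : "rejected");                         /* ok */
+           printf("%s\n", alloc_addrs(SIZE_MAX / 8) ? "ok" : "rejected"); /* rejected */
+           free(p);
+           return 0;
+   }
+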
+30078_esp-iv-in-linear-part-of-skb.patch
+ [SECURITY] Avoid tripping BUG() in IPsec code when the first fragment
+ of an ESP packet does not contain the entire ESP header and IV
+ See CVE-2007-6282
+
+30079a_amd64-fix-zeroing-on-exception-in-copy_user-pre.patch
+30079b_amd64-fix-zeroing-on-exception-in-copy_user.patch
+ [SECURITY] [amd64] Fix potential information leak when a copy
+ operation fails by properly zeroing out destination memory
+ See CVE-2008-2729
+
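+ A sketch of the tail-zeroing rule (a userspace illustration, with a
+ stand-in for the faulting copy routine): when the copy stops early,
+ the uncopied remainder of the destination is cleared so stale
+ memory contents can never be handed back.
+
+   #include <stdio.h>
+   #include <string.h>
+
+   /* pretend the copy faults after 'ok' bytes; returns bytes left */
+   static size_t do_copy(char *dst, const char *src, size_t n, size_t ok)
+   {
+           size_t done = ok < n ? ok : n;
+           memcpy(dst, src, done);
+           return n - done;
+   }
+
+   static size_t copy_zeroing_tail(char *dst, const char *src,
+                                   size_t n, size_t ok)
+   {
+           size_t left = do_copy(dst, src, n, ok);
+           if (left)
+                   memset(dst + (n - left), 0, left); /* the fix */
+           return left;
+   }
+
+   int main(void)
+   {
+           char dst[8] = "XXXXXXX", src[8] = "secret!";
+           copy_zeroing_tail(dst, src, 8, 4);
+           printf("%s\n", dst); /* "secr": stale bytes were cleared */
+           return 0;
+   }
+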
+30080_tty-fix-for-tty-operations-bugs.patch
+ [SECURITY] Fix issues with tty operation handling in various drivers
+ See CVE-2008-2812
+
+30081_check-privileges-before-setting-mount-propagation.patch
+ [SECURITY] Check CAP_SYS_ADMIN when changing mountpoint type
+ See CVE-2008-2931
+
+30082a_x86-add-copy_user_handle_tail.patch
+30082b_x86-fix-copy_user.patch
+ [SECURITY][amd64] Fix information leak in the copy_user routine, see #490910.
+ See CVE-2008-0598
+
+30083_x86-wrong-register-was-used-in-align-macro.patch
+ Fix regression introduced upstream by the fix for CVE-2008-0598
+
+30084_cifs-fix-compiler-warning.patch
+30085_netfilter-nf_nat_snmp_basic-fix-range-check.patch
+ Fix regressions introduced upstream by the fixes for CVE-2008-1673
+
+30086_sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch
+ Fix possible information leak in seq_oss_synth.c
+ See CVE-2008-3272
+
+30087_vfs-fix-lookup-on-deleted-directory.patch
+ Fix potential memory leak in lookup path
+ See CVE-2008-3275
+
+50009_gentooify-tls-warning.patch
+ Change tls warning instructions to apply directly to Gentoo.
diff --git a/tags/2.6.18-12/10002_xen-3.3.0.patch b/tags/2.6.18-12/10002_xen-3.3.0.patch
new file mode 100644
index 0000000..996125c
--- /dev/null
+++ b/tags/2.6.18-12/10002_xen-3.3.0.patch
@@ -0,0 +1,210345 @@
+diff -rpuN linux-2.6.18.8/arch/i386/boot/Makefile linux-2.6.18-xen-3.3.0/arch/i386/boot/Makefile
+--- linux-2.6.18.8/arch/i386/boot/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/boot/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -26,7 +26,7 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
+ #RAMDISK := -DRAMDISK=512
+
+ targets := vmlinux.bin bootsect bootsect.o \
+- setup setup.o zImage bzImage
++ setup setup.o zImage bzImage vmlinuz vmlinux-stripped
+ subdir- := compressed
+
+ hostprogs-y := tools/build
+@@ -133,5 +133,13 @@ zlilo: $(BOOTIMAGE)
+ cp System.map $(INSTALL_PATH)/
+ if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+
++$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
++ $(call if_changed,gzip)
++ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++
++$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded
++$(obj)/vmlinux-stripped: vmlinux FORCE
++ $(call if_changed,objcopy)
++
+ install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+diff -rpuN linux-2.6.18.8/arch/i386/Kconfig linux-2.6.18-xen-3.3.0/arch/i386/Kconfig
+--- linux-2.6.18.8/arch/i386/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -16,6 +16,7 @@ config X86_32
+
+ config GENERIC_TIME
+ bool
++ depends on !X86_XEN
+ default y
+
+ config LOCKDEP_SUPPORT
+@@ -103,6 +104,17 @@ config X86_PC
+ help
+ Choose this option if your computer is a standard PC or compatible.
+
++config X86_XEN
++ bool "Xen-compatible"
++ select XEN
++ select X86_PAE
++ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
++ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
++ select SWIOTLB
++ help
++ Choose this option if you plan to run this kernel on top of the
++ Xen Hypervisor.
++
+ config X86_ELAN
+ bool "AMD Elan"
+ help
+@@ -213,6 +225,7 @@ source "arch/i386/Kconfig.cpu"
+
+ config HPET_TIMER
+ bool "HPET Timer Support"
++ depends on !X86_XEN
+ help
+ This enables the use of the HPET for the kernel's internal timer.
+ HPET is the next generation timer replacing legacy 8254s.
+@@ -263,7 +276,7 @@ source "kernel/Kconfig.preempt"
+
+ config X86_UP_APIC
+ bool "Local APIC support on uniprocessors"
+- depends on !SMP && !(X86_VISWS || X86_VOYAGER)
++ depends on !SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+ help
+ A local APIC (Advanced Programmable Interrupt Controller) is an
+ integrated interrupt controller in the CPU. If you have a single-CPU
+@@ -288,12 +301,12 @@ config X86_UP_IOAPIC
+
+ config X86_LOCAL_APIC
+ bool
+- depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER)
++ depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
+ default y
+
+ config X86_IO_APIC
+ bool
+- depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER))
++ depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
+ default y
+
+ config X86_VISWS_APIC
+@@ -303,7 +316,7 @@ config X86_VISWS_APIC
+
+ config X86_MCE
+ bool "Machine Check Exception"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || X86_XEN)
+ ---help---
+ Machine Check Exception support allows the processor to notify the
+ kernel if it detects a problem (e.g. overheating, component failure).
+@@ -384,7 +397,7 @@ config I8K
+
+ config X86_REBOOTFIXUPS
+ bool "Enable X86 board specific fixups for reboot"
+- depends on X86
++ depends on !X86_XEN
+ default n
+ ---help---
+ This enables chipset and/or board specific fixups to be done
+@@ -402,6 +415,7 @@ config X86_REBOOTFIXUPS
+
+ config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ If you say Y here and also to "/dev file system support" in the
+ 'File systems' section, you will be able to update the microcode on
+@@ -434,6 +448,10 @@ config X86_CPUID
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
++config SWIOTLB
++ bool
++ default n
++
+ source "drivers/firmware/Kconfig"
+
+ choice
+@@ -616,6 +634,7 @@ config HIGHPTE
+
+ config MATH_EMULATION
+ bool "Math emulation"
++ depends on !X86_XEN
+ ---help---
+ Linux can emulate a math coprocessor (used for floating point
+ operations) if you don't have one. 486DX and Pentium processors have
+@@ -641,6 +660,7 @@ config MATH_EMULATION
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -675,7 +695,7 @@ config MTRR
+
+ config EFI
+ bool "Boot from EFI support"
+- depends on ACPI
++ depends on ACPI && !X86_XEN
+ default n
+ ---help---
+ This enables the the kernel to boot on EFI platforms using
+@@ -693,7 +713,7 @@ config EFI
+
+ config IRQBALANCE
+ bool "Enable kernel irq balancing"
+- depends on SMP && X86_IO_APIC
++ depends on SMP && X86_IO_APIC && !X86_XEN
+ default y
+ help
+ The default yes will allow the kernel to do irq load balancing.
+@@ -741,7 +761,7 @@ source kernel/Kconfig.hz
+
+ config KEXEC
+ bool "kexec system call (EXPERIMENTAL)"
+- depends on EXPERIMENTAL
++ depends on EXPERIMENTAL && !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -760,6 +780,7 @@ config CRASH_DUMP
+ bool "kernel crash dumps (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on HIGHMEM
++ depends on !XEN
+ help
+ Generate crash dump after being started by kexec.
+
+@@ -793,6 +814,7 @@ config HOTPLUG_CPU
+
+ config COMPAT_VDSO
+ bool "Compat VDSO support"
++ depends on !X86_XEN
+ default y
+ help
+ Map the VDSO to the predictable old-style address too.
+@@ -810,18 +832,18 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HIGHMEM
+
+ menu "Power management options (ACPI, APM)"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+
+-source kernel/power/Kconfig
++source "kernel/power/Kconfig"
+
+ source "drivers/acpi/Kconfig"
+
+ menu "APM (Advanced Power Management) BIOS Support"
+-depends on PM && !X86_VISWS
++depends on PM && !(X86_VISWS || X86_XEN)
+
+ config APM
+ tristate "APM (Advanced Power Management) BIOS support"
+- depends on PM
++ depends on PM && PM_LEGACY
+ ---help---
+ APM is a BIOS specification for saving power using several different
+ techniques. This is mostly useful for battery powered laptops with
+@@ -1006,6 +1028,7 @@ choice
+
+ config PCI_GOBIOS
+ bool "BIOS"
++ depends on !X86_XEN
+
+ config PCI_GOMMCONFIG
+ bool "MMConfig"
+@@ -1013,6 +1036,13 @@ config PCI_GOMMCONFIG
+ config PCI_GODIRECT
+ bool "Direct"
+
++config PCI_GOXEN_FE
++ bool "Xen PCI Frontend"
++ depends on X86_XEN
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
+ config PCI_GOANY
+ bool "Any"
+
+@@ -1020,7 +1050,7 @@ endchoice
+
+ config PCI_BIOS
+ bool
+- depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
++ depends on !(X86_VISWS || X86_XEN) && PCI && (PCI_GOBIOS || PCI_GOANY)
+ default y
+
+ config PCI_DIRECT
+@@ -1033,6 +1063,19 @@ config PCI_MMCONFIG
+ depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
+ default y
+
++config XEN_PCIDEV_FRONTEND
++ bool
++ depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
++ select HOTPLUG
++ default y
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -1043,7 +1086,7 @@ config ISA_DMA_API
+
+ config ISA
+ bool "ISA support"
+- depends on !(X86_VOYAGER || X86_VISWS)
++ depends on !(X86_VOYAGER || X86_VISWS || X86_XEN)
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+@@ -1070,7 +1113,7 @@ config EISA
+ source "drivers/eisa/Kconfig"
+
+ config MCA
+- bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
++ bool "MCA support" if !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y if X86_VOYAGER
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+@@ -1146,6 +1189,8 @@ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+
+ #
+@@ -1171,7 +1216,7 @@ config X86_SMP
+
+ config X86_HT
+ bool
+- depends on SMP && !(X86_VISWS || X86_VOYAGER)
++ depends on SMP && !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y
+
+ config X86_BIOS_REBOOT
+@@ -1182,6 +1227,17 @@ config X86_BIOS_REBOOT
+ config X86_TRAMPOLINE
+ bool
+ depends on X86_SMP || (X86_VOYAGER && SMP)
++ depends on !XEN
++ default y
++
++config X86_NO_TSS
++ bool
++ depends on X86_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_XEN
+ default y
+
+ config KTIME_SCALAR
+diff -rpuN linux-2.6.18.8/arch/i386/Kconfig.cpu linux-2.6.18-xen-3.3.0/arch/i386/Kconfig.cpu
+--- linux-2.6.18.8/arch/i386/Kconfig.cpu 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/Kconfig.cpu 2008-08-21 11:36:07.000000000 +0200
+@@ -252,7 +252,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ bool
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
+ default y
+
+ config X86_WP_WORKS_OK
+@@ -312,5 +312,5 @@ config X86_OOSTORE
+
+ config X86_TSC
+ bool
+- depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
++ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ && !X86_XEN
+ default y
+diff -rpuN linux-2.6.18.8/arch/i386/Kconfig.debug linux-2.6.18-xen-3.3.0/arch/i386/Kconfig.debug
+--- linux-2.6.18.8/arch/i386/Kconfig.debug 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/Kconfig.debug 2008-08-21 11:36:07.000000000 +0200
+@@ -79,6 +79,7 @@ config X86_MPPARSE
+ config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
++ depends on !X86_NO_TSS
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/boot-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/boot-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/acpi/boot-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/boot-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1168 @@
++/*
++ * boot.c - Architecture-Specific Low-Level ACPI Boot Support
++ *
++ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
++ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/efi.h>
++#include <linux/module.h>
++#include <linux/dmi.h>
++#include <linux/irq.h>
++
++#include <asm/pgtable.h>
++#include <asm/io_apic.h>
++#include <asm/apic.h>
++#include <asm/io.h>
++#include <asm/mpspec.h>
++
++#ifdef CONFIG_X86_64
++
++extern void __init clustered_apic_check(void);
++
++extern int gsi_irq_sharing(int gsi);
++#include <asm/proto.h>
++
++static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
++
++
++#else /* X86 */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++static inline int gsi_irq_sharing(int gsi) { return gsi; }
++
++#endif /* X86 */
++
++#define BAD_MADT_ENTRY(entry, end) ( \
++ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
++ ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
++
++#define PREFIX "ACPI: "
++
++int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
++int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
++int acpi_ht __initdata = 1; /* enable HT */
++
++int acpi_lapic;
++int acpi_ioapic;
++int acpi_strict;
++EXPORT_SYMBOL(acpi_strict);
++
++acpi_interrupt_flags acpi_sci_flags __initdata;
++int acpi_sci_override_gsi __initdata;
++int acpi_skip_timer_override __initdata;
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
++#endif
++
++#ifndef __HAVE_ARCH_CMPXCHG
++#warning ACPI uses CMPXCHG, i486 and later hardware
++#endif
++
++#define MAX_MADT_ENTRIES 256
++u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
++ {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
++EXPORT_SYMBOL(x86_acpiid_to_apicid);
++
++/* --------------------------------------------------------------------------
++ Boot-time Configuration
++ -------------------------------------------------------------------------- */
++
++/*
++ * The default interrupt routing model is PIC (8259). This gets
++ * overriden if IOAPICs are enumerated (below).
++ */
++enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
++
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
++
++/* rely on all ACPI tables being in the direct mapping */
++char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
++{
++ if (!phys_addr || !size)
++ return NULL;
++
++ if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
++ return __va(phys_addr);
++
++ return NULL;
++}
++
++#else
++
++/*
++ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
++ * to map the target physical address. The problem is that set_fixmap()
++ * provides a single page, and it is possible that the page is not
++ * sufficient.
++ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
++ * i.e. until the next __va_range() call.
++ *
++ * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
++ * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
++ * count idx down while incrementing the phys address.
++ */
++char *__acpi_map_table(unsigned long phys, unsigned long size)
++{
++ unsigned long base, offset, mapped_size;
++ int idx;
++
++#ifndef CONFIG_XEN
++ if (phys + size < 8 * 1024 * 1024)
++ return __va(phys);
++#endif
++
++ offset = phys & (PAGE_SIZE - 1);
++ mapped_size = PAGE_SIZE - offset;
++ set_fixmap(FIX_ACPI_END, phys);
++ base = fix_to_virt(FIX_ACPI_END);
++
++ /*
++ * Most cases can be covered by the below.
++ */
++ idx = FIX_ACPI_END;
++ while (mapped_size < size) {
++ if (--idx < FIX_ACPI_BEGIN)
++ return NULL; /* cannot handle this */
++ phys += PAGE_SIZE;
++ set_fixmap(idx, phys);
++ mapped_size += PAGE_SIZE;
++ }
++
++ return ((unsigned char *)base + offset);
++}
++#endif
++
++#ifdef CONFIG_PCI_MMCONFIG
++/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
++struct acpi_table_mcfg_config *pci_mmcfg_config;
++int pci_mmcfg_config_num;
++
++int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_mcfg *mcfg;
++ unsigned long i;
++ int config_size;
++
++ if (!phys_addr || !size)
++ return -EINVAL;
++
++ mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
++ if (!mcfg) {
++ printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
++ return -ENODEV;
++ }
++
++ /* how many config structures do we have */
++ pci_mmcfg_config_num = 0;
++ i = size - sizeof(struct acpi_table_mcfg);
++ while (i >= sizeof(struct acpi_table_mcfg_config)) {
++ ++pci_mmcfg_config_num;
++ i -= sizeof(struct acpi_table_mcfg_config);
++ };
++ if (pci_mmcfg_config_num == 0) {
++ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
++ return -ENODEV;
++ }
++
++ config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
++ pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
++ if (!pci_mmcfg_config) {
++ printk(KERN_WARNING PREFIX
++ "No memory for MCFG config tables\n");
++ return -ENOMEM;
++ }
++
++ memcpy(pci_mmcfg_config, &mcfg->config, config_size);
++ for (i = 0; i < pci_mmcfg_config_num; ++i) {
++ if (mcfg->config[i].base_reserved) {
++ printk(KERN_ERR PREFIX
++ "MMCONFIG not in low 4GB of memory\n");
++ kfree(pci_mmcfg_config);
++ pci_mmcfg_config_num = 0;
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++#endif /* CONFIG_PCI_MMCONFIG */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_madt *madt = NULL;
++
++ if (!phys_addr || !size || !cpu_has_apic)
++ return -EINVAL;
++
++ madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
++ if (!madt) {
++ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
++ return -ENODEV;
++ }
++
++ if (madt->lapic_address) {
++ acpi_lapic_addr = (u64) madt->lapic_address;
++
++ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
++ madt->lapic_address);
++ }
++
++ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_lapic *processor = NULL;
++
++ processor = (struct acpi_table_lapic *)header;
++
++ if (BAD_MADT_ENTRY(processor, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ /* Record local apic id only when enabled */
++ if (processor->flags.enabled)
++ x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
++
++ /*
++ * We need to register disabled CPU as well to permit
++ * counting disabled CPUs. This allows us to size
++ * cpus_possible_map more accurately, to permit
++ * to not preallocating memory for all NR_CPUS
++ * when we use CPU hotplug.
++ */
++ mp_register_lapic(processor->id, /* APIC ID */
++ processor->flags.enabled); /* Enabled? */
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
++ const unsigned long end)
++{
++ struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
++
++ lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
++
++ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
++ return -EINVAL;
++
++ acpi_lapic_addr = lapic_addr_ovr->address;
++
++ return 0;
++}
++
++static int __init
++acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_lapic_nmi *lapic_nmi = NULL;
++
++ lapic_nmi = (struct acpi_table_lapic_nmi *)header;
++
++ if (BAD_MADT_ENTRY(lapic_nmi, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ if (lapic_nmi->lint != 1)
++ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
++
++ return 0;
++}
++
++#endif /*CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_X86_IO_APIC
++
++static int __init
++acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_ioapic *ioapic = NULL;
++
++ ioapic = (struct acpi_table_ioapic *)header;
++
++ if (BAD_MADT_ENTRY(ioapic, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ mp_register_ioapic(ioapic->id,
++ ioapic->address, ioapic->global_irq_base);
++
++ return 0;
++}
++
++/*
++ * Parse Interrupt Source Override for the ACPI SCI
++ */
++static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
++{
++ if (trigger == 0) /* compatible SCI trigger is level */
++ trigger = 3;
++
++ if (polarity == 0) /* compatible SCI polarity is low */
++ polarity = 3;
++
++ /* Command-line over-ride via acpi_sci= */
++ if (acpi_sci_flags.trigger)
++ trigger = acpi_sci_flags.trigger;
++
++ if (acpi_sci_flags.polarity)
++ polarity = acpi_sci_flags.polarity;
++
++ /*
++ * mp_config_acpi_legacy_irqs() already setup IRQs < 16
++ * If GSI is < 16, this will update its flags,
++ * else it will create a new mp_irqs[] entry.
++ */
++ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
++
++ /*
++ * stash over-ride to indicate we've been here
++ * and for later update of acpi_fadt
++ */
++ acpi_sci_override_gsi = gsi;
++ return;
++}
++
++static int __init
++acpi_parse_int_src_ovr(acpi_table_entry_header * header,
++ const unsigned long end)
++{
++ struct acpi_table_int_src_ovr *intsrc = NULL;
++
++ intsrc = (struct acpi_table_int_src_ovr *)header;
++
++ if (BAD_MADT_ENTRY(intsrc, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ if (intsrc->bus_irq == acpi_fadt.sci_int) {
++ acpi_sci_ioapic_setup(intsrc->global_irq,
++ intsrc->flags.polarity,
++ intsrc->flags.trigger);
++ return 0;
++ }
++
++ if (acpi_skip_timer_override &&
++ intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
++ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ return 0;
++ }
++
++ mp_override_legacy_irq(intsrc->bus_irq,
++ intsrc->flags.polarity,
++ intsrc->flags.trigger, intsrc->global_irq);
++
++ return 0;
++}
++
++static int __init
++acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
++{
++ struct acpi_table_nmi_src *nmi_src = NULL;
++
++ nmi_src = (struct acpi_table_nmi_src *)header;
++
++ if (BAD_MADT_ENTRY(nmi_src, end))
++ return -EINVAL;
++
++ acpi_table_print_madt_entry(header);
++
++ /* TBD: Support nimsrc entries? */
++
++ return 0;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++
++/*
++ * acpi_pic_sci_set_trigger()
++ *
++ * use ELCR to set PIC-mode trigger type for SCI
++ *
++ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
++ * it may require Edge Trigger -- use "acpi_sci=edge"
++ *
++ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
++ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
++ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
++ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
++ */
++
++void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
++{
++ unsigned int mask = 1 << irq;
++ unsigned int old, new;
++
++ /* Real old ELCR mask */
++ old = inb(0x4d0) | (inb(0x4d1) << 8);
++
++ /*
++ * If we use ACPI to set PCI irq's, then we should clear ELCR
++ * since we will set it correctly as we enable the PCI irq
++ * routing.
++ */
++ new = acpi_noirq ? old : 0;
++
++ /*
++ * Update SCI information in the ELCR, it isn't in the PCI
++ * routing tables..
++ */
++ switch (trigger) {
++ case 1: /* Edge - clear */
++ new &= ~mask;
++ break;
++ case 3: /* Level - set */
++ new |= mask;
++ break;
++ }
++
++ if (old == new)
++ return;
++
++ printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
++ outb(new, 0x4d0);
++ outb(new >> 8, 0x4d1);
++}
++
++int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (use_pci_vector() && !platform_legacy_irq(gsi))
++ *irq = IO_APIC_VECTOR(gsi);
++ else
++#endif
++ *irq = gsi_irq_sharing(gsi);
++ return 0;
++}
++
++/*
++ * success: return IRQ number (>=0)
++ * failure: return < 0
++ */
++int acpi_register_gsi(u32 gsi, int triggering, int polarity)
++{
++ unsigned int irq;
++ unsigned int plat_gsi = gsi;
++
++#ifdef CONFIG_PCI
++ /*
++ * Make sure all (legacy) PCI IRQs are set as level-triggered.
++ */
++ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
++ extern void eisa_set_level_irq(unsigned int irq);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE)
++ eisa_set_level_irq(gsi);
++ }
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
++ plat_gsi = mp_register_gsi(gsi, triggering, polarity);
++ }
++#endif
++ acpi_gsi_to_irq(plat_gsi, &irq);
++ return irq;
++}
++
++EXPORT_SYMBOL(acpi_register_gsi);
++
++/*
++ * ACPI based hotplug support for CPU
++ */
++#ifdef CONFIG_ACPI_HOTPLUG_CPU
++int acpi_map_lsapic(acpi_handle handle, int *pcpu)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_map_lsapic);
++
++int acpi_unmap_lsapic(int cpu)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_unmap_lsapic);
++#endif /* CONFIG_ACPI_HOTPLUG_CPU */
++
++int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_register_ioapic);
++
++int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
++{
++ /* TBD */
++ return -EINVAL;
++}
++
++EXPORT_SYMBOL(acpi_unregister_ioapic);
++
++static unsigned long __init
++acpi_scan_rsdp(unsigned long start, unsigned long length)
++{
++ unsigned long offset = 0;
++ unsigned long sig_len = sizeof("RSD PTR ") - 1;
++ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
++
++ /*
++ * Scan all 16-byte boundaries of the physical memory region for the
++ * RSDP signature.
++ */
++ for (offset = 0; offset < length; offset += 16) {
++ if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
++ continue;
++ return (start + offset);
++ }
++
++ return 0;
++}
++
++static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
++{
++ struct acpi_table_sbf *sb;
++
++ if (!phys_addr || !size)
++ return -EINVAL;
++
++ sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
++ if (!sb) {
++ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
++ return -ENODEV;
++ }
++
++ sbf_port = sb->sbf_cmos; /* Save CMOS port */
++
++ return 0;
++}
++
++#ifdef CONFIG_HPET_TIMER
++
++static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
++{
++ struct acpi_table_hpet *hpet_tbl;
++
++ if (!phys || !size)
++ return -EINVAL;
++
++ hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
++ if (!hpet_tbl) {
++ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
++ return -ENODEV;
++ }
++
++ if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
++ printk(KERN_WARNING PREFIX "HPET timers must be located in "
++ "memory.\n");
++ return -1;
++ }
++#ifdef CONFIG_X86_64
++ vxtime.hpet_address = hpet_tbl->addr.addrl |
++ ((long)hpet_tbl->addr.addrh << 32);
++
++ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++ hpet_tbl->id, vxtime.hpet_address);
++#else /* X86 */
++ {
++ extern unsigned long hpet_address;
++
++ hpet_address = hpet_tbl->addr.addrl;
++ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++ hpet_tbl->id, hpet_address);
++ }
++#endif /* X86 */
++
++ return 0;
++}
++#else
++#define acpi_parse_hpet NULL
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern u32 pmtmr_ioport;
++#endif
++
++static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
++{
++ struct fadt_descriptor *fadt = NULL;
++
++ fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
++ if (!fadt) {
++ printk(KERN_WARNING PREFIX "Unable to map FADT\n");
++ return 0;
++ }
++ /* initialize sci_int early for INT_SRC_OVR MADT parsing */
++ acpi_fadt.sci_int = fadt->sci_int;
++
++ /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
++ acpi_fadt.revision = fadt->revision;
++ acpi_fadt.force_apic_physical_destination_mode =
++ fadt->force_apic_physical_destination_mode;
++
++#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
++ /* detect the location of the ACPI PM Timer */
++ if (fadt->revision >= FADT2_REVISION_ID) {
++ /* FADT rev. 2 */
++ if (fadt->xpm_tmr_blk.address_space_id !=
++ ACPI_ADR_SPACE_SYSTEM_IO)
++ return 0;
++
++ pmtmr_ioport = fadt->xpm_tmr_blk.address;
++ /*
++ * "X" fields are optional extensions to the original V1.0
++ * fields, so we must selectively expand V1.0 fields if the
++ * corresponding X field is zero.
++ */
++ if (!pmtmr_ioport)
++ pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ } else {
++ /* FADT rev. 1 */
++ pmtmr_ioport = fadt->V1_pm_tmr_blk;
++ }
++ if (pmtmr_ioport)
++ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
++ pmtmr_ioport);
++#endif
++ return 0;
++}
++
++unsigned long __init acpi_find_rsdp(void)
++{
++ unsigned long rsdp_phys = 0;
++
++ if (efi_enabled) {
++ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
++ return efi.acpi20;
++ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
++ return efi.acpi;
++ }
++ /*
++ * Scan memory looking for the RSDP signature. First search EBDA (low
++ * memory) paragraphs and then search upper memory (E0000-FFFFF).
++ */
++ rsdp_phys = acpi_scan_rsdp(0, 0x400);
++ if (!rsdp_phys)
++ rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
++
++ return rsdp_phys;
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++/*
++ * Parse LAPIC entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init acpi_parse_madt_lapic_entries(void)
++{
++ int count;
++
++ if (!cpu_has_apic)
++ return -ENODEV;
++
++ /*
++ * Note that the LAPIC address is obtained from the MADT (32-bit value)
++ * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
++ */
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
++ acpi_parse_lapic_addr_ovr, 0);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX
++ "Error parsing LAPIC address override entry\n");
++ return count;
++ }
++
++ mp_register_lapic_address(acpi_lapic_addr);
++
++ count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
++ MAX_APICS);
++ if (!count) {
++ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return -ENODEV;
++ } else if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++ return 0;
++}
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_X86_IO_APIC
++/*
++ * Parse IOAPIC related entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init acpi_parse_madt_ioapic_entries(void)
++{
++ int count;
++
++ /*
++ * ACPI interpreter is required to complete interrupt setup,
++ * so if it is off, don't enumerate the io-apics with ACPI.
++ * If MPS is present, it will handle them,
++ * otherwise the system will stay in PIC mode
++ */
++ if (acpi_disabled || acpi_noirq) {
++ return -ENODEV;
++ }
++
++ if (!cpu_has_apic)
++ return -ENODEV;
++
++ /*
++ * if "noapic" boot option, don't look for IO-APICs
++ */
++ if (skip_ioapic_setup) {
++ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
++ "due to 'noapic' option.\n");
++ return -ENODEV;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
++ MAX_IO_APICS);
++ if (!count) {
++ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
++ return -ENODEV;
++ } else if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
++ return count;
++ }
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
++ NR_IRQ_VECTORS);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX
++ "Error parsing interrupt source overrides entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ /*
++ * If BIOS did not supply an INT_SRC_OVR for the SCI
++ * pretend we got one so we can set the SCI flags.
++ */
++ if (!acpi_sci_override_gsi)
++ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++
++ /* Fill in identity legacy mapings where no override */
++ mp_config_acpi_legacy_irqs();
++
++ count =
++ acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
++ NR_IRQ_VECTORS);
++ if (count < 0) {
++ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
++ /* TBD: Cleanup to allow fallback to MPS */
++ return count;
++ }
++
++ return 0;
++}
++#else
++static inline int acpi_parse_madt_ioapic_entries(void)
++{
++ return -1;
++}
++#endif /* !CONFIG_X86_IO_APIC */
++
++static void __init acpi_process_madt(void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++ int count, error;
++
++ count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
++ if (count >= 1) {
++
++ /*
++ * Parse MADT LAPIC entries
++ */
++ error = acpi_parse_madt_lapic_entries();
++ if (!error) {
++ acpi_lapic = 1;
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_bigsmp_probe();
++#endif
++ /*
++ * Parse MADT IO-APIC entries
++ */
++ error = acpi_parse_madt_ioapic_entries();
++ if (!error) {
++ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
++ acpi_irq_balance_set(NULL);
++ acpi_ioapic = 1;
++
++ smp_found_config = 1;
++ clustered_apic_check();
++ }
++ }
++ if (error == -EINVAL) {
++ /*
++ * Dell Precision Workstation 410, 610 come here.
++ */
++ printk(KERN_ERR PREFIX
++ "Invalid BIOS MADT, disabling ACPI\n");
++ disable_acpi();
++ }
++ }
++#endif
++ return;
++}
++
++extern int acpi_force;
++
++#ifdef __i386__
++
++static int __init disable_acpi_irq(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
++ d->ident);
++ acpi_noirq_set();
++ }
++ return 0;
++}
++
++static int __init disable_acpi_pci(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
++ d->ident);
++ acpi_disable_pci();
++ }
++ return 0;
++}
++
++static int __init dmi_disable_acpi(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
++ disable_acpi();
++ } else {
++ printk(KERN_NOTICE
++ "Warning: DMI blacklist says broken, but acpi forced\n");
++ }
++ return 0;
++}
++
++/*
++ * Limit ACPI to CPU enumeration for HT
++ */
++static int __init force_acpi_ht(struct dmi_system_id *d)
++{
++ if (!acpi_force) {
++ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
++ d->ident);
++ disable_acpi();
++ acpi_ht = 1;
++ } else {
++ printk(KERN_NOTICE
++ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
++ }
++ return 0;
++}
++
++/*
++ * If your system is blacklisted here, but you find that acpi=force
++ * works for you, please contact acpi-devel@sourceforge.net
++ */
++static struct dmi_system_id __initdata acpi_dmi_table[] = {
++ /*
++ * Boxes that need ACPI disabled
++ */
++ {
++ .callback = dmi_disable_acpi,
++ .ident = "IBM Thinkpad",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
++ },
++ },
++
++ /*
++ * Boxes that need acpi=ht
++ */
++ {
++ .callback = force_acpi_ht,
++ .ident = "FSC Primergy T850",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "DELL GX240",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "HP VISUALIZE NT Workstation",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "Compaq Workstation W8000",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS P4B266",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS P2B-DS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ASUS CUR-DLS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "ABIT i440BX-W83977",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
++ DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM Bladecenter",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eServer xSeries 360",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eserver xSeries 330",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
++ },
++ },
++ {
++ .callback = force_acpi_ht,
++ .ident = "IBM eserver xSeries 440",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
++ },
++ },
++
++ /*
++ * Boxes that need ACPI PCI IRQ routing disabled
++ */
++ {
++ .callback = disable_acpi_irq,
++ .ident = "ASUS A7V",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
++ DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
++ /* newer BIOS, Revision 1011, does work */
++ DMI_MATCH(DMI_BIOS_VERSION,
++ "ASUS A7V ACPI BIOS Revision 1007"),
++ },
++ },
++
++ /*
++ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
++ */
++ { /* _BBN 0 bug */
++ .callback = disable_acpi_pci,
++ .ident = "ASUS PR-DLS",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
++ DMI_MATCH(DMI_BIOS_VERSION,
++ "ASUS PR-DLS ACPI BIOS Revision 1010"),
++ DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
++ },
++ },
++ {
++ .callback = disable_acpi_pci,
++ .ident = "Acer TravelMate 36x Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++ },
++ },
++ {}
++};
++
++#endif /* __i386__ */
++
++/*
++ * acpi_boot_table_init() and acpi_boot_init()
++ * called from setup_arch(), always.
++ * 1. checksums all tables
++ * 2. enumerates lapics
++ * 3. enumerates io-apics
++ *
++ * acpi_table_init() is separate to allow reading SRAT without
++ * other side effects.
++ *
++ * side effects of acpi_boot_init:
++ * acpi_lapic = 1 if LAPIC found
++ * acpi_ioapic = 1 if IOAPIC found
++ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
++ * if acpi_blacklisted() acpi_disabled = 1;
++ * acpi_irq_model=...
++ * ...
++ *
++ * return value: (currently ignored)
++ * 0: success
++ * !0: failure
++ */
++
++int __init acpi_boot_table_init(void)
++{
++ int error;
++
++#ifdef __i386__
++ dmi_check_system(acpi_dmi_table);
++#endif
++
++ /*
++ * If acpi_disabled, bail out
++ * One exception: acpi=ht continues far enough to enumerate LAPICs
++ */
++ if (acpi_disabled && !acpi_ht)
++ return 1;
++
++ /*
++ * Initialize the ACPI boot-time table parser.
++ */
++ error = acpi_table_init();
++ if (error) {
++ disable_acpi();
++ return error;
++ }
++
++ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++ /*
++ * blacklist may disable ACPI entirely
++ */
++ error = acpi_blacklisted();
++ if (error) {
++ if (acpi_force) {
++ printk(KERN_WARNING PREFIX "acpi=force override\n");
++ } else {
++ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
++ disable_acpi();
++ return error;
++ }
++ }
++
++ return 0;
++}
++
++int __init acpi_boot_init(void)
++{
++ /*
++ * If acpi_disabled, bail out
++ * One exception: acpi=ht continues far enough to enumerate LAPICs
++ */
++ if (acpi_disabled && !acpi_ht)
++ return 1;
++
++ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++ /*
++ * set sci_int and PM timer address
++ */
++ acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
++
++ /*
++ * Process the Multiple APIC Description Table (MADT), if present
++ */
++ acpi_process_madt();
++
++ acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/Makefile linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/acpi/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -4,5 +4,9 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wake
+
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y += cstate.o processor.o
++ifneq ($(CONFIG_PROCESSOR_EXTERNAL_CONTROL),)
++obj-$(CONFIG_XEN) += processor_extcntl_xen.o
++endif
+ endif
+
++disabled-obj-$(CONFIG_XEN) := cstate.o wakeup.o
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/processor.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/processor.c
+--- linux-2.6.18.8/arch/i386/kernel/acpi/processor.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/processor.c 2008-08-21 11:36:07.000000000 +0200
+@@ -62,7 +62,18 @@ static void init_intel_pdc(struct acpi_p
+ /* Initialize _PDC data based on the CPU vendor */
+ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+ {
++#ifdef CONFIG_XEN
++ /*
++ * As a work-around, just use cpu0's cpuinfo for all processors.
++ * Further work is required to expose a Xen hypervisor interface
++ * for fetching physical cpuinfo in the dom0 kernel, so that
++ * arch_acpi_processor_init_pdc() can set the _PDC parameters
++ * according to Xen's physical information.
++ */
++ unsigned int cpu = 0;
++#else
+ unsigned int cpu = pr->id;
++#endif /* CONFIG_XEN */
+ struct cpuinfo_x86 *c = cpu_data + cpu;
+
+ pr->pdc = NULL;
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/processor_extcntl_xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/processor_extcntl_xen.c
+--- linux-2.6.18.8/arch/i386/kernel/acpi/processor_extcntl_xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/processor_extcntl_xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,229 @@
++/*
++ * processor_extcntl_xen.c - interface to notify Xen
++ *
++ * Copyright (C) 2008, Intel corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/pm.h>
++#include <linux/cpu.h>
++
++#include <linux/cpufreq.h>
++#include <acpi/processor.h>
++#include <asm/hypercall.h>
++
++static int xen_processor_pmbits;
++
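++/*
++ * Translate the validated ACPI C-state table into Xen's
++ * xen_processor_cx layout and upload it to the hypervisor with a
++ * single XENPF_set_processor_pminfo platform hypercall.
++ */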
++static int xen_cx_notifier(struct acpi_processor *pr, int action)
++{
++ int ret, count = 0, i;
++ xen_platform_op_t op = {
++ .cmd = XENPF_set_processor_pminfo,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u.set_pminfo.id = pr->acpi_id,
++ .u.set_pminfo.type = XEN_PM_CX,
++ };
++ struct xen_processor_cx *data, *buf;
++ struct acpi_processor_cx *cx;
++
++ if (action == PROCESSOR_PM_CHANGE)
++ return -EINVAL;
++
++ /* Convert to Xen defined structure and hypercall */
++ buf = kzalloc(pr->power.count * sizeof(struct xen_processor_cx),
++ GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ data = buf;
++ for (i = 1; i <= pr->power.count; i++) {
++ cx = &pr->power.states[i];
++ /* Skip invalid cstate entry */
++ if (!cx->valid)
++ continue;
++
++ data->type = cx->type;
++ data->latency = cx->latency;
++ data->power = cx->power;
++ data->reg.space_id = cx->reg.space_id;
++ data->reg.bit_width = cx->reg.bit_width;
++ data->reg.bit_offset = cx->reg.bit_offset;
++ data->reg.access_size = cx->reg.reserved;
++ data->reg.address = cx->reg.address;
++
++ /* Get dependency relationships */
++ if (cx->csd_count) {
++			printk("_CSD found, not supported for now\n");
++ kfree(buf);
++ return -EINVAL;
++ } else {
++ data->dpcnt = 0;
++ set_xen_guest_handle(data->dp, NULL);
++ }
++
++ data++;
++ count++;
++ }
++
++ if (!count) {
++ printk("No available Cx info for cpu %d\n", pr->acpi_id);
++ kfree(buf);
++ return -EINVAL;
++ }
++
++ op.u.set_pminfo.power.count = count;
++ op.u.set_pminfo.power.flags.bm_control = pr->flags.bm_control;
++ op.u.set_pminfo.power.flags.bm_check = pr->flags.bm_check;
++ op.u.set_pminfo.power.flags.has_cst = pr->flags.has_cst;
++ op.u.set_pminfo.power.flags.power_setup_done = pr->flags.power_setup_done;
++
++ set_xen_guest_handle(op.u.set_pminfo.power.states, buf);
++ ret = HYPERVISOR_platform_op(&op);
++ kfree(buf);
++ return ret;
++}
++
++static void convert_pct_reg(struct xen_pct_register *xpct,
++ struct acpi_pct_register *apct)
++{
++ xpct->descriptor = apct->descriptor;
++ xpct->length = apct->length;
++ xpct->space_id = apct->space_id;
++ xpct->bit_width = apct->bit_width;
++ xpct->bit_offset = apct->bit_offset;
++ xpct->reserved = apct->reserved;
++ xpct->address = apct->address;
++}
++
++static void convert_pss_states(struct xen_processor_px *xpss,
++ struct acpi_processor_px *apss, int state_count)
++{
++ int i;
++	for (i = 0; i < state_count; i++) {
++ xpss->core_frequency = apss->core_frequency;
++ xpss->power = apss->power;
++ xpss->transition_latency = apss->transition_latency;
++ xpss->bus_master_latency = apss->bus_master_latency;
++ xpss->control = apss->control;
++ xpss->status = apss->status;
++ xpss++;
++ apss++;
++ }
++}
++
++static void convert_psd_pack(struct xen_psd_package *xpsd,
++ struct acpi_psd_package *apsd)
++{
++ xpsd->num_entries = apsd->num_entries;
++ xpsd->revision = apsd->revision;
++ xpsd->domain = apsd->domain;
++ xpsd->coord_type = apsd->coord_type;
++ xpsd->num_processors = apsd->num_processors;
++}
++
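++/*
++ * Forward the ACPI performance data (the _PPC limit, _PCT control and
++ * status registers, _PSS state table and _PSD domain info) to Xen so
++ * that the hypervisor can manage P-state transitions.
++ */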
++static int xen_px_notifier(struct acpi_processor *pr, int action)
++{
++ int ret;
++ xen_platform_op_t op = {
++ .cmd = XENPF_set_processor_pminfo,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u.set_pminfo.id = pr->acpi_id,
++ .u.set_pminfo.type = XEN_PM_PX,
++ };
++ struct xen_processor_performance *perf;
++ struct xen_processor_px *states = NULL;
++ struct acpi_processor_performance *px;
++ struct acpi_psd_package *pdomain;
++
++	/* dynamic _PPC handling is left for future work */
++ if (action == PROCESSOR_PM_CHANGE)
++ return 0;
++
++ perf = &op.u.set_pminfo.perf;
++ px = pr->performance;
++
++ perf->flags = XEN_PX_PPC |
++ XEN_PX_PCT |
++ XEN_PX_PSS |
++ XEN_PX_PSD;
++
++ /* ppc */
++ perf->ppc = pr->performance_platform_limit;
++
++ /* pct */
++ convert_pct_reg(&perf->control_register, &px->control_register);
++ convert_pct_reg(&perf->status_register, &px->status_register);
++
++ /* pss */
++ perf->state_count = px->state_count;
++ states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
++ if (!states)
++ return -ENOMEM;
++ convert_pss_states(states, px->states, px->state_count);
++ set_xen_guest_handle(perf->states, states);
++
++ /* psd */
++ pdomain = &px->domain_info;
++ convert_psd_pack(&perf->domain_info, pdomain);
++ if (perf->domain_info.num_processors) {
++ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
++ perf->shared_type = CPUFREQ_SHARED_TYPE_ALL;
++ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
++ perf->shared_type = CPUFREQ_SHARED_TYPE_ANY;
++ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
++ perf->shared_type = CPUFREQ_SHARED_TYPE_HW;
++ } else
++ perf->shared_type = CPUFREQ_SHARED_TYPE_NONE;
++
++ ret = HYPERVISOR_platform_op(&op);
++ kfree(states);
++ return ret;
++}
++
++static int xen_tx_notifier(struct acpi_processor *pr, int action)
++{
++ return -EINVAL;
++}
++static int xen_hotplug_notifier(struct acpi_processor *pr, int event)
++{
++ return -EINVAL;
++}
++
++static struct processor_extcntl_ops xen_extcntl_ops = {
++ .hotplug = xen_hotplug_notifier,
++};
++
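++/*
++ * Register the per-type PM notifiers according to the PM bits the
++ * hypervisor advertised in xen_start_info->flags (SIF_PM_MASK).
++ */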
++void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
++{
++ xen_processor_pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
++
++ if (xen_processor_pmbits & XEN_PROCESSOR_PM_CX)
++ xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
++ if (xen_processor_pmbits & XEN_PROCESSOR_PM_PX)
++ xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier;
++ if (xen_processor_pmbits & XEN_PROCESSOR_PM_TX)
++ xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
++
++ *ops = &xen_extcntl_ops;
++}
++EXPORT_SYMBOL(arch_acpi_processor_init_extcntl);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/acpi/sleep-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/sleep-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/acpi/sleep-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/acpi/sleep-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,113 @@
++/*
++ * sleep.c - x86-specific ACPI sleep support.
++ *
++ * Copyright (C) 2001-2003 Patrick Mochel
++ * Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
++ */
++
++#include <linux/acpi.h>
++#include <linux/bootmem.h>
++#include <linux/dmi.h>
++#include <linux/cpumask.h>
++
++#include <asm/smp.h>
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ if (!acpi_wakeup_address)
++ return 1;
++ memcpy((void *)acpi_wakeup_address, &wakeup_start,
++ &wakeup_end - &wakeup_start);
++ acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++ return 0;
++}
++
++/*
++ * acpi_restore_state - undo effects of acpi_save_state_mem
++ */
++void acpi_restore_state_mem(void)
++{
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page from the first 1MB of memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16MB pages, but not
++ * <1MB pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
++ printk(KERN_ERR
++ "ACPI: Wakeup code way too big, S3 disabled.\n");
++ return;
++ }
++
++ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++ if (!acpi_wakeup_address)
++ printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
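++/* Parse the "acpi_sleep=s3_bios,s3_mode" boot options into acpi_video_flags. */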
++static int __init acpi_sleep_setup(char *str)
++{
++ while ((str != NULL) && (*str != '\0')) {
++ if (strncmp(str, "s3_bios", 7) == 0)
++ acpi_video_flags = 1;
++ if (strncmp(str, "s3_mode", 7) == 0)
++ acpi_video_flags |= 2;
++ str = strchr(str, ',');
++ if (str != NULL)
++ str += strspn(str, ", \t");
++ }
++ return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++
++static __init int reset_videomode_after_s3(struct dmi_system_id *d)
++{
++ acpi_video_flags |= 2;
++ return 0;
++}
++
++static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
++ { /* Reset video mode after returning from ACPI S3 sleep */
++ .callback = reset_videomode_after_s3,
++ .ident = "Toshiba Satellite 4030cdt",
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
++ },
++ },
++ {}
++};
++
++static int __init acpisleep_dmi_init(void)
++{
++ dmi_check_system(acpisleep_dmi_table);
++ return 0;
++}
++
++core_initcall(acpisleep_dmi_init);
++#endif /* CONFIG_ACPI_PV_SLEEP */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/apic-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/apic-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/apic-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/apic-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,155 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/i8253.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++#include <mach_apicdef.h>
++#include <mach_ipi.h>
++
++#include "io_ports.h"
++
++#ifndef CONFIG_XEN
++/*
++ * cpu_mask that denotes the CPUs that need the timer interrupt coming in as
++ * IPIs in place of local APIC timers
++ */
++static cpumask_t timer_bcast_ipi;
++#endif
++
++/*
++ * Knob to control our willingness to enable the local APIC.
++ */
++int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++
++#ifndef CONFIG_XEN
++static int modern_apic(void)
++{
++ unsigned int lvr, version;
++ /* AMD systems use old APIC versions, so check the CPU */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 0xf)
++ return 1;
++ lvr = apic_read(APIC_LVR);
++ version = GET_APIC_VERSION(lvr);
++ return version >= 0x14;
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * 'What should we do if we get a hw irq event on an illegal vector?'
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But only ack when the APIC is enabled -AK
++ */
++ if (cpu_has_apic)
++ ack_APIC_irq();
++}
++
++int get_physical_broadcast(void)
++{
++ return 0xff;
++}
++
++#ifndef CONFIG_XEN
++#ifndef CONFIG_SMP
++static void up_apic_timer_interrupt_call(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ per_cpu(irq_stat, cpu).apic_timer_irqs++;
++
++ smp_local_timer_interrupt(regs);
++}
++#endif
++
++void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
++{
++ cpumask_t mask;
++
++ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
++ if (!cpus_empty(mask)) {
++#ifdef CONFIG_SMP
++ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
++#else
++ /*
++		 * We can call the APIC timer interrupt handler directly in
++		 * the UP case, minus all the IRQ-related plumbing.
++ */
++ up_apic_timer_interrupt_call(regs);
++#endif
++ }
++}
++#endif
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/asm-offsets.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/asm-offsets.c
+--- linux-2.6.18.8/arch/i386/kernel/asm-offsets.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/asm-offsets.c 2008-08-21 11:36:07.000000000 +0200
+@@ -66,9 +66,14 @@ void foo(void)
+ OFFSET(pbe_orig_address, pbe, orig_address);
+ OFFSET(pbe_next, pbe, next);
+
++#ifndef CONFIG_X86_NO_TSS
+ /* Offset from the sysenter stack to tss.esp0 */
+- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
++ DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, esp0) -
+ sizeof(struct tss_struct));
++#else
++ /* sysenter stack points directly to esp0 */
++ DEFINE(SYSENTER_stack_esp0, 0);
++#endif
+
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ DEFINE(VDSO_PRELINK, VDSO_PRELINK);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/common-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/common-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/cpu/common-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/common-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,743 @@
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <linux/bootmem.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/msr.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#include <asm/mtrr.h>
++#include <asm/mce.h>
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/mpspec.h>
++#include <asm/apic.h>
++#include <mach_apic.h>
++#else
++#ifdef CONFIG_XEN
++#define phys_pkg_id(a,b) a
++#endif
++#endif
++#include <asm/hypervisor.h>
++
++#include "cpu.h"
++
++DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
++
++#ifndef CONFIG_XEN
++DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
++#endif
++
++static int cachesize_override __cpuinitdata = -1;
++static int disable_x86_fxsr __cpuinitdata;
++static int disable_x86_serial_nr __cpuinitdata = 1;
++static int disable_x86_sep __cpuinitdata;
++
++struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
++
++extern int disable_pse;
++
++static void default_init(struct cpuinfo_x86 * c)
++{
++ /* Not much we can do here... */
++ /* Check if at least it has cpuid */
++ if (c->cpuid_level == -1) {
++ /* No cpuid. It must be an ancient CPU */
++ if (c->x86 == 4)
++ strcpy(c->x86_model_id, "486");
++ else if (c->x86 == 3)
++ strcpy(c->x86_model_id, "386");
++ }
++}
++
++static struct cpu_dev default_cpu = {
++ .c_init = default_init,
++ .c_vendor = "Unknown",
++};
++static struct cpu_dev * this_cpu = &default_cpu;
++
++static int __init cachesize_setup(char *str)
++{
++ get_option (&str, &cachesize_override);
++ return 1;
++}
++__setup("cachesize=", cachesize_setup);
++
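++/* Fetch the 48-character model string from CPUID leaves 0x80000002..4. */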
++int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++ char *p, *q;
++
++ if (cpuid_eax(0x80000000) < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++
++ /* Intel chips right-justify this string for some dumb reason;
++ undo that brain damage */
++ p = q = &c->x86_model_id[0];
++ while ( *p == ' ' )
++ p++;
++ if ( p != q ) {
++ while ( *p )
++ *q++ = *p++;
++ while ( q <= &c->x86_model_id[48] )
++ *q++ = '\0'; /* Zero-pad the rest */
++ }
++
++ return 1;
++}
++
++
++void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, ecx, edx, l2size;
++
++ n = cpuid_eax(0x80000000);
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++ c->x86_cache_size=(ecx>>24)+(edx>>24);
++ }
++
++	if (n < 0x80000006)	/* Some chips just have a large L1. */
++ return;
++
++ ecx = cpuid_ecx(0x80000006);
++ l2size = ecx >> 16;
++
++ /* do processor-specific cache resizing */
++ if (this_cpu->c_size_cache)
++ l2size = this_cpu->c_size_cache(c,l2size);
++
++ /* Allow user to override all this if necessary. */
++ if (cachesize_override != -1)
++ l2size = cachesize_override;
++
++ if ( l2size == 0 )
++ return; /* Again, no L2 cache is possible */
++
++ c->x86_cache_size = l2size;
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ l2size, ecx & 0xFF);
++}
++
++/* Naming convention should be: <Name> [(<Codename>)] */
++/* This table is only used if init_<vendor>() below doesn't set the model name; */
++/* in particular, if CPUID levels 0x80000002..4 are supported, it isn't used. */
++
++/* Look up CPU names by table lookup. */
++static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
++{
++ struct cpu_model_info *info;
++
++ if ( c->x86_model >= 16 )
++ return NULL; /* Range check */
++
++ if (!this_cpu)
++ return NULL;
++
++ info = this_cpu->c_models;
++
++ while (info && info->family) {
++ if (info->family == c->x86)
++ return info->model_names[c->x86_model];
++ info++;
++ }
++ return NULL; /* Not found */
++}
++
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++{
++ char *v = c->x86_vendor_id;
++ int i;
++ static int printed;
++
++ for (i = 0; i < X86_VENDOR_NUM; i++) {
++ if (cpu_devs[i]) {
++ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
++ (cpu_devs[i]->c_ident[1] &&
++ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
++ c->x86_vendor = i;
++ if (!early)
++ this_cpu = cpu_devs[i];
++ return;
++ }
++ }
++ }
++ if (!printed) {
++ printed++;
++ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
++ printk(KERN_ERR "CPU: Your system may be unstable.\n");
++ }
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ this_cpu = &default_cpu;
++}
++
++
++static int __init x86_fxsr_setup(char * s)
++{
++ disable_x86_fxsr = 1;
++ return 1;
++}
++__setup("nofxsr", x86_fxsr_setup);
++
++
++static int __init x86_sep_setup(char * s)
++{
++ disable_x86_sep = 1;
++ return 1;
++}
++__setup("nosep", x86_sep_setup);
++
++
++/* Standard macro to see if a specific flag is changeable */
++static inline int flag_is_changeable_p(u32 flag)
++{
++ u32 f1, f2;
++
++ asm("pushfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "movl %0,%1\n\t"
++ "xorl %2,%0\n\t"
++ "pushl %0\n\t"
++ "popfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "popfl\n\t"
++ : "=&r" (f1), "=&r" (f2)
++ : "ir" (flag));
++
++ return ((f1^f2) & flag) != 0;
++}
++
++
++/* Probe for the CPUID instruction */
++static int __cpuinit have_cpuid_p(void)
++{
++ return flag_is_changeable_p(X86_EFLAGS_ID);
++}
++
++/* Do minimum CPU detection early.
++ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++ The others are not touched to avoid unwanted side effects.
++
++ WARNING: this function is only called on the BP. Don't add code here
++ that is supposed to run on all CPUs. */
++static void __init early_cpu_detect(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ c->x86_cache_alignment = 32;
++
++ if (!have_cpuid_p())
++ return;
++
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 1);
++
++ c->x86 = 4;
++ if (c->cpuid_level >= 0x00000001) {
++ u32 junk, tfms, cap0, misc;
++ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++ if (cap0 & (1<<19))
++ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
++ }
++}
++
++void __cpuinit generic_identify(struct cpuinfo_x86 * c)
++{
++ u32 tfms, xlvl;
++ int ebx;
++
++ if (have_cpuid_p()) {
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 0);
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if ( c->cpuid_level >= 0x00000001 ) {
++ u32 capability, excap;
++ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
++ c->x86_capability[0] = capability;
++ c->x86_capability[4] = excap;
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++#ifdef CONFIG_X86_HT
++ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
++#else
++ c->apicid = (ebx >> 24) & 0xFF;
++#endif
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
++ if ( xlvl >= 0x80000001 ) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if ( xlvl >= 0x80000004 )
++ get_model_name(c); /* Default name */
++ }
++ }
++
++ early_intel_workaround(c);
++
++#ifdef CONFIG_X86_HT
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
++{
++ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
++ /* Disable processor serial number */
++ unsigned long lo,hi;
++ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ lo |= 0x200000;
++ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ printk(KERN_NOTICE "CPU serial number disabled.\n");
++ clear_bit(X86_FEATURE_PN, c->x86_capability);
++
++ /* Disabling the serial number may affect the cpuid level */
++ c->cpuid_level = cpuid_eax(0);
++ }
++}
++
++static int __init x86_serial_nr_setup(char *s)
++{
++ disable_x86_serial_nr = 0;
++ return 1;
++}
++__setup("serialnumber", x86_serial_nr_setup);
++
++
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->cpuid_level = -1; /* CPUID not detected */
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_max_cores = 1;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ if (!have_cpuid_p()) {
++ /* First of all, decide if this is a 486 or higher */
++ /* It's a 486 if we can modify the AC flag */
++ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
++ c->x86 = 4;
++ else
++ c->x86 = 3;
++ }
++
++ generic_identify(c);
++
++ printk(KERN_DEBUG "CPU: After generic identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ if (this_cpu->c_identify) {
++ this_cpu->c_identify(c);
++
++ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++ }
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ if (this_cpu->c_init)
++ this_cpu->c_init(c);
++
++ /* Disable the PN if appropriate */
++ squash_the_stupid_serial_number(c);
++
++ /*
++ * The vendor-specific functions might have changed features. Now
++ * we do "generic changes."
++ */
++
++ /* TSC disabled? */
++ if ( tsc_disable )
++ clear_bit(X86_FEATURE_TSC, c->x86_capability);
++
++ /* FXSR disabled? */
++ if (disable_x86_fxsr) {
++ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
++ clear_bit(X86_FEATURE_XMM, c->x86_capability);
++ }
++
++ /* SEP disabled? */
++ if (disable_x86_sep)
++ clear_bit(X86_FEATURE_SEP, c->x86_capability);
++
++ if (disable_pse)
++ clear_bit(X86_FEATURE_PSE, c->x86_capability);
++
++ /* If the model name is still unset, do table lookup. */
++ if ( !c->x86_model_id[0] ) {
++ char *p;
++ p = table_lookup_model(c);
++ if ( p )
++ strcpy(c->x86_model_id, p);
++ else
++ /* Last resort... */
++ sprintf(c->x86_model_id, "%02x/%02x",
++ c->x86, c->x86_model);
++ }
++
++ /* Now the feature flags better reflect actual CPU features! */
++
++ printk(KERN_DEBUG "CPU: After all inits, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if ( c != &boot_cpu_data ) {
++ /* AND the already accumulated flags with these */
++ for ( i = 0 ; i < NCAPINTS ; i++ )
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++ /* Init Machine Check Exception if available. */
++ mcheck_init(c);
++
++ if (c == &boot_cpu_data)
++ sysenter_setup();
++ enable_sep_cpu();
++
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++}
++
++#ifdef CONFIG_X86_HT
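++/* Derive the physical package and core IDs from the CPUID sibling count. */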
++void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ return;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++ } else if (smp_num_siblings > 1 ) {
++
++ if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of "
++ "siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
++ c->phys_proc_id);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++		index_msb = get_count_order(smp_num_siblings);
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
++ ((1 << core_bits) - 1);
++
++ if (c->x86_max_cores > 1)
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
++ c->cpu_core_id);
++ }
++}
++#endif
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ char *vendor = NULL;
++
++ if (c->x86_vendor < X86_VENDOR_NUM)
++ vendor = this_cpu->c_vendor;
++ else if (c->cpuid_level >= 0)
++ vendor = c->x86_vendor_id;
++
++ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
++ printk("%s ", vendor);
++
++ if (!c->x86_model_id[0])
++ printk("%d86", c->x86);
++ else
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++/* This is hacky. :)
++ * We're emulating future behavior.
++ * In the future, the cpu-specific init functions will be called implicitly
++ * via the magic of initcalls.
++ * They will insert themselves into the cpu_devs structure.
++ * Then, when cpu_init() is called, we can just iterate over that array.
++ */
++
++extern int intel_cpu_init(void);
++extern int cyrix_init_cpu(void);
++extern int nsc_init_cpu(void);
++extern int amd_init_cpu(void);
++extern int centaur_init_cpu(void);
++extern int transmeta_init_cpu(void);
++extern int rise_init_cpu(void);
++extern int nexgen_init_cpu(void);
++extern int umc_init_cpu(void);
++
++void __init early_cpu_init(void)
++{
++ intel_cpu_init();
++ cyrix_init_cpu();
++ nsc_init_cpu();
++ amd_init_cpu();
++ centaur_init_cpu();
++ transmeta_init_cpu();
++ rise_init_cpu();
++ nexgen_init_cpu();
++ umc_init_cpu();
++ early_cpu_detect();
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ /* pse is not compatible with on-the-fly unmapping,
++ * disable it even if the cpus claim to support it.
++ */
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++#endif
++}
++
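++/*
++ * Xen requires the GDT to live in read-only memory before it can be
++ * registered: make each page read-only and hand the machine frame
++ * list to the hypervisor via HYPERVISOR_set_gdt().
++ */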
++static void __cpuinit cpu_gdt_init(const struct Xgt_desc_struct *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_lowmem_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) / 8))
++ BUG();
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init(void)
++{
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct * t = &per_cpu(init_tss, cpu);
++#endif
++ struct thread_struct *thread = &current->thread;
++ struct desc_struct *gdt;
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++
++ if (cpu_test_and_set(cpu, cpu_initialized)) {
++ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++ for (;;) local_irq_enable();
++ }
++ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++
++ if (cpu_has_vme || cpu_has_de)
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++ if (tsc_disable && cpu_has_tsc) {
++ printk(KERN_NOTICE "Disabling TSC...\n");
++ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++ set_in_cr4(X86_CR4_TSD);
++ }
++
++#ifndef CONFIG_XEN
++ /* The CPU hotplug case */
++ if (cpu_gdt_descr->address) {
++ gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ memset(gdt, 0, PAGE_SIZE);
++ goto old_gdt;
++ }
++ /*
++ * This is a horrible hack to allocate the GDT. The problem
++ * is that cpu_init() is called really early for the boot CPU
++	states = kzalloc(px->state_count * sizeof(xen_processor_px_t), GFP_KERNEL);
++ * CPUs, when bootmem will have gone away
++ */
++ if (NODE_DATA(0)->bdata->node_bootmem_map) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++ } else {
++ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
++ for (;;)
++ local_irq_enable();
++ }
++ }
++old_gdt:
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ /* Set up GDT entry for 16bit stack */
++ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
++ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
++ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
++ (CPU_16BIT_STACK_SIZE - 1);
++
++ cpu_gdt_descr->size = GDT_SIZE - 1;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++#else
++ if (cpu == 0 && cpu_gdt_descr->address == 0) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ cpu_gdt_descr->size = GDT_SIZE;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++ }
++#endif
++
++ cpu_gdt_init(cpu_gdt_descr);
++
++ /*
++ * Set up and load the per-CPU TSS and LDT
++ */
++ atomic_inc(&init_mm.mm_count);
++ current->active_mm = &init_mm;
++ if (current->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, current);
++
++ load_esp0(t, thread);
++
++ load_LDT(&init_mm.context);
++
++#ifdef CONFIG_DOUBLEFAULT
++ /* Set up doublefault TSS pointer in the GDT */
++ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
++#endif
++
++ /* Clear %fs and %gs. */
++ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
++
++ /* Clear all 6 debug registers: */
++ set_debugreg(0, 0);
++ set_debugreg(0, 1);
++ set_debugreg(0, 2);
++ set_debugreg(0, 3);
++ set_debugreg(0, 6);
++ set_debugreg(0, 7);
++
++ /*
++ * Force FPU initialization:
++ */
++ current_thread_info()->status = 0;
++ clear_used_math();
++ mxcsr_feature_mask_init();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void __cpuinit cpu_uninit(void)
++{
++ int cpu = raw_smp_processor_id();
++ cpu_clear(cpu, cpu_initialized);
++
++ /* lazy TLB state */
++ per_cpu(cpu_tlbstate, cpu).state = 0;
++ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
++}
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.c 2008-08-21 11:36:07.000000000 +0200
+@@ -46,7 +46,7 @@
+
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 2.00.00"
++#define VERSION "version 2.20.00"
+ #include "powernow-k8.h"
+
+ /* serialize freq changes */
+@@ -66,36 +66,15 @@ static u32 find_freq_from_fid(u32 fid)
+ return 800 + (fid * 100);
+ }
+
+-
+ /* Return a frequency in KHz, given an input fid */
+ static u32 find_khz_freq_from_fid(u32 fid)
+ {
+ return 1000 * find_freq_from_fid(fid);
+ }
+
+-/* Return a frequency in MHz, given an input fid and did */
+-static u32 find_freq_from_fiddid(u32 fid, u32 did)
+-{
+- return 100 * (fid + 0x10) >> did;
+-}
+-
+-static u32 find_khz_freq_from_fiddid(u32 fid, u32 did)
+-{
+- return 1000 * find_freq_from_fiddid(fid, did);
+-}
+-
+-static u32 find_fid_from_pstate(u32 pstate)
+-{
+- u32 hi, lo;
+- rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
+- return lo & HW_PSTATE_FID_MASK;
+-}
+-
+-static u32 find_did_from_pstate(u32 pstate)
++static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate)
+ {
+- u32 hi, lo;
+- rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
+- return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++ return data[pstate].frequency;
+ }
+
+ /* Return the vco fid for an input fid
+@@ -139,9 +118,7 @@ static int query_current_values_with_pen
+ if (cpu_family == CPU_HW_PSTATE) {
+ rdmsr(MSR_PSTATE_STATUS, lo, hi);
+ i = lo & HW_PSTATE_MASK;
+- rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
+- data->currfid = lo & HW_PSTATE_FID_MASK;
+- data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
++ data->currpstate = i;
+ return 0;
+ }
+ do {
+@@ -292,7 +269,7 @@ static int decrease_vid_code_by_step(str
+ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
+ {
+ wrmsr(MSR_PSTATE_CTRL, pstate, 0);
+- data->currfid = find_fid_from_pstate(pstate);
++ data->currpstate = pstate;
+ return 0;
+ }
+
+@@ -738,6 +715,7 @@ static int find_psb_table(struct powerno
+
+ data->numps = psb->numps;
+ dprintk("numpstates: 0x%x\n", data->numps);
++ data->starting_core_affinity = cpumask_of_cpu(0);
+ return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid);
+ }
+ /*
+@@ -758,15 +736,43 @@ static int find_psb_table(struct powerno
+ #ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
+ {
+- if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
++ if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
+ return;
+
+- data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
+- data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+- data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+- data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+- data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
+- data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
++ data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
++ data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
++ data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
++ data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
++ data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
++ data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
++}
++
++static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
++static int preregister_valid = 0;
++
++static int powernow_k8_cpu_preinit_acpi(void)
++{
++ int i;
++ struct acpi_processor_performance *data;
++ for_each_possible_cpu(i) {
++ data = kzalloc(sizeof(struct acpi_processor_performance),
++ GFP_KERNEL);
++ if (!data) {
++ int j;
++ for_each_possible_cpu(j) {
++ kfree(acpi_perf_data[j]);
++ acpi_perf_data[j] = NULL;
++ }
++ return -ENODEV;
++ }
++ acpi_perf_data[i] = data;
++ }
++
++ if (acpi_processor_preregister_performance(acpi_perf_data))
++ return -ENODEV;
++ else
++ preregister_valid = 1;
++ return 0;
+ }
+
+ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
+@@ -774,28 +780,29 @@ static int powernow_k8_cpu_init_acpi(str
+ struct cpufreq_frequency_table *powernow_table;
+ int ret_val;
+
+- if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
++ data->acpi_data = acpi_perf_data[data->cpu];
++ if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
+ dprintk("register performance failed: bad ACPI data\n");
+ return -EIO;
+ }
+
+ /* verify the data contained in the ACPI structures */
+- if (data->acpi_data.state_count <= 1) {
++ if (data->acpi_data->state_count <= 1) {
+ dprintk("No ACPI P-States\n");
+ goto err_out;
+ }
+
+- if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+- (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
++ if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
++ (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ dprintk("Invalid control/status registers (%x - %x)\n",
+- data->acpi_data.control_register.space_id,
+- data->acpi_data.status_register.space_id);
++ data->acpi_data->control_register.space_id,
++ data->acpi_data->status_register.space_id);
+ goto err_out;
+ }
+
+ /* fill in data->powernow_table */
+ powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+- * (data->acpi_data.state_count + 1)), GFP_KERNEL);
++ * (data->acpi_data->state_count + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ dprintk("powernow_table memory alloc failure\n");
+ goto err_out;
+@@ -808,28 +815,43 @@ static int powernow_k8_cpu_init_acpi(str
+ if (ret_val)
+ goto err_out_mem;
+
+- powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
+- powernow_table[data->acpi_data.state_count].index = 0;
++ powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
++ powernow_table[data->acpi_data->state_count].index = 0;
+ data->powernow_table = powernow_table;
+
+ /* fill in data */
+- data->numps = data->acpi_data.state_count;
++ data->numps = data->acpi_data->state_count;
+ print_basics(data);
+ powernow_k8_acpi_pst_values(data, 0);
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
++ /* determine affinity, from ACPI if available */
++ if (preregister_valid) {
++ if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
++ (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
++ data->starting_core_affinity = data->acpi_data->shared_cpu_map;
++ else
++ data->starting_core_affinity = cpumask_of_cpu(data->cpu);
++ } else {
++ /* best guess from family if not */
++ if (cpu_family == CPU_HW_PSTATE)
++ data->starting_core_affinity = cpumask_of_cpu(data->cpu);
++ else
++ data->starting_core_affinity = cpu_core_map[data->cpu];
++ }
++
+ return 0;
+
+ err_out_mem:
+ kfree(powernow_table);
+
+ err_out:
+- acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
++ acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+
+- /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
+- data->acpi_data.state_count = 0;
++ /* data->acpi_data->state_count informs us at ->exit() whether ACPI was used */
++ data->acpi_data->state_count = 0;
+
+ return -ENODEV;
+ }
+@@ -837,41 +859,23 @@ err_out:
+ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
+ {
+ int i;
++ u32 hi = 0, lo = 0;
++ rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
++ data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
+
+- for (i = 0; i < data->acpi_data.state_count; i++) {
++ for (i = 0; i < data->acpi_data->state_count; i++) {
+ u32 index;
+- u32 hi = 0, lo = 0;
+- u32 fid;
+- u32 did;
+
+- index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
+- if (index > MAX_HW_PSTATE) {
++ index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
++ if (index > data->max_hw_pstate) {
+ printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
+ printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
+- }
+- rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+- if (!(hi & HW_PSTATE_VALID_MASK)) {
+- dprintk("invalid pstate %d, ignoring\n", index);
+- powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
+ }
+
+- fid = lo & HW_PSTATE_FID_MASK;
+- did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
+-
+- dprintk(" %d : fid 0x%x, did 0x%x\n", index, fid, did);
++ powernow_table[i].index = index;
++ powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
+
+- powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT);
+-
+- powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did);
+-
+- if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+- printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+- powernow_table[i].frequency,
+- (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+- powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+- continue;
+- }
+ }
+ return 0;
+ }
+@@ -880,16 +884,16 @@ static int fill_powernow_table_fidvid(st
+ {
+ int i;
+ int cntlofreq = 0;
+- for (i = 0; i < data->acpi_data.state_count; i++) {
++ for (i = 0; i < data->acpi_data->state_count; i++) {
+ u32 fid;
+ u32 vid;
+
+ if (data->exttype) {
+- fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+- vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
++ fid = data->acpi_data->states[i].status & EXT_FID_MASK;
++ vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+ } else {
+- fid = data->acpi_data.states[i].control & FID_MASK;
+- vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
++ fid = data->acpi_data->states[i].control & FID_MASK;
++ vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
+ }
+
+ dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
+@@ -930,10 +934,10 @@ static int fill_powernow_table_fidvid(st
+ cntlofreq = i;
+ }
+
+- if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
++ if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
+ printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
+ powernow_table[i].frequency,
+- (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
++ (unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
+ powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
+ }
+@@ -943,14 +947,15 @@ static int fill_powernow_table_fidvid(st
+
+ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
+ {
+- if (data->acpi_data.state_count)
+- acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
++ if (data->acpi_data->state_count)
++ acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+ }
+
+ #else
+ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
+ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
+ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
++static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
+ #endif /* CONFIG_X86_POWERNOW_K8_ACPI */
+
+ /* Take a frequency, and issue the fid/vid transition command */
+@@ -1012,22 +1017,18 @@ static int transition_frequency_fidvid(s
+ /* Take a frequency, and issue the hardware pstate transition command */
+ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
+ {
+- u32 fid = 0;
+- u32 did = 0;
+ u32 pstate = 0;
+ int res, i;
+ struct cpufreq_freqs freqs;
+
+ dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
+
+- /* get fid did for hardware pstate transition */
++ /* get MSR index for hardware pstate transition */
+ pstate = index & HW_PSTATE_MASK;
+- if (pstate > MAX_HW_PSTATE)
++ if (pstate > data->max_hw_pstate)
+ return 0;
+- fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT;
+- did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT;
+- freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+- freqs.new = find_khz_freq_from_fiddid(fid, did);
++ freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
++ freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
+
+ for_each_cpu_mask(i, *(data->available_cores)) {
+ freqs.cpu = i;
+@@ -1035,9 +1036,7 @@ static int transition_frequency_pstate(s
+ }
+
+ res = transition_pstate(data, pstate);
+- data->currfid = find_fid_from_pstate(pstate);
+- data->currdid = find_did_from_pstate(pstate);
+- freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++ freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
+
+ for_each_cpu_mask(i, *(data->available_cores)) {
+ freqs.cpu = i;
+@@ -1082,10 +1081,7 @@ static int powernowk8_target(struct cpuf
+ if (query_current_values_with_pending_wait(data))
+ goto err_out;
+
+- if (cpu_family == CPU_HW_PSTATE)
+- dprintk("targ: curr fid 0x%x, did 0x%x\n",
+- data->currfid, data->currvid);
+- else {
++ if (cpu_family != CPU_HW_PSTATE) {
+ dprintk("targ: curr fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+@@ -1116,7 +1112,7 @@ static int powernowk8_target(struct cpuf
+ mutex_unlock(&fidvid_mutex);
+
+ if (cpu_family == CPU_HW_PSTATE)
+- pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++ pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+ ret = 0;
+@@ -1164,7 +1160,7 @@ static int __cpuinit powernowk8_cpu_init
+ * an UP version, and is deprecated by AMD.
+ */
+ if (num_online_cpus() != 1) {
+- printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
++ printk(KERN_ERR PFX "Your BIOS does not provide _PSS objects. PowerNow! does not work on SMP systems without _PSS objects. Complain to your BIOS vendor.\n");
+ kfree(data);
+ return -ENODEV;
+ }
+@@ -1204,10 +1200,7 @@ static int __cpuinit powernowk8_cpu_init
+ set_cpus_allowed(current, oldmask);
+
+ pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
+- if (cpu_family == CPU_HW_PSTATE)
+- pol->cpus = cpumask_of_cpu(pol->cpu);
+- else
+- pol->cpus = cpu_core_map[pol->cpu];
++ pol->cpus = data->starting_core_affinity;
+ data->available_cores = &(pol->cpus);
+
+ /* Take a crude guess here.
+@@ -1216,7 +1209,7 @@ static int __cpuinit powernowk8_cpu_init
+ + (3 * (1 << data->irt) * 10)) * 1000;
+
+ if (cpu_family == CPU_HW_PSTATE)
+- pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
++ pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+ dprintk("policy current frequency %d kHz\n", pol->cur);
+@@ -1233,8 +1226,7 @@ static int __cpuinit powernowk8_cpu_init
+ cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
+
+ if (cpu_family == CPU_HW_PSTATE)
+- dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
+- data->currfid, data->currdid);
++ dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate);
+ else
+ dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+@@ -1289,7 +1281,10 @@ static unsigned int powernowk8_get (unsi
+ if (query_current_values_with_pending_wait(data))
+ goto out;
+
+- khz = find_khz_freq_from_fid(data->currfid);
++ if (cpu_family == CPU_HW_PSTATE)
++ khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
++ else
++ khz = find_khz_freq_from_fid(data->currfid);
+
+ out:
+ set_cpus_allowed(current, oldmask);
+@@ -1323,6 +1318,7 @@ static int __cpuinit powernowk8_init(voi
+ }
+
+ if (supported_cpus == num_online_cpus()) {
++ powernow_k8_cpu_preinit_acpi();
+ printk(KERN_INFO PFX "Found %d %s "
+ "processors (" VERSION ")\n", supported_cpus,
+ boot_cpu_data.x86_model_id);
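
Note on the powernow-k8.c hunks above: they consistently swap the old fid/did arithmetic for find_khz_freq_from_pstate(), which resolves a hardware pstate number to kHz by looking it up in the driver's cpufreq frequency table. A minimal standalone sketch of what such a lookup presumably does; the struct, the TABLE_END sentinel and the table contents are illustrative stand-ins for the real cpufreq_frequency_table:

    #include <stdio.h>

    /* Cut-down model of struct cpufreq_frequency_table: .index carries the
     * driver's pstate number, .frequency the speed in kHz, and a sentinel
     * entry terminates the table. */
    struct freq_entry { unsigned int index; unsigned int frequency; };
    #define TABLE_END 0xffffffffu

    static unsigned int find_khz_freq_from_pstate(const struct freq_entry *t,
                                                  unsigned int pstate)
    {
        for (int i = 0; t[i].frequency != TABLE_END; i++)
            if (t[i].index == pstate)
                return t[i].frequency;
        return 0; /* pstate not present in the table */
    }

    int main(void)
    {
        const struct freq_entry table[] = {
            { 0, 2600000 }, { 1, 1900000 }, { 2, 1400000 },
            { TABLE_END, TABLE_END },
        };
        printf("pstate 1 -> %u kHz\n", find_khz_freq_from_pstate(table, 1));
        return 0;
    }
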
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.h linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+--- linux-2.6.18.8/arch/i386/kernel/cpu/cpufreq/powernow-k8.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/cpufreq/powernow-k8.h 2008-08-21 11:36:07.000000000 +0200
+@@ -1,5 +1,5 @@
+ /*
+- * (c) 2003-2006 Advanced Micro Devices, Inc.
++ * (c) 2003-2006 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+@@ -10,6 +10,7 @@ struct powernow_k8_data {
+
+ u32 numps; /* number of p-states */
+ u32 batps; /* number of p-states supported on battery */
++ u32 max_hw_pstate; /* maximum legal hardware pstate */
+
+ /* these values are constant when the PSB is used to determine
+ * vid/fid pairings, but are modified during the ->target() call
+@@ -21,8 +22,8 @@ struct powernow_k8_data {
+ u32 plllock; /* pll lock time, units 1 us */
+ u32 exttype; /* extended interface = 1 */
+
+- /* keep track of the current fid / vid or did */
+- u32 currvid, currfid, currdid;
++ /* keep track of the current fid / vid or pstate */
++ u32 currvid, currfid, currpstate;
+
+ /* the powernow_table includes all frequency and vid/fid pairings:
+ * fid are the lower 8 bits of the index, vid are the upper 8 bits.
+@@ -32,12 +33,13 @@ struct powernow_k8_data {
+ #ifdef CONFIG_X86_POWERNOW_K8_ACPI
+ /* the acpi table needs to be kept. it's only available if ACPI was
+ * used to determine valid frequency/vid/fid states */
+- struct acpi_processor_performance acpi_data;
++ struct acpi_processor_performance *acpi_data;
+ #endif
+ /* we need to keep track of associated cores, but let cpufreq
+ * handle hotplug events - so just point at cpufreq pol->cpus
+ * structure */
+ cpumask_t *available_cores;
++ cpumask_t starting_core_affinity;
+ };
+
+
+@@ -87,23 +89,14 @@ struct powernow_k8_data {
+
+ /* Hardware Pstate _PSS and MSR definitions */
+ #define USE_HW_PSTATE 0x00000080
+-#define HW_PSTATE_FID_MASK 0x0000003f
+-#define HW_PSTATE_DID_MASK 0x000001c0
+-#define HW_PSTATE_DID_SHIFT 6
+-#define HW_PSTATE_MASK 0x00000007
+-#define HW_PSTATE_VALID_MASK 0x80000000
+-#define HW_FID_INDEX_SHIFT 8
+-#define HW_FID_INDEX_MASK 0x0000ff00
+-#define HW_DID_INDEX_SHIFT 16
+-#define HW_DID_INDEX_MASK 0x00ff0000
+-#define HW_WATTS_MASK 0xff
+-#define HW_PWR_DVR_MASK 0x300
+-#define HW_PWR_DVR_SHIFT 8
+-#define HW_PWR_MAX_MULT 3
+-#define MAX_HW_PSTATE 8 /* hw pstate supports up to 8 */
++#define HW_PSTATE_MASK 0x00000007
++#define HW_PSTATE_VALID_MASK 0x80000000
++#define HW_PSTATE_MAX_MASK 0x000000f0
++#define HW_PSTATE_MAX_SHIFT 4
+ #define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
+ #define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
+ #define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
++#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
+
+ /* define the two driver architectures */
+ #define CPU_OPTERON 0
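
The trimmed-down defines above leave three MSR fields in play: HW_PSTATE_MASK selects the pstate number itself, while HW_PSTATE_MAX_MASK/HW_PSTATE_MAX_SHIFT extract the ceiling that feeds the new max_hw_pstate member, presumably read from the added MSR_PSTATE_CUR_LIMIT register (0xc0010061). How the masks decompose such a register value, as a standalone sketch with a made-up sample:

    #include <stdio.h>
    #include <stdint.h>

    #define HW_PSTATE_MASK      0x00000007
    #define HW_PSTATE_MAX_MASK  0x000000f0
    #define HW_PSTATE_MAX_SHIFT 4

    int main(void)
    {
        uint64_t limit = 0x42; /* hypothetical MSR_PSTATE_CUR_LIMIT contents */
        unsigned int cur = limit & HW_PSTATE_MASK;
        unsigned int max = (limit & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

        /* prints: current pstate limit 2, max hw pstate 4 */
        printf("current pstate limit %u, max hw pstate %u\n", cur, max);
        return 0;
    }
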
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/Makefile linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/cpu/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -17,3 +17,4 @@ obj-$(CONFIG_X86_MCE) += mcheck/
+
+ obj-$(CONFIG_MTRR) += mtrr/
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/main-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/mtrr/main-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/main-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/mtrr/main-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,198 @@
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#include <linux/mutex.h>
++
++#include <asm/mtrr.h>
++#include "mtrr.h"
++
++static DEFINE_MUTEX(mtrr_mutex);
++
++void generic_get_mtrr(unsigned int reg, unsigned long *base,
++ unsigned int *size, mtrr_type * type)
++{
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = reg;
++ if (unlikely(HYPERVISOR_platform_op(&op)))
++ memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));
++
++ *size = op.u.read_memtype.nr_mfns;
++ *base = op.u.read_memtype.mfn;
++ *type = op.u.read_memtype.type;
++}
++
++struct mtrr_ops generic_mtrr_ops = {
++ .use_intel_if = 1,
++ .get = generic_get_mtrr,
++};
++
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
++
++static void __init set_num_var_ranges(void)
++{
++ struct xen_platform_op op;
++
++ for (num_var_ranges = 0; ; num_var_ranges++) {
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = num_var_ranges;
++ if (HYPERVISOR_platform_op(&op) != 0)
++ break;
++ }
++}
++
++static void __init init_table(void)
++{
++ int i, max;
++
++ max = num_var_ranges;
++ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++ == NULL) {
++ printk(KERN_ERR "mtrr: could not allocate\n");
++ return;
++ }
++ for (i = 0; i < max; i++)
++ usage_table[i] = 0;
++}
++
++int mtrr_add_page(unsigned long base, unsigned long size,
++ unsigned int type, char increment)
++{
++ int error;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ op.cmd = XENPF_add_memtype;
++ op.u.add_memtype.mfn = base;
++ op.u.add_memtype.nr_mfns = size;
++ op.u.add_memtype.type = type;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ mutex_unlock(&mtrr_mutex);
++ BUG_ON(error > 0);
++ return error;
++ }
++
++ if (increment)
++ ++usage_table[op.u.add_memtype.reg];
++
++ mutex_unlock(&mtrr_mutex);
++
++ return op.u.add_memtype.reg;
++}
++
++static int mtrr_check(unsigned long base, unsigned long size)
++{
++ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++ printk(KERN_WARNING
++ "mtrr: size and base must be multiples of 4 kiB\n");
++ printk(KERN_DEBUG
++ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
++ dump_stack();
++ return -1;
++ }
++ return 0;
++}
++
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++ char increment)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++ increment);
++}
++
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
++{
++ unsigned i;
++ mtrr_type ltype;
++ unsigned long lbase;
++ unsigned int lsize;
++ int error = -EINVAL;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ if (reg < 0) {
++ /* Search for existing MTRR */
++ for (i = 0; i < num_var_ranges; ++i) {
++ mtrr_if->get(i, &lbase, &lsize, &ltype);
++ if (lbase == base && lsize == size) {
++ reg = i;
++ break;
++ }
++ }
++ if (reg < 0) {
++ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++ size);
++ goto out;
++ }
++ }
++ if (usage_table[reg] < 1) {
++ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++ goto out;
++ }
++ if (--usage_table[reg] < 1) {
++ op.cmd = XENPF_del_memtype;
++ op.u.del_memtype.handle = 0;
++ op.u.del_memtype.reg = reg;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ BUG_ON(error > 0);
++ goto out;
++ }
++ }
++ error = reg;
++ out:
++ mutex_unlock(&mtrr_mutex);
++ return error;
++}
++
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
++}
++
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
++
++void __init mtrr_bp_init(void)
++{
++}
++
++void mtrr_ap_init(void)
++{
++}
++
++static int __init mtrr_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ if (!is_initial_xendomain())
++ return -ENODEV;
++
++ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++ return -ENODEV;
++
++ set_num_var_ranges();
++ init_table();
++
++ return 0;
++}
++
++subsys_initcall(mtrr_init);
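
main-xen.c keeps the stock mtrr_add()/mtrr_del() interface but forwards the actual work to the hypervisor through XENPF_add_memtype/XENPF_del_memtype, so existing callers compile unchanged. A sketch of such a caller, say a framebuffer driver marking its aperture write-combining; the wc_demo names and the base/size values are invented, error handling is trimmed:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <asm/mtrr.h>

    static int mtrr_reg = -1;

    static int __init wc_demo_init(void)
    {
        /* On a Xen dom0 kernel this lands in mtrr_add_page() above and
         * becomes a platform op rather than a direct MSR write. */
        mtrr_reg = mtrr_add(0xd0000000UL, 0x1000000UL, MTRR_TYPE_WRCOMB, 1);
        return mtrr_reg < 0 ? mtrr_reg : 0;
    }

    static void __exit wc_demo_exit(void)
    {
        if (mtrr_reg >= 0)
            mtrr_del(mtrr_reg, 0xd0000000UL, 0x1000000UL);
    }

    module_init(wc_demo_init);
    module_exit(wc_demo_exit);
    MODULE_LICENSE("GPL");
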
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/Makefile linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/mtrr/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/cpu/mtrr/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/cpu/mtrr/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -3,3 +3,4 @@ obj-y += amd.o
+ obj-y += cyrix.o
+ obj-y += centaur.o
+
++obj-$(CONFIG_XEN) := main.o if.o
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/crash.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/crash.c
+--- linux-2.6.18.8/arch/i386/kernel/crash.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/crash.c 2008-08-21 11:36:07.000000000 +0200
+@@ -90,6 +90,7 @@ static void crash_save_self(struct pt_re
+ crash_save_this_cpu(regs, cpu);
+ }
+
++#ifndef CONFIG_XEN
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -154,6 +155,7 @@ static void nmi_shootdown_cpus(void)
+ /* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -170,10 +172,12 @@ void machine_crash_shutdown(struct pt_re
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = smp_processor_id();
++#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+ lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
++#endif /* CONFIG_XEN */
+ crash_save_self(regs);
+ }
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/early_printk-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/early_printk-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/early_printk-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/early_printk-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++
++#include "../../x86_64/kernel/early_printk-xen.c"
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/entry.S linux-2.6.18-xen-3.3.0/arch/i386/kernel/entry.S
+--- linux-2.6.18.8/arch/i386/kernel/entry.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/entry.S 2008-08-21 11:36:07.000000000 +0200
+@@ -269,7 +269,7 @@ ENTRY(sysenter_entry)
+ CFI_STARTPROC simple
+ CFI_DEF_CFA esp, 0
+ CFI_REGISTER esp, ebp
+- movl TSS_sysenter_esp0(%esp),%esp
++ movl SYSENTER_stack_esp0(%esp),%esp
+ sysenter_past_esp:
+ /*
+ * No need to follow this irqs on/off section: the syscall
+@@ -689,7 +689,7 @@ device_not_available_emulate:
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+@@ -701,7 +701,7 @@ device_not_available_emulate:
+ cmpw $__KERNEL_CS,4(%esp); \
+ jne ok; \
+ label: \
+- movl TSS_sysenter_esp0+offset(%esp),%esp; \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
+ pushfl; \
+ pushl $__KERNEL_CS; \
+ pushl $sysenter_past_esp
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/entry-xen.S linux-2.6.18-xen-3.3.0/arch/i386/kernel/entry-xen.S
+--- linux-2.6.18.8/arch/i386/kernel/entry-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/entry-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1238 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - orig_eax
++ * 28(%esp) - %eip
++ * 2C(%esp) - %cs
++ * 30(%esp) - %eflags
++ * 34(%esp) - %oldesp
++ * 38(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++#include <xen/interface/xen.h>
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++EBX = 0x00
++ECX = 0x04
++EDX = 0x08
++ESI = 0x0C
++EDI = 0x10
++EBP = 0x14
++EAX = 0x18
++DS = 0x1C
++ES = 0x20
++ORIG_EAX = 0x24
++EIP = 0x28
++CS = 0x2C
++EFLAGS = 0x30
++OLDESP = 0x34
++OLDSS = 0x38
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK = 0x80000000
++
++#ifndef CONFIG_XEN
++#define DISABLE_INTERRUPTS cli
++#define ENABLE_INTERRUPTS sti
++#else
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
++ shl $sizeof_vcpu_shift,%esi ; \
++ addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
++#endif
++
++#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __DISABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __ENABLE_INTERRUPTS
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#endif
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop cli; TRACE_IRQS_OFF
++#else
++#define preempt_stop
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es;
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++.section .fixup,"ax"; \
++3: movl $0,(%esp); \
++ jmp 1b; \
++4: movl $0,(%esp); \
++ jmp 2b; \
++.previous; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,3b; \
++ .long 2b,4b; \
++.previous
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, OLDESP-EBX;\
++ /*CFI_OFFSET cs, CS-OLDESP;*/\
++ CFI_OFFSET eip, EIP-OLDESP;\
++ /*CFI_OFFSET es, ES-OLDESP;*/\
++ /*CFI_OFFSET ds, DS-OLDESP;*/\
++ CFI_OFFSET eax, EAX-OLDESP;\
++ CFI_OFFSET ebp, EBP-OLDESP;\
++ CFI_OFFSET edi, EDI-OLDESP;\
++ CFI_OFFSET esi, ESI-OLDESP;\
++ CFI_OFFSET edx, EDX-OLDESP;\
++ CFI_OFFSET ecx, ECX-OLDESP;\
++ CFI_OFFSET ebx, EBX-OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb CS(%esp), %al
++ testl $(VM_MASK | 2), %eax
++ jz resume_kernel
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ cli
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl SYSENTER_stack_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ sti
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp)
++ DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl EIP(%esp), %edx
++ movl OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++#ifdef CONFIG_XEN
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ movl ESI(%esp), %esi
++ sysexit
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
++ push %esp
++ call evtchn_do_upcall
++ add $4,%esp
++ jmp ret_from_intr
++#else
++ TRACE_IRQS_ON
++ sti
++ sysexit
++#endif /* !CONFIG_XEN */
++ CFI_ENDPROC
++
++ # pv sysenter call handler stub
++ENTRY(sysenter_entry_pv)
++ RING0_INT_FRAME
++ movl $__USER_DS,16(%esp)
++ movl %ebp,12(%esp)
++ movl $__USER_CS,4(%esp)
++ addl $4,%esp
++ /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++ /* fall through */
++ CFI_ENDPROC
++ENDPROC(sysenter_entry_pv)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ testl $TF_MASK,EFLAGS(%esp)
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++#ifndef CONFIG_XEN
++ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb OLDSS(%esp), %ah
++ movb CS(%esp), %al
++ andl $(VM_MASK | (4 << 8) | 3), %eax
++ cmpl $((4 << 8) | 3), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++#else
++restore_nocheck:
++ movl EFLAGS(%esp), %eax
++ testl $(VM_MASK|NMI_MASK), %eax
++ CFI_REMEMBER_STATE
++ jnz hypervisor_iret
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ GET_VCPU_INFO
++ andb evtchn_upcall_mask(%esi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ CFI_REMEMBER_STATE
++ jnz restore_all_enable_events # != 0 => enable event delivery
++#endif
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section .fixup,"ax"
++iret_exc:
++#ifndef CONFIG_XEN
++ TRACE_IRQS_ON
++ sti
++#endif
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++#ifndef CONFIG_XEN
++ldt_ss:
++ larl OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++	jnz restore_nocheck	# all right, normal return
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ subl $8, %esp # reserve space for switch16 pointer
++ CFI_ADJUST_CFA_OFFSET 8
++ cli
++ TRACE_IRQS_OFF
++ movl %esp, %eax
++ /* Set up the 16bit stack frame with switch32 pointer on top,
++ * and a switch16 pointer on top of the current frame. */
++ call setup_x86_bogus_stack
++ CFI_ADJUST_CFA_OFFSET -8 # frame has moved
++ TRACE_IRQS_IRET
++ RESTORE_REGS
++ lss 20+4(%esp), %esp # switch to 16bit stack
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ ALIGN
++restore_all_enable_events:
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++scrit: /**** START OF CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ jmp 11f
++ecrit: /**** END OF CRITICAL REGION ****/
++
++ CFI_RESTORE_STATE
++hypervisor_iret:
++ andl $~NMI_MASK, EFLAGS(%esp)
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++#endif
++ CFI_ENDPROC
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++ testl $VM_MASK, EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++#ifdef CONFIG_VM86
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++#endif
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,EAX(%esp)
++ jmp resume_userspace
++
++syscall_badsys:
++ movl $-ENOSYS,EAX(%esp)
++ jmp resume_userspace
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++#define FIXUP_ESPFIX_STACK \
++ movl %esp, %eax; \
++ /* switch to 32bit stack using the pointer on top of 16bit stack */ \
++ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
++ /* copy data from 16bit stack to 32bit stack */ \
++ call fixup_x86_bogus_stack; \
++ /* put ESP to the proper location */ \
++ movl %eax, %esp;
++#define UNWIND_ESPFIX_STACK \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ movl %ss, %eax; \
++ /* see if on 16bit stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ je 28f; \
++27: popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4; \
++.section .fixup,"ax"; \
++28: movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to 32bit stack */ \
++ FIXUP_ESPFIX_STACK; \
++ jmp 27b; \
++.previous
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++.data
++ .long 1b
++.text
++vector=vector+1
++.endr
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_/**/name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++#else
++#define UNWIND_ESPFIX_STACK
++#endif
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ xorl %eax, %eax
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ decl %eax # eax = -1
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl ES(%esp), %edi # get the function address
++ movl ORIG_EAX(%esp), %edx # get the error code
++ movl %eax, ORIG_EAX(%esp)
++ movl %ecx, ES(%esp)
++ /*CFI_REL_OFFSET es, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifdef CONFIG_XEN
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++#
++# The sysexit critical region is slightly different. sysexit
++# atomically removes the entire stack frame. If we interrupt in the
++# critical region we know that the entire frame is present and correct
++# so we can simply throw away the new one.
++ENTRY(hypervisor_callback)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ movl EIP(%esp),%eax
++ cmpl $scrit,%eax
++ jb 11f
++ cmpl $ecrit,%eax
++ jb critical_region_fixup
++ cmpl $sysexit_scrit,%eax
++ jb 11f
++ cmpl $sysexit_ecrit,%eax
++ ja 11f
++ addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
++11: push %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ call evtchn_do_upcall
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame.
++critical_region_fixup:
++	movzbl critical_fixup_table-scrit(%eax),%ecx # %ecx = num bytes already popped
++ cmpb $0xff,%cl # 0xff => vcpu_info critical region
++ jne 15f
++ xorl %ecx,%ecx
++15: leal (%esp,%ecx),%esi # %esi points at end of src region
++ leal OLDESP(%esp),%edi # %edi points at end of dst region
++	shrl $2,%ecx			# convert bytes to words
++ je 17f # skip loop if nothing to copy
++16: subl $4,%esi # pre-decrementing copy loop
++ subl $4,%edi
++ movl (%esi),%eax
++ movl %eax,(%edi)
++ loop 16b
++17: movl %edi,%esp # final %edi is top of merged stack
++ jmp 11b
++
++.section .rodata,"a"
++critical_fixup_table:
++ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
++ .byte 0xff,0xff # jnz 14f
++ .byte 0x00 # pop %ebx
++ .byte 0x04 # pop %ecx
++ .byte 0x08 # pop %edx
++ .byte 0x0c # pop %esi
++ .byte 0x10 # pop %edi
++ .byte 0x14 # pop %ebp
++ .byte 0x18 # pop %eax
++ .byte 0x1c # pop %ds
++ .byte 0x20 # pop %es
++ .byte 0x24,0x24,0x24 # add $4,%esp
++ .byte 0x28 # iret
++ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
++ .byte 0x00,0x00 # jmp 11b
++.previous
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
++ENTRY(failsafe_callback)
++ pushl %eax
++ movl $1,%eax
++1: mov 4(%esp),%ds
++2: mov 8(%esp),%es
++3: mov 12(%esp),%fs
++4: mov 16(%esp),%gs
++ testl %eax,%eax
++ popl %eax
++ jz 5f
++ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
++ jmp iret_exc
++5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
++ RING0_INT_FRAME
++ pushl $0
++ SAVE_ALL
++ jmp ret_from_exception
++.section .fixup,"ax"; \
++6: xorl %eax,%eax; \
++ movl %eax,4(%esp); \
++ jmp 1b; \
++7: xorl %eax,%eax; \
++ movl %eax,8(%esp); \
++ jmp 2b; \
++8: xorl %eax,%eax; \
++ movl %eax,12(%esp); \
++ jmp 3b; \
++9: xorl %eax,%eax; \
++ movl %eax,16(%esp); \
++ jmp 4b; \
++.previous; \
++.section __ex_table,"a"; \
++ .align 4; \
++ .long 1b,6b; \
++ .long 2b,7b; \
++ .long 3b,8b; \
++ .long 4b,9b; \
++.previous
++#endif
++ CFI_ENDPROC
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++#ifndef CONFIG_XEN
++ movl %cr0, %eax
++ testl $0x4, %eax # EM (math emulation bit)
++ je device_available_emulate
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++device_available_emulate:
++#endif
++ preempt_stop
++ call math_state_restore
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
++ pushfl; \
++ pushl $__KERNEL_CS; \
++ pushl $sysenter_past_esp
++#endif /* CONFIG_XEN */
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++#ifndef CONFIG_XEN
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++#endif /* !CONFIG_XEN */
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++#ifndef CONFIG_XEN
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_16bit_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++nmi_debug_stack_check:
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_16bit_stack:
++ RING0_INT_FRAME
++ /* create the pointer to lss back */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ movzwl %sp, %esp
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to 16bit stack
++1: iret
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ orl $NMI_MASK, EFLAGS(%esp)
++ jmp restore_all
++ CFI_ENDPROC
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif
++
++#ifndef CONFIG_XEN
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
++ movl 4(%esp), %edx
++ movl (%esp), %ecx
++ leal 4(%esp), %eax
++ movl %ebx, EBX(%edx)
++ xorl %ebx, %ebx
++ movl %ebx, ECX(%edx)
++ movl %ebx, EDX(%edx)
++ movl %esi, ESI(%edx)
++ movl %edi, EDI(%edx)
++ movl %ebp, EBP(%edx)
++ movl %ebx, EAX(%edx)
++ movl $__USER_DS, DS(%edx)
++ movl $__USER_DS, ES(%edx)
++ movl %ebx, ORIG_EAX(%edx)
++ movl %ecx, EIP(%edx)
++ movl 12(%esp), %ecx
++ movl $__KERNEL_CS, CS(%edx)
++ movl %ebx, EFLAGS(%edx)
++ movl %eax, OLDESP(%edx)
++ movl 8(%esp), %eax
++ movl %ecx, 8(%esp)
++ movl EBX(%edx), %ebx
++ movl $__KERNEL_DS, OLDSS(%edx)
++ jmpl *%eax
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
++
++ENTRY(fixup_4gb_segment)
++ RING0_EC_FRAME
++ pushl $do_fixup_4gb_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++.section .rodata,"a"
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
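
A note on the interrupt-flag scheme used throughout this file: under CONFIG_XEN, DISABLE_INTERRUPTS/ENABLE_INTERRUPTS never touch EFLAGS.IF. They write evtchn_upcall_mask in the per-vcpu shared-info slot (offsets 0 and 1, per the defines near the top), and re-enabling must re-check evtchn_upcall_pending, which is exactly what the scrit/ecrit critical regions protect. A cut-down C model of the same logic; the struct is a sketch of the layout, not the Xen public header:

    #include <stdio.h>

    struct vcpu_info_model {
        unsigned char evtchn_upcall_pending; /* offset 0 */
        unsigned char evtchn_upcall_mask;    /* offset 1 */
        /* ... rest of the (1 << sizeof_vcpu_shift)-byte slot ... */
    };

    static void guest_irq_disable(volatile struct vcpu_info_model *v)
    {
        v->evtchn_upcall_mask = 1;           /* __DISABLE_INTERRUPTS */
    }

    static void guest_irq_enable(volatile struct vcpu_info_model *v)
    {
        v->evtchn_upcall_mask = 0;           /* __ENABLE_INTERRUPTS */
        if (v->evtchn_upcall_pending)        /* __TEST_PENDING */
            puts("would re-enter the event upcall here");
    }

    int main(void)
    {
        struct vcpu_info_model v = { .evtchn_upcall_pending = 1 };

        guest_irq_disable(&v);
        guest_irq_enable(&v);                /* notices the pending event */
        return 0;
    }
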
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/fixup.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/fixup.c
+--- linux-2.6.18.8/arch/i386/kernel/fixup.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/fixup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * fixup.c
++ *
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ *
++ * **** NOTE ****
++ * Issues with the binary rewriting have caused it to be removed. Instead
++ * we rely on Xen's emulator to boot the kernel, and then print a banner
++ * message recommending that the user disables /lib/tls.
++ *
++ * Copyright (c) 2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
++
++#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
++
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++ static unsigned long printed = 0;
++ char info[100];
++ int i;
++
++ /* Ignore statically-linked init. */
++ if (current->tgid == 1)
++ return;
++
++ VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable,
++ VMASST_TYPE_4gb_segments_notify));
++
++ if (test_and_set_bit(0, &printed))
++ return;
++
++ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
++
++ DP("");
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("** WARNING: Currently emulating unsupported memory accesses **");
++ DP("** in /lib/tls glibc libraries. The emulation is **");
++ DP("** slow. To ensure full performance you should **");
++ DP("** install a 'xen-friendly' (nosegneg) version of **");
++ DP("** the library, or disable tls support by executing **");
++ DP("** the following as root: **");
++ DP("** mv /lib/tls /lib/tls.disabled **");
++ DP("** Offending process: %-38.38s **", info);
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("");
++
++ for (i = 5; i > 0; i--) {
++ touch_softlockup_watchdog();
++ printk("Pausing... %d", i);
++ mdelay(1000);
++ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
++ }
++
++ printk("Continuing...\n\n");
++}
++
++static int __init fixup_init(void)
++{
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_4gb_segments_notify));
++ return 0;
++}
++__initcall(fixup_init);
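
The test_and_set_bit(0, &printed) guard in do_fixup_4gb_segment() above is the usual print-once idiom: every caller after the first finds the bit already set and returns before the banner is emitted. The same shape in portable C11, as a runnable illustration (warn_once and its message are invented names):

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_flag printed = ATOMIC_FLAG_INIT;

    static void warn_once(void)
    {
        /* like test_and_set_bit(0, &printed): only the first caller proceeds */
        if (atomic_flag_test_and_set(&printed))
            return;
        puts("** WARNING: emulating /lib/tls segment accesses **");
    }

    int main(void)
    {
        warn_once();
        warn_once(); /* silent: the flag is already set */
        return 0;
    }
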
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/head-xen.S linux-2.6.18-xen-3.3.0/arch/i386/kernel/head-xen.S
+--- linux-2.6.18.8/arch/i386/kernel/head-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/head-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,207 @@
++
++
++.text
++#include <linux/elfnote.h>
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/cache.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/elfnote.h>
++
++/*
++ * References to members of the new_cpu_data structure.
++ */
++
++#define X86 new_cpu_data+CPUINFO_x86
++#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL new_cpu_data+CPUINFO_x86_model
++#define X86_MASK new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
++
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ENTRY(startup_32)
++ movl %esi,xen_start_info
++ cld
++
++ /* Set up the stack pointer */
++ movl $(init_thread_union+THREAD_SIZE),%esp
++
++ /* get vendor info */
++ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
++ XEN_CPUID
++ movl %eax,X86_CPUID # save CPUID level
++ movl %ebx,X86_VENDOR_ID # lo 4 chars
++ movl %edx,X86_VENDOR_ID+4 # next 4 chars
++ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
++
++ movl $1,%eax # Use the CPUID instruction to get CPU type
++ XEN_CPUID
++ movb %al,%cl # save reg for future use
++ andb $0x0f,%ah # mask processor family
++ movb %ah,X86
++ andb $0xf0,%al # mask model
++ shrb $4,%al
++ movb %al,X86_MODEL
++ andb $0x0f,%cl # mask mask revision
++ movb %cl,X86_MASK
++ movl %edx,X86_CAPABILITY
++
++ movb $1,X86_HARD_MATH
++
++ xorl %eax,%eax # Clear FS/GS and LDT
++ movl %eax,%fs
++ movl %eax,%gs
++ cld # gcc2 wants the direction flag cleared at all times
++
++ pushl %eax # fake return address
++ jmp start_kernel
++
++#define HYPERCALL_PAGE_OFFSET 0x1000
++.org HYPERCALL_PAGE_OFFSET
++ENTRY(hypercall_page)
++ CFI_STARTPROC
++.skip 0x1000
++ CFI_ENDPROC
++
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
++ * BSS section
++ */
++.section ".bss.page_aligned","w"
++ENTRY(empty_zero_page)
++ .fill 4096,1,0
++
++/*
++ * This starts the data section.
++ */
++.data
++
++/*
++ * The Global Descriptor Table contains 32 quadwords, per-CPU.
++ */
++ .align L1_CACHE_BYTES
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x0000000000000000 /* 0x90 32-bit code */
++ .quad 0x0000000000000000 /* 0x98 16-bit code */
++ .quad 0x0000000000000000 /* 0xa0 16-bit data */
++ .quad 0x0000000000000000 /* 0xa8 16-bit data */
++ .quad 0x0000000000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x0000000000000000 /* 0xb8 APM CS code */
++ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0000000000000000 /* 0xc8 APM DS data */
++
++ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
++ .quad 0x0000000000000000 /* 0xd8 - unused */
++ .quad 0x0000000000000000 /* 0xe0 - unused */
++ .quad 0x0000000000000000 /* 0xe8 - unused */
++ .quad 0x0000000000000000 /* 0xf0 - unused */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoa value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoa (((\value)>>4)&0x0fffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",VIRT_ENTRY=0x"
++ utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|pae_pgdir_above_4gb"
++ .ascii "|supervisor_mode_kernel"
++#ifdef CONFIG_X86_PAE
++ .ascii ",PAE=yes[extended-cr3]"
++#else
++ .ascii ",PAE=no"
++#endif
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++#ifdef CONFIG_X86_PAE
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
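
The recursive utoa macro in the __xen_guest section above converts a constant to upper-case hex ASCII at assembly time, most significant nibble first, so addresses like VIRT_BASE can be embedded in the guest-info string. Its runtime C equivalent, for reference; 0xC0000000 serves as a sample value since that is the customary i386 __PAGE_OFFSET:

    #include <stdio.h>

    /* Emit v as upper-case hex, high nibble first, like the utoa macro. */
    static void utoa(unsigned long v)
    {
        if (v >= 0x10)
            utoa(v >> 4);
        putchar(v % 16 < 10 ? '0' + (int)(v % 16)
                            : 'A' + (int)(v % 16) - 10);
    }

    int main(void)
    {
        utoa(0xC0000000UL); /* prints C0000000 */
        putchar('\n');
        return 0;
    }
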
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/init_task-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/init_task-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/init_task-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/init_task-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,51 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/fs.h>
++#include <linux/mqueue.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/desc.h>
++
++static struct fs_struct init_fs = INIT_FS;
++static struct files_struct init_files = INIT_FILES;
++static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#define swapper_pg_dir ((pgd_t *)NULL)
++struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
++
++EXPORT_SYMBOL(init_mm);
++
++/*
++ * Initial thread structure.
++ *
++ * We need to make sure that this is THREAD_SIZE aligned due to the
++ * way process stacks are handled. This is done by having a special
++ * "init_task" linker map entry.
++ */
++union thread_union init_thread_union
++ __attribute__((__section__(".data.init_task"))) =
++ { INIT_THREAD_INFO(init_task) };
++
++/*
++ * Initial task structure.
++ *
++ * All other task structs will be allocated on slabs in fork.c
++ */
++struct task_struct init_task = INIT_TASK(init_task);
++
++EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
++ * no more per-task TSS's.
++ */
++DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
++#endif
++
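
The #define/#undef bracket around INIT_MM() above is the notable part: INIT_MM's initializer names swapper_pg_dir, for which this Xen flavour has no compile-time symbol, so the identifier is shadowed with a NULL cast for that single macro expansion. A toy reproduction of the trick; struct mm and INIT_MM here are simplified stand-ins:

    #include <stdio.h>

    struct mm { void *pgd; };
    #define INIT_MM() { .pgd = swapper_pg_dir }

    #define swapper_pg_dir ((void *)0) /* shadowed for this expansion only */
    struct mm init_mm = INIT_MM();
    #undef swapper_pg_dir

    int main(void)
    {
        printf("init_mm.pgd = %p\n", init_mm.pgd); /* prints a null pointer */
        return 0;
    }
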
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/io_apic-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/io_apic-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/io_apic-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/io_apic-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2770 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++#include <asm/i8259.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++
++#include "io_ports.h"
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
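++/*
++ * Under Xen the PIC is owned by the hypervisor, so the stubs above only
++ * keep the generic code happy: make_8259A_irq() just drops the IRQ from
++ * the io_apic_irqs mask, and an i8259 IRQ is never reported as pending.
++ */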
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++/* Where, if anywhere, is the i8259 connected in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++int timer_over_8254 __initdata = 1;
++
++/*
++ * Is the SiS APIC rmw bug present?
++ * -1 = don't know, 0 = no, 1 = yes
++ */
++int sis_apic_bug = -1;
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++int disable_timer_pin_1 __initdata;
++
++/*
++ * Rough estimate of how many shared IRQs there are; this can
++ * be changed at any time.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical; we want to do it in O(1).
++ *
++ * The indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
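++
++/*
++ * Illustrative sketch: slots 0..NR_IRQS-1 are the chain heads, and the
++ * slots above NR_IRQS are overflow entries handed out by add_pin_to_irq()
++ * below. E.g. if IRQ 9 is routed to pin 9 of APIC 0 and also to pin 17
++ * of APIC 1, irq_2_pin[9] holds {0, 9} and chains via ->next to an
++ * overflow slot holding {1, 17}.
++ */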
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: whoops");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
++
++#ifdef CONFIG_XEN
++#define clear_IO_APIC() ((void)0)
++#else
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++ int oldapic, int oldpin,
++ int newapic, int newpin)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (1) {
++ if (entry->apic == oldapic && entry->pin == oldpin) {
++ entry->apic = newapic;
++ entry->pin = newpin;
++ }
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int pin, reg;
++
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ reg = io_apic_read(entry->apic, 0x10 + pin*2);
++ reg &= ~disable;
++ reg |= enable;
++ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
++
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
++
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
++}
++
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
++{
++ unsigned long flags;
++ int pin;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int apicid_value;
++ cpumask_t tmp;
++
++ cpus_and(tmp, cpumask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(cpumask, tmp, CPU_MASK_ALL);
++
++ apicid_value = cpu_mask_to_apicid(cpumask);
++ /* Prepare to do the io_apic_write */
++ apicid_value = apicid_value << 24;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ set_irq_info(irq, cpumask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h> /* kernel_thread() */
++# include <linux/kernel_stat.h> /* kstat */
++# include <linux/slab.h> /* kmalloc() */
++# include <linux/timer.h> /* time_after() */
++
++#ifdef CONFIG_BALANCED_IRQ_DEBUG
++# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++# define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++# define TDprintk(x...)
++# define Dprintk(x...)
++# endif
++
++#define IRQBALANCE_CHECK_ARCH -999
++#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
++#define BALANCED_IRQ_MORE_DELTA (HZ/10)
++#define BALANCED_IRQ_LESS_DELTA (HZ)
++
++static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
++static int physical_balance __read_mostly;
++static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
++
++static struct irq_cpu_info {
++ unsigned long * last_irq;
++ unsigned long * irq_delta;
++ unsigned long irq;
++} irq_cpu_data[NR_CPUS];
++
++#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
++
++#define IDLE_ENOUGH(cpu,now) \
++ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
++
++#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
++
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
++
++static cpumask_t balance_irq_affinity[NR_IRQS] = {
++ [0 ... NR_IRQS-1] = CPU_MASK_ALL
++};
++
++void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ balance_irq_affinity[irq] = mask;
++}
++
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++ unsigned long now, int direction)
++{
++ int search_idle = 1;
++ int cpu = curr_cpu;
++
++ goto inside;
++
++ do {
++ if (unlikely(cpu == curr_cpu))
++ search_idle = 0;
++inside:
++ if (direction == 1) {
++ cpu++;
++ if (cpu >= NR_CPUS)
++ cpu = 0;
++ } else {
++ cpu--;
++ if (cpu == -1)
++ cpu = NR_CPUS-1;
++ }
++ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++ (search_idle && !IDLE_ENOUGH(cpu,now)));
++
++ return cpu;
++}
++
++static inline void balance_irq(int cpu, int irq)
++{
++ unsigned long now = jiffies;
++ cpumask_t allowed_mask;
++ unsigned int new_cpu;
++
++ if (irqbalance_disabled)
++ return;
++
++ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
++ new_cpu = move(cpu, allowed_mask, now, 1);
++ if (cpu != new_cpu) {
++ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
++ }
++}
++
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
++{
++ int i, j;
++ Dprintk("Rotating IRQs among CPUs.\n");
++ for_each_online_cpu(i) {
++ for (j = 0; j < NR_IRQS; j++) {
++ if (!irq_desc[j].action)
++ continue;
++ /* Is it a significant load ? */
++ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++ useful_load_threshold)
++ continue;
++ balance_irq(i, j);
++ }
++ }
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++}
++
++static void do_irq_balance(void)
++{
++ int i, j;
++ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++ unsigned long move_this_load = 0;
++ int max_loaded = 0, min_loaded = 0;
++ int load;
++ unsigned long useful_load_threshold = balanced_irq_interval + 10;
++ int selected_irq;
++ int tmp_loaded, first_attempt = 1;
++ unsigned long tmp_cpu_irq;
++ unsigned long imbalance = 0;
++ cpumask_t allowed_mask, target_cpu_mask, tmp;
++
++ for_each_possible_cpu(i) {
++ int package_index;
++ CPU_IRQ(i) = 0;
++ if (!cpu_online(i))
++ continue;
++ package_index = CPU_TO_PACKAGEINDEX(i);
++ for (j = 0; j < NR_IRQS; j++) {
++ unsigned long value_now, delta;
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if ( package_index == i )
++ IRQ_DELTA(package_index,j) = 0;
++ /* Determine the total count per processor per IRQ */
++ value_now = (unsigned long) kstat_cpu(i).irqs[j];
++
++ /* Determine the activity per processor per IRQ */
++ delta = value_now - LAST_CPU_IRQ(i,j);
++
++ /* Update last_cpu_irq[][] for the next time */
++ LAST_CPU_IRQ(i,j) = value_now;
++
++ /* Ignore IRQs whose rate is less than the clock */
++ if (delta < useful_load_threshold)
++ continue;
++ /* update the load for the processor or package total */
++ IRQ_DELTA(package_index,j) += delta;
++
++ /* Keep track of the higher numbered sibling as well */
++ if (i != package_index)
++ CPU_IRQ(i) += delta;
++ /*
++ * We have sibling A and sibling B in the package
++ *
++ * cpu_irq[A] = load for cpu A + load for cpu B
++ * cpu_irq[B] = load for cpu B
++ */
++ CPU_IRQ(package_index) += delta;
++ }
++ }
++ /* Find the least loaded processor package */
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (min_cpu_irq > CPU_IRQ(i)) {
++ min_cpu_irq = CPU_IRQ(i);
++ min_loaded = i;
++ }
++ }
++ max_cpu_irq = ULONG_MAX;
++
++tryanothercpu:
++ /* Look for heaviest loaded processor.
++ * We may come back to get the next heaviest loaded processor.
++ * Skip processors with trivial loads.
++ */
++ tmp_cpu_irq = 0;
++ tmp_loaded = -1;
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (max_cpu_irq <= CPU_IRQ(i))
++ continue;
++ if (tmp_cpu_irq < CPU_IRQ(i)) {
++ tmp_cpu_irq = CPU_IRQ(i);
++ tmp_loaded = i;
++ }
++ }
++
++ if (tmp_loaded == -1) {
++ 	/* In the case of a small number of heavy interrupt sources,
++ 	 * some of the cpus end up loaded too much. We use Ingo's
++ 	 * original approach to rotate them around.
++ */
++ if (!first_attempt && imbalance >= useful_load_threshold) {
++ rotate_irqs_among_cpus(useful_load_threshold);
++ return;
++ }
++ goto not_worth_the_effort;
++ }
++
++ first_attempt = 0; /* heaviest search */
++ max_cpu_irq = tmp_cpu_irq; /* load */
++ max_loaded = tmp_loaded; /* processor */
++ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++
++ Dprintk("max_loaded cpu = %d\n", max_loaded);
++ Dprintk("min_loaded cpu = %d\n", min_loaded);
++ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++ Dprintk("load imbalance = %lu\n", imbalance);
++
++	/* If the imbalance is less than approx 10% of the max load,
++	 * we hit diminishing returns - quit.
++ */
++ if (imbalance < (max_cpu_irq >> 3)) {
++ Dprintk("Imbalance too trivial\n");
++ goto not_worth_the_effort;
++ }
++
++tryanotherirq:
++ /* if we select an IRQ to move that can't go where we want, then
++ * see if there is another one to try.
++ */
++ move_this_load = 0;
++ selected_irq = -1;
++ for (j = 0; j < NR_IRQS; j++) {
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if (imbalance <= IRQ_DELTA(max_loaded,j))
++ continue;
++ /* Try to find the IRQ that is closest to the imbalance
++ * without going over.
++ */
++ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++ move_this_load = IRQ_DELTA(max_loaded,j);
++ selected_irq = j;
++ }
++ }
++ if (selected_irq == -1) {
++ goto tryanothercpu;
++ }
++
++ imbalance = move_this_load;
++
++	/* For the physical_balance case, we accumulated both load
++	 * values in one of the siblings' cpu_irq[], so that the same
++	 * code can be used for physical and logical processors
++	 * as much as possible.
++ *
++ * NOTE: the cpu_irq[] array holds the sum of the load for
++ * sibling A and sibling B in the slot for the lowest numbered
++ * sibling (A), _AND_ the load for sibling B in the slot for
++ * the higher numbered sibling.
++ *
++ * We seek the least loaded sibling by making the comparison
++ * (A+B)/2 vs B
++ */
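++	/* Worked example: with CPU_IRQ(A) = 1000 (A's own load plus B's)
++	 * and CPU_IRQ(B) = 300, load starts at 500 and B is picked as the
++	 * less loaded sibling. */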
++ load = CPU_IRQ(min_loaded) >> 1;
++ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++ if (load > CPU_IRQ(j)) {
++ /* This won't change cpu_sibling_map[min_loaded] */
++ load = CPU_IRQ(j);
++ min_loaded = j;
++ }
++ }
++
++ cpus_and(allowed_mask,
++ cpu_online_map,
++ balance_irq_affinity[selected_irq]);
++ target_cpu_mask = cpumask_of_cpu(min_loaded);
++ cpus_and(tmp, target_cpu_mask, allowed_mask);
++
++ if (!cpus_empty(tmp)) {
++
++ Dprintk("irq = %d moved to cpu = %d\n",
++ selected_irq, min_loaded);
++ /* mark for change destination */
++ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
++
++ /* Since we made a change, come back sooner to
++ * check for more variation.
++ */
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++ }
++ goto tryanotherirq;
++
++not_worth_the_effort:
++ /*
++ * if we did not find an IRQ to move, then adjust the time interval
++ * upward
++ */
++ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
++ Dprintk("IRQ worth rotating not found\n");
++ return;
++}
++
++static int balanced_irq(void *unused)
++{
++ int i;
++ unsigned long prev_balance_time = jiffies;
++ long time_remaining = balanced_irq_interval;
++
++ daemonize("kirqd");
++
++ /* push everything to CPU 0 to give us a starting point. */
++ for (i = 0 ; i < NR_IRQS ; i++) {
++ irq_desc[i].pending_mask = cpumask_of_cpu(0);
++ set_pending_irq(i, cpumask_of_cpu(0));
++ }
++
++ for ( ; ; ) {
++ time_remaining = schedule_timeout_interruptible(time_remaining);
++ try_to_freeze();
++ if (time_after(jiffies,
++ prev_balance_time+balanced_irq_interval)) {
++ preempt_disable();
++ do_irq_balance();
++ prev_balance_time = jiffies;
++ time_remaining = balanced_irq_interval;
++ preempt_enable();
++ }
++ }
++ return 0;
++}
++
++static int __init balanced_irq_init(void)
++{
++ int i;
++ struct cpuinfo_x86 *c;
++ cpumask_t tmp;
++
++ cpus_shift_right(tmp, cpu_online_map, 2);
++ c = &boot_cpu_data;
++	/* When not overridden on the command line, ask the subarchitecture. */
++ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++ irqbalance_disabled = NO_BALANCE_IRQ;
++ if (irqbalance_disabled)
++ return 0;
++
++ /* disable irqbalance completely if there is only one processor online */
++ if (num_online_cpus() < 2) {
++ irqbalance_disabled = 1;
++ return 0;
++ }
++ /*
++ * Enable physical balance only if more than 1 physical processor
++ * is present
++ */
++ if (smp_num_siblings > 1 && !cpus_empty(tmp))
++ physical_balance = 1;
++
++ for_each_online_cpu(i) {
++ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++			printk(KERN_ERR "balanced_irq_init: out of memory\n");
++ goto failed;
++ }
++ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++ }
++
++ printk(KERN_INFO "Starting balanced_irq\n");
++ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++ return 0;
++ else
++		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq\n");
++failed:
++ for_each_possible_cpu(i) {
++ kfree(irq_cpu_data[i].irq_delta);
++ irq_cpu_data[i].irq_delta = NULL;
++ kfree(irq_cpu_data[i].last_irq);
++ irq_cpu_data[i].last_irq = NULL;
++ }
++ return 0;
++}
++
++int __init irqbalance_disable(char *str)
++{
++ irqbalance_disabled = 1;
++ return 1;
++}
++
++__setup("noirqbalance", irqbalance_disable);
++
++late_initcall(balanced_irq_init);
++#endif /* CONFIG_IRQBALANCE */
++#endif /* CONFIG_SMP */
++#endif /* CONFIG_XEN */
++
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ unsigned int cfg;
++
++ /*
++ * Wait for idle.
++ */
++ apic_wait_icr_idle();
++ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++ /*
++ * Send the IPI. The write to APIC_ICR fires this off.
++ */
++ apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
++
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++
++static int __init ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++__setup("noapic", ioapic_setup);
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++ "slot:%d, pin:%d.\n", bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ return best_guess;
++}
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
++
++/*
++ * This function is currently only a helper for the i386 SMP boot process,
++ * where we need to reprogram the ioredtbls to cater for the cpus which
++ * have come online, so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++#ifndef CONFIG_XEN
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif /* !CONFIG_XEN */
++#endif /* CONFIG_SMP */
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
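++/*
++ * For example, EISA irq 10 reads port 0x4d0 + (10 >> 3) = 0x4d1 and
++ * tests bit 10 & 7 = 2 of the value read back.
++ */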
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++/* NEC98 interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_NEC98_trigger(idx) (0)
++#define default_NEC98_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ polarity = default_NEC98_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ trigger = default_NEC98_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ case MP_BUS_NEC98:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++
++ /*
++ * For MPS mode, so far only needed by ES7000 platform
++ */
++ if (ioapic_renumber_irq)
++ irq = ioapic_renumber_irq(apic, irq);
++
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ return irq;
++}
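++/*
++ * PCI mapping example for the code above: with two IO-APICs of 24 pins
++ * each, pin 3 of apic 1 yields irq 24 + 3 = 27 (before any PIRQ
++ * redirection or ES7000 renumbering).
++ */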
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
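++/*
++ * Note that, unlike on native hardware, the vector is not chosen here:
++ * it is handed back by the hypervisor through PHYSDEVOP_alloc_irq_vector
++ * in the hypercall above.
++ */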
++
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest =
++ cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ " IO-APIC (apicid-pin) %d-%d",
++ mp_ioapics[apic].mpc_apicid,
++ pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d",
++ mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ /*
++ * skip adding the timer int on secondary nodes, which causes
++ * a small but painful rift in the time-space continuum
++ */
++ if (multi_timer_check(apic, irq))
++ continue;
++ else
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE, " not connected.\n");
++}
++
++/*
++ * Set up the 8259A-master output pin:
++ */
++#ifndef CONFIG_XEN
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++	 * The timer IRQ doesn't have to know that behind the
++	 * scenes we have an 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++static inline void UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __init print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ union IO_APIC_reg_03 reg_03;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ if (reg_01.bits.version >= 0x20)
++ reg_03.raw = io_apic_read(apic, 3);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
++ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
++ if (reg_00.bits.ID >= get_physical_broadcast())
++ UNEXPECTED_IO_APIC();
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++ * but the value of reg_02 is read as the previous read register
++ * value, so ignore it if reg_02 == reg_01.
++ */
++ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++ * or reg_03, but the value of reg_0[23] is read as the previous read
++ * register value, so ignore it if reg_03 == reg_0[12].
++ */
++ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++ reg_03.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
++ if (reg_03.bits.__reserved_1)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++static void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void /*__init*/ print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++ }
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
++ apic_write(APIC_ESR, 0);
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void /*__init*/ print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++ int i8259_apic, i8259_pin;
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++	/* Look to see if the MP table has reported the ExtINT */
++	/* If we could not find the appropriate pin by looking at the ioapic,
++	 * the i8259 probably is not connected to the ioapic, but give the
++ * mptable a chance anyway.
++ */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++	/* Trust the MP table if nothing is set up in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC
++ * Put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++ union IO_APIC_reg_00 reg_00;
++ physid_mask_t phys_id_present_map;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Don't check I/O APIC IDs for xAPIC systems. They have
++ * no meaning without the serial APIC bus.
++ */
++ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ return;
++ /*
++ * This is broken; anything with a real cpu count has to
++ * circumvent this idiocy regardless.
++ */
++ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ reg_00.bits.ID);
++ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++ }
++
++ /*
++ * Sanity check, is the ID really free? Every APIC in a
++ * system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(phys_id_present_map,
++ mp_ioapics[apic].mpc_apicid)) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ for (i = 0; i < get_physical_broadcast(); i++)
++ if (!physid_isset(i, phys_id_present_map))
++ break;
++ if (i >= get_physical_broadcast())
++ panic("Max APIC ID exceeded!\n");
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ i);
++ physid_set(i, phys_id_present_map);
++ mp_ioapics[apic].mpc_apicid = i;
++ } else {
++ physid_mask_t tmp;
++ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++ apic_printk(APIC_VERBOSE, "Setting %d in the "
++ "phys_id_present_map\n",
++ mp_ioapics[apic].mpc_apicid);
++ physids_or(phys_id_present_map, phys_id_present_map, tmp);
++ }
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE, " ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif /* !CONFIG_XEN && !CONFIG_X86_NUMAQ */
++
++#ifndef CONFIG_XEN
++/*
++ * There is a nasty bug in some older SMP boards, their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
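++	/* (One tick is 1000/HZ ms, so the mdelay above spans ~10 ticks.) */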
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++ * glue logic does not lock up after one or two first
++ * ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++ if (jiffies - t1 > 4)
++ return 1;
++
++ return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there is an unspecified
++ * number of pending IRQ events left unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs to the same CPU. It's much
++ * better to do it this way, as we then do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ unsigned long v;
++ int i;
++
++ move_irq(irq);
++/*
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets). Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as edge-triggered one but the respective IRR
++ * bit gets set nevertheless. As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source. The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually. We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt. We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul. --macro
++ */
++ i = IO_APIC_VECTOR(irq);
++
++ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
++
++ ack_APIC_irq();
++
++ if (!(v & (1 << (i & 0x1f)))) {
++ atomic_inc(&irq_mis_count);
++ spin_lock(&ioapic_lock);
++ __mask_and_edge_IO_APIC_irq(irq);
++ __unmask_and_level_IO_APIC_irq(irq);
++ spin_unlock(&ioapic_lock);
++ }
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif
++#endif
++
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask LVT0 on all local APICs as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
++
++ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++
++ apic_printk(APIC_VERBOSE, " done.\n");
++}
++
++/*
++ * This looks a bit hackish, but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
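++	/*
++	 * Each 64-bit redirection entry is accessed as two 32-bit halves:
++	 * register 0x10 + 2*pin holds the low word, 0x11 + 2*pin the high.
++	 */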
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
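++	/*
++	 * The RTC was programmed above for 1024 Hz periodic interrupts
++	 * (rate select 0x6, RTC_PIE).  Poll for up to ~1s; each observed
++	 * periodic-interrupt flag shortens the wait, so a working RTC
++	 * finishes early.
++	 */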
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ timer_ack = 1;
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (timer_irq_works()) {
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
++ "IO-APIC\n");
++ }
++
++ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++ if (pin2 != -1) {
++ printk("\n..... (found pin %d) ...", pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ printk("works.\n");
++ if (pin1 != -1)
++ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
++ else
++ add_pin_to_irq(0, apic2, pin2);
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ printk(" failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ printk(" failed.\n");
++
++ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ timer_ack = 0;
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ printk(" failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
++ "report. Then try booting with the 'noapic' option");
++}
++#else
++int timer_uses_ioapic_pin_0 = 0;
++#define check_timer() ((void)0)
++#endif
++
++/*
++ *
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1 << PIC_CASCADE_IR)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ printk("ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up IO-APIC IRQ routing.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++
++/*
++ * Called after all the initialization is done. If we didn't find any
++ * APIC bugs then we can allow the modify fast path.
++ */
++
++static int __init io_apic_bug_finalize(void)
++{
++ if(sis_apic_bug == -1)
++ sis_apic_bug = 0;
++ if (is_initial_xendomain()) {
++ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
++ op.u.platform_quirk.quirk_id = sis_apic_bug ?
++ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
++ VOID(HYPERVISOR_platform_op(&op));
++ }
++ return 0;
++}
++
++late_initcall(io_apic_bug_finalize);
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
++{
++#ifndef CONFIG_XEN
++ union IO_APIC_reg_00 reg_00;
++ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++ physid_mask_t tmp;
++ unsigned long flags;
++ int i = 0;
++
++ /*
++ * The P4 platform supports up to 256 APIC IDs on two separate APIC
++ * buses (one for LAPICs, one for IOAPICs), whereas predecessors
++ * support only up to 16 on one shared APIC bus.
++ *
++ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++ * advantage of new APIC bus architecture.
++ */
++
++ if (physids_empty(apic_id_map))
++ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ if (apic_id >= get_physical_broadcast()) {
++ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++ "%d\n", ioapic, apic_id, reg_00.bits.ID);
++ apic_id = reg_00.bits.ID;
++ }
++
++ /*
++ * Every APIC in a system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(apic_id_map, apic_id)) {
++
++ for (i = 0; i < get_physical_broadcast(); i++) {
++ if (!check_apicid_used(apic_id_map, i))
++ break;
++ }
++
++ if (i == get_physical_broadcast())
++ panic("Max apic_id exceeded!\n");
++
++ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++ "trying %d\n", ioapic, apic_id, i);
++
++ apic_id = i;
++ }
++
++ tmp = apicid_to_cpu_present(apic_id);
++ physids_or(apic_id_map, apic_id_map, tmp);
++
++ if (reg_00.bits.ID != apic_id) {
++ reg_00.bits.ID = apic_id;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0, reg_00.raw);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /* Sanity check */
++ if (reg_00.bits.ID != apic_id) {
++ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++ return -1;
++ }
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
++
++ return apic_id;
++}
++
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1;
++
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/ioport-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/ioport-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/ioport-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/ioport-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,123 @@
++/*
++ * linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ unsigned long mask;
++ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++ unsigned int low_index = base & (BITS_PER_LONG-1);
++ int length = low_index + extent;
++
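++	/*
++	 * Three phases: patch up a partial leading word, store whole
++	 * words, then patch up a partial trailing word.
++	 */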
++ if (low_index != 0) {
++ mask = (~0UL << low_index);
++ if (length < BITS_PER_LONG)
++ mask &= ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ length -= BITS_PER_LONG;
++ }
++
++ mask = (new_value ? ~0UL : 0UL);
++ while (length >= BITS_PER_LONG) {
++ *bitmap_base++ = mask;
++ length -= BITS_PER_LONG;
++ }
++
++ if (length > 0) {
++ mask = ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ }
++}
++
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++ * which is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++ set_thread_flag(TIF_IO_BITMAP);
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ }
++
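++	/*
++	 * In the I/O bitmap a clear bit grants access, hence the inversion:
++	 * the bitmap starts as all ones (no access) and turn_on clears bits.
++	 */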
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++
++asmlinkage long sys_iopl(unsigned long unused)
++{
++ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
++ unsigned int level = regs->ebx;
++ struct thread_struct *t = &current->thread;
++ unsigned int old = (t->iopl >> 12) & 3;
++
++ if (level > 3)
++ return -EINVAL;
++ /* Trying to gain more privileges? */
++ if (level > old) {
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++ }
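++	/* Cache the level in the eflags IOPL bit position (bits 12-13). */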
++ t->iopl = level << 12;
++ set_iopl_mask(t->iopl);
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/irq-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/irq-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/irq-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/irq-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,324 @@
++/*
++ * linux/arch/i386/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86-specific interrupt
++ * entry, irq-stacks and irq statistics code. All the remaining
++ * irq logic is done by the generic kernel/irq/ code and
++ * by the x86-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <asm/uaccess.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
++
++DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
++EXPORT_PER_CPU_SYMBOL(irq_stat);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this for itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
++
++#ifdef CONFIG_4KSTACKS
++/*
++ * per-CPU IRQ handling contexts (thread information and stack)
++ */
++union irq_ctx {
++ struct thread_info tinfo;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
++};
++
++static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
++static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
++#endif
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++fastcall unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ int irq = ~regs->orig_eax;
++#ifdef CONFIG_4KSTACKS
++ union irq_ctx *curctx, *irqctx;
++ u32 *isp;
++#endif
++
++ if (unlikely((unsigned)irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ /* Debugging check for stack overflow: is there less than 1KB free? */
++ {
++ long esp;
++
++ __asm__ __volatile__("andl %%esp,%0" :
++ "=r" (esp) : "0" (THREAD_SIZE - 1));
++ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
++ printk("do_IRQ: stack overflow: %ld\n",
++ esp - sizeof(struct thread_info));
++ dump_stack();
++ }
++ }
++#endif
++
++#ifdef CONFIG_4KSTACKS
++
++ curctx = (union irq_ctx *) current_thread_info();
++ irqctx = hardirq_ctx[smp_processor_id()];
++
++ /*
++ * this is where we switch to the IRQ stack. However, if we are
++ * already using the IRQ stack (because we interrupted a hardirq
++ * handler) we can't do that and just have to keep using the
++ * current stack (which is the irq stack already after all)
++ */
++ if (curctx != irqctx) {
++ int arg1, arg2, ebx;
++
++ /* build the stack frame on the IRQ stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++ irqctx->tinfo.task = curctx->tinfo.task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /*
++ * Copy the softirq bits in preempt_count so that the
++ * softirq checks work in the hardirq context.
++ */
++ irqctx->tinfo.preempt_count =
++ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
++ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++
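++		/*
++		 * xchgl parks the old stack pointer in %ebx while %esp points
++		 * at the IRQ stack; the movl afterwards switches back.
++		 */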
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_IRQ \n"
++ " movl %%ebx,%%esp \n"
++ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
++ : "0" (irq), "1" (regs), "2" (isp)
++ : "memory", "cc", "ecx"
++ );
++ } else
++#endif
++ __do_IRQ(irq, regs);
++
++ irq_exit();
++
++ return 1;
++}
++
++#ifdef CONFIG_4KSTACKS
++
++/*
++ * These should really be __section__(".bss.page_aligned") as well, but
++ * gcc 3.0 and earlier don't handle that correctly.
++ */
++static char softirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++static char hardirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++/*
++ * allocate per-cpu stacks for hardirq and for softirq processing
++ */
++void irq_ctx_init(int cpu)
++{
++ union irq_ctx *irqctx;
++
++ if (hardirq_ctx[cpu])
++ return;
++
++ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ hardirq_ctx[cpu] = irqctx;
++
++ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = 0;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ softirq_ctx[cpu] = irqctx;
++
++ printk("CPU %u irqstacks, hard=%p soft=%p\n",
++ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
++}
++
++void irq_ctx_exit(int cpu)
++{
++ hardirq_ctx[cpu] = NULL;
++}
++
++extern asmlinkage void __do_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ unsigned long flags;
++ struct thread_info *curctx;
++ union irq_ctx *irqctx;
++ u32 *isp;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++
++ if (local_softirq_pending()) {
++ curctx = current_thread_info();
++ irqctx = softirq_ctx[smp_processor_id()];
++ irqctx->tinfo.task = curctx->task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /* build the stack frame on the softirq stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_softirq \n"
++ " movl %%ebx,%%esp \n"
++ : "=b"(isp)
++ : "0"(isp)
++ : "memory", "cc", "edx", "ecx", "eax"
++ );
++ /*
++ * Shouldn't happen; we returned above if in_interrupt():
++ */
++ WARN_ON_ONCE(softirq_count());
++ }
++
++ local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(do_softirq);
++#endif
++
++/*
++ * Interrupt statistics:
++ */
++
++atomic_t irq_err_count;
++
++/*
++ * /proc/interrupts printing:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++ seq_printf(p, " %s", action->name);
++
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", nmi_count(j));
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ",
++ per_cpu(irq_stat,j).apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#if defined(CONFIG_X86_IO_APIC)
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
++ if (any_online_cpu(mask) == NR_CPUS) {
++ /*printk("Breaking affinity for irq %i\n", irq);*/
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++#if 0
++ barrier();
++ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
++ [note the nop - the interrupt-enable boundary on x86 is two
++ instructions from sti] - to flush out pending hardirqs and
++ IPIs. After this point nothing is supposed to reach this CPU." */
++ __asm__ __volatile__("sti; nop; cli");
++ barrier();
++#else
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++#endif
++}
++#endif
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/ldt-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/ldt-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/ldt-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/ldt-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,270 @@
++/*
++ * linux/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ int oldsize;
++
++ if (mincount <= pc->size)
++ return 0;
++ oldsize = pc->size;
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
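++	/*
++	 * Publish the new table before the new size: the write barriers
++	 * ensure a reader that observes the larger size also sees the new
++	 * pointer.
++	 */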
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ init_MUTEX(&mm->context.sem);
++ mm->context.size = 0;
++ mm->context.has_foreign_mappings = 0;
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ return retval;
++}
++
++/*
++ * No need to lock the MM as we are the last user
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ void *address;
++
++ err = 0;
++ address = &default_ldt[0];
++ size = 5*sizeof(struct desc_struct);
++ if (size > bytecount)
++ size = bytecount;
++
++ err = size;
++ if (copy_to_user(ptr, address, size))
++ err = -EFAULT;
++
++ return err;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct mm_struct * mm = current->mm;
++ __u32 entry_1, entry_2;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++ entry_1, entry_2);
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/machine_kexec.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/machine_kexec.c
+--- linux-2.6.18.8/arch/i386/kernel/machine_kexec.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/machine_kexec.c 2008-08-21 11:36:07.000000000 +0200
+@@ -19,123 +19,67 @@
+ #include <asm/desc.h>
+ #include <asm/system.h>
+
+-#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
+-
+-#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+-#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+-#define L2_ATTR (_PAGE_PRESENT)
+-
+-#define LEVEL0_SIZE (1UL << 12UL)
+-
+-#ifndef CONFIG_X86_PAE
+-#define LEVEL1_SIZE (1UL << 22UL)
+-static u32 pgtable_level1[1024] PAGE_ALIGNED;
+-
+-static void identity_map_page(unsigned long address)
+-{
+- unsigned long level1_index, level2_index;
+- u32 *pgtable_level2;
+-
+- /* Find the current page table */
+- pgtable_level2 = __va(read_cr3());
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
+
+- /* Find the indexes of the physical address to identity map */
+- level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
+- level2_index = address / LEVEL1_SIZE;
+-
+- /* Identity map the page table entry */
+- pgtable_level1[level1_index] = address | L0_ATTR;
+- pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
+-
+- /* Flush the tlb so the new mapping takes effect.
+- * Global tlb entries are not flushed but that is not an issue.
+- */
+- load_cr3(pgtable_level2);
+-}
++#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
++static u32 kexec_pgd[1024] PAGE_ALIGNED;
++#ifdef CONFIG_X86_PAE
++static u32 kexec_pmd0[1024] PAGE_ALIGNED;
++static u32 kexec_pmd1[1024] PAGE_ALIGNED;
++#endif
++static u32 kexec_pte0[1024] PAGE_ALIGNED;
++static u32 kexec_pte1[1024] PAGE_ALIGNED;
+
+-#else
+-#define LEVEL1_SIZE (1UL << 21UL)
+-#define LEVEL2_SIZE (1UL << 30UL)
+-static u64 pgtable_level1[512] PAGE_ALIGNED;
+-static u64 pgtable_level2[512] PAGE_ALIGNED;
++#ifdef CONFIG_XEN
+
+-static void identity_map_page(unsigned long address)
+-{
+- unsigned long level1_index, level2_index, level3_index;
+- u64 *pgtable_level3;
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+
+- /* Find the current page table */
+- pgtable_level3 = __va(read_cr3());
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
+
+- /* Find the indexes of the physical address to identity map */
+- level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
+- level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
+- level3_index = address / LEVEL2_SIZE;
+-
+- /* Identity map the page table entry */
+- pgtable_level1[level1_index] = address | L0_ATTR;
+- pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
+- set_64bit(&pgtable_level3[level3_index],
+- __pa(pgtable_level2) | L2_ATTR);
+-
+- /* Flush the tlb so the new mapping takes effect.
+- * Global tlb entries are not flushed but that is not an issue.
+- */
+- load_cr3(pgtable_level3);
+-}
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
+ #endif
+
+-static void set_idt(void *newidt, __u16 limit)
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
+ {
+- struct Xgt_desc_struct curidt;
++ void *control_page;
+
+- /* ia32 supports unaliged loads & stores */
+- curidt.size = limit;
+- curidt.address = (unsigned long)newidt;
++ memset(xki->page_list, 0, sizeof(xki->page_list));
+
+- load_idt(&curidt);
+-};
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
+
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++#ifdef CONFIG_X86_PAE
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++#endif
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+
+-static void set_gdt(void *newgdt, __u16 limit)
+-{
+- struct Xgt_desc_struct curgdt;
+-
+- /* ia32 supports unaligned loads & stores */
+- curgdt.size = limit;
+- curgdt.address = (unsigned long)newgdt;
+-
+- load_gdt(&curgdt);
+-};
++}
+
+-static void load_segments(void)
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus)
+ {
+-#define __STR(X) #X
+-#define STR(X) __STR(X)
++ int k;
++
++ /* The per-cpu crash note resources belong to the hypervisor resource */
++ for (k = 0; k < nr_phys_cpus; k++)
++ request_resource(hypervisor, phys_cpus + k);
+
+- __asm__ __volatile__ (
+- "\tljmp $"STR(__KERNEL_CS)",$1f\n"
+- "\t1:\n"
+- "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
+- "\tmovl %%eax,%%ds\n"
+- "\tmovl %%eax,%%es\n"
+- "\tmovl %%eax,%%fs\n"
+- "\tmovl %%eax,%%gs\n"
+- "\tmovl %%eax,%%ss\n"
+- ::: "eax", "memory");
+-#undef STR
+-#undef __STR
++ return 0;
+ }
+
+-typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
+- unsigned long indirection_page,
+- unsigned long reboot_code_buffer,
+- unsigned long start_address,
+- unsigned int has_pae) ATTRIB_NORET;
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern void relocate_new_kernel_end(void);
+-extern const unsigned int relocate_new_kernel_size;
++void machine_kexec_register_resources(struct resource *res) { ; }
++
++#endif /* CONFIG_XEN */
+
+ /*
+ * An architecture hook called to validate the
+@@ -163,49 +107,38 @@ void machine_kexec_cleanup(struct kimage
+ {
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+ NORET_TYPE void machine_kexec(struct kimage *image)
+ {
+- unsigned long page_list;
+- unsigned long reboot_code_buffer;
+-
+- relocate_new_kernel_t rnk;
++ unsigned long page_list[PAGES_NR];
++ void *control_page;
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+- /* Compute some offsets */
+- reboot_code_buffer = page_to_pfn(image->control_code_page)
+- << PAGE_SHIFT;
+- page_list = image->head;
+-
+- /* Set up an identity mapping for the reboot_code_buffer */
+- identity_map_page(reboot_code_buffer);
+-
+- /* copy it out */
+- memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+- relocate_new_kernel_size);
+-
+- /* The segment registers are funny things, they have both a
+- * visible and an invisible part. Whenever the visible part is
+- * set to a specific selector, the invisible part is loaded
+- * with from a table in memory. At no other time is the
+- * descriptor table in memory accessed.
+- *
+- * I take advantage of this here by force loading the
+- * segments, before I zap the gdt with an invalid value.
+- */
+- load_segments();
+- /* The gdt & idt are now invalid.
+- * If you want to load them you must set up your own idt & gdt.
+- */
+- set_gdt(phys_to_virt(0),0);
+- set_idt(phys_to_virt(0),0);
+-
+- /* now call it */
+- rnk = (relocate_new_kernel_t) reboot_code_buffer;
+- (*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ page_list[PA_CONTROL_PAGE] = __pa(control_page);
++ page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
++ page_list[PA_PGD] = __pa(kexec_pgd);
++ page_list[VA_PGD] = (unsigned long)kexec_pgd;
++#ifdef CONFIG_X86_PAE
++ page_list[PA_PMD_0] = __pa(kexec_pmd0);
++ page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
++ page_list[PA_PMD_1] = __pa(kexec_pmd1);
++ page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
++#endif
++ page_list[PA_PTE_0] = __pa(kexec_pte0);
++ page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
++ page_list[PA_PTE_1] = __pa(kexec_pte1);
++ page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
++
++ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
++ image->start, cpu_has_pae);
+ }
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/Makefile linux-2.6.18-xen-3.3.0/arch/i386/kernel/Makefile
+--- linux-2.6.18.8/arch/i386/kernel/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -43,6 +43,7 @@ obj-$(CONFIG_K8_NB) += k8.o
+ EXTRA_AFLAGS := -traditional
+
+ obj-$(CONFIG_SCx200) += scx200.o
++obj-$(CONFIG_XEN) += fixup.o
+
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+@@ -80,5 +81,8 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall.
+ $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
+ $(call if_changed,syscall)
+
++early_printk-y += ../../x86_64/kernel/early_printk.o
+ k8-y += ../../x86_64/kernel/k8.o
+
++disabled-obj-$(CONFIG_XEN) := i8259.o reboot.o smpboot.o
++%/head.o %/head.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/microcode-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/microcode-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/microcode-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/microcode-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,144 @@
++/*
++ * Intel CPU Microcode Update Driver for Linux
++ *
++ * Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ * This driver allows upgrading the microcode on Intel processors
++ * belonging to the IA-32 family - PentiumPro, Pentium II,
++ * Pentium III, Xeon, Pentium 4, etc.
++ *
++ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
++ * Order Number 245472 or free download from:
++ *
++ * http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ * For more information, go to http://www.urbanmyth.org/microcode
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++//#define DEBUG /* pr_debug */
++#include <linux/capability.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/syscalls.h>
++
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
++MODULE_LICENSE("GPL");
++
++static int verbose;
++module_param(verbose, int, 0644);
++
++#define MICROCODE_VERSION "1.14a-xen"
++
++#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
++#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DEFINE_MUTEX(microcode_mutex);
++
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++
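++/*
++ * Under Xen the hypervisor applies the update; the driver only copies
++ * the buffer into kernel memory and hands it over via a platform
++ * hypercall.
++ */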
++static int do_microcode_update (const void __user *ubuf, size_t len)
++{
++ int err;
++ void *kbuf;
++
++ kbuf = vmalloc(len);
++ if (!kbuf)
++ return -ENOMEM;
++
++ if (copy_from_user(kbuf, ubuf, len) == 0) {
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_microcode_update;
++ set_xen_guest_handle(op.u.microcode.data, kbuf);
++ op.u.microcode.length = len;
++ err = HYPERVISOR_platform_op(&op);
++ } else
++ err = -EFAULT;
++
++ vfree(kbuf);
++
++ return err;
++}
++
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++ ssize_t ret;
++
++ if (len < MC_HEADER_SIZE) {
++ printk(KERN_ERR "microcode: not enough data\n");
++ return -EINVAL;
++ }
++
++ mutex_lock(&microcode_mutex);
++
++ ret = do_microcode_update(buf, len);
++ if (!ret)
++ ret = (ssize_t)len;
++
++ mutex_unlock(&microcode_mutex);
++
++ return ret;
++}
++
++static struct file_operations microcode_fops = {
++ .owner = THIS_MODULE,
++ .write = microcode_write,
++ .open = microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++ .minor = MICROCODE_MINOR,
++ .name = "microcode",
++ .fops = &microcode_fops,
++};
++
++static int __init microcode_init (void)
++{
++ int error;
++
++ error = misc_register(&microcode_dev);
++ if (error) {
++ printk(KERN_ERR
++ "microcode: can't misc_register on minor=%d\n",
++ MICROCODE_MINOR);
++ return error;
++ }
++
++ printk(KERN_INFO
++ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
++ return 0;
++}
++
++static void __exit microcode_exit (void)
++{
++ misc_deregister(&microcode_dev);
++}
++
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/mpparse-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/mpparse-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/mpparse-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/mpparse-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1185 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/bitops.h>
++
++#include <asm/smp.h>
++#include <asm/acpi.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/io_apic.h>
++
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#include <bios_ebda.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++int apic_version [MAX_APICS];
++int mp_bus_id_to_type [MAX_MP_BUSSES];
++int mp_bus_id_to_node [MAX_MP_BUSSES];
++int mp_bus_id_to_local [MAX_MP_BUSSES];
++int quad_local_to_mp_bus_id [NR_CPUS/4][4];
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static int mp_current_pci_id;
++
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++
++int pic_mode;
++unsigned long mp_lapic_addr;
++
++unsigned int def_to_bigsmp = 0;
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_physical_apicid = -1U;
++/* Internal processor count */
++static unsigned int __devinitdata num_processors;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map;
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++
++/*
++ * Checksum an MP configuration block.
++ */
++
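++/*
++ * A valid block sums to zero modulo 0x100, so a nonzero return marks
++ * the table as corrupt.
++ */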
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
++
++/*
++ * Have to match translation table entries to main table entries by counter
++ * hence the mpc_record variable .... can't see a less disgusting way of
++ * doing this ....
++ */
++
++static int mpc_record;
++static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++
++#ifndef CONFIG_XEN
++static void __devinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int ver, apicid;
++ physid_mask_t phys_cpu;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED))
++ return;
++
++ apicid = mpc_apic_id(m, translation_table[mpc_record]);
++
++ if (m->mpc_featureflag&(1<<0))
++ Dprintk(" Floating point unit present.\n");
++ if (m->mpc_featureflag&(1<<7))
++ Dprintk(" Machine Exception supported.\n");
++ if (m->mpc_featureflag&(1<<8))
++ Dprintk(" 64 bit compare & exchange supported.\n");
++ if (m->mpc_featureflag&(1<<9))
++ Dprintk(" Internal APIC present.\n");
++ if (m->mpc_featureflag&(1<<11))
++ Dprintk(" SEP present.\n");
++ if (m->mpc_featureflag&(1<<12))
++ Dprintk(" MTRR present.\n");
++ if (m->mpc_featureflag&(1<<13))
++ Dprintk(" PGE present.\n");
++ if (m->mpc_featureflag&(1<<14))
++ Dprintk(" MCA present.\n");
++ if (m->mpc_featureflag&(1<<15))
++ Dprintk(" CMOV present.\n");
++ if (m->mpc_featureflag&(1<<16))
++ Dprintk(" PAT present.\n");
++ if (m->mpc_featureflag&(1<<17))
++ Dprintk(" PSE present.\n");
++ if (m->mpc_featureflag&(1<<18))
++ Dprintk(" PSN present.\n");
++ if (m->mpc_featureflag&(1<<19))
++ Dprintk(" Cache Line Flush Instruction present.\n");
++ /* 20 Reserved */
++ if (m->mpc_featureflag&(1<<21))
++ Dprintk(" Debug Trace and EMON Store present.\n");
++ if (m->mpc_featureflag&(1<<22))
++ Dprintk(" ACPI Thermal Throttle Registers present.\n");
++ if (m->mpc_featureflag&(1<<23))
++ Dprintk(" MMX present.\n");
++ if (m->mpc_featureflag&(1<<24))
++ Dprintk(" FXSR present.\n");
++ if (m->mpc_featureflag&(1<<25))
++ Dprintk(" XMM present.\n");
++ if (m->mpc_featureflag&(1<<26))
++ Dprintk(" Willamette New Instructions present.\n");
++ if (m->mpc_featureflag&(1<<27))
++ Dprintk(" Self Snoop present.\n");
++ if (m->mpc_featureflag&(1<<28))
++ Dprintk(" HT present.\n");
++ if (m->mpc_featureflag&(1<<29))
++ Dprintk(" Thermal Monitor present.\n");
++ /* 30, 31 Reserved */
++
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_physical_apicid = m->mpc_apicid;
++ }
++
++ ver = m->mpc_apicver;
++
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
++ "fixing up to 0x10. (tell your hw vendor)\n",
++ m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++
++ phys_cpu = apicid_to_cpu_present(apicid);
++ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
++
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ if (num_processors >= maxcpus) {
++ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
++ " Processor ignored.\n", maxcpus);
++ return;
++ }
++
++ cpu_set(num_processors, cpu_possible_map);
++ num_processors++;
++
++ /*
++ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
++ * but we need to work out other dependencies like SMP_SUSPEND etc.
++ * before this can be done without some confusion.
++ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
++ * - Ashok Raj <ashok.raj@intel.com>
++ */
++ if (num_processors > 8) {
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (!APIC_XAPIC(ver)) {
++ def_to_bigsmp = 0;
++ break;
++ }
++ /* If P4 and above fall through */
++ case X86_VENDOR_AMD:
++ def_to_bigsmp = 1;
++ }
++ }
++ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++}
++#else
++void __init MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++
++ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
++
++ if (m->mpc_busid >= MAX_MP_BUSSES) {
++ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
++ " is too large, max. supported is %d\n",
++ m->mpc_busid, str, MAX_MP_BUSSES - 1);
++ return;
++ }
++
++ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
++ mpc_oem_pci_bus(m, translation_table[mpc_record]);
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
++ } else {
++ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++ * will show us if this assumptions is false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++#ifdef CONFIG_X86_NUMAQ
++static void __init MP_translation_info (struct mpc_config_translation *m)
++{
++ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
++
++ if (mpc_record >= MAX_MPC_ENTRY)
++ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
++ else
++ translation_table[mpc_record] = m; /* stash this for later */
++ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
++ node_set_online(m->trans_quad);
++}
++
++/*
++ * Read/parse the MPC oem tables
++ */
++
++static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
++ unsigned short oemsize)
++{
++ int count = sizeof (*oemtable); /* the header size */
++ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
++
++ mpc_record = 0;
++ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
++ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
++ {
++ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
++ oemtable->oem_signature[0],
++ oemtable->oem_signature[1],
++ oemtable->oem_signature[2],
++ oemtable->oem_signature[3]);
++ return;
++ }
++ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
++ {
++ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
++ return;
++ }
++ while (count < oemtable->oem_length) {
++ switch (*oemptr) {
++ case MP_TRANSLATION:
++ {
++ struct mpc_config_translation *m=
++ (struct mpc_config_translation *)oemptr;
++ MP_translation_info(m);
++ oemptr += sizeof(*m);
++ count += sizeof(*m);
++ ++mpc_record;
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
++ return;
++ }
++ }
++ }
++}
++
++static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
++ char *productid)
++{
++ if (strncmp(oem, "IBM NUMA", 8))
++ printk("Warning! May not be a NUMA-Q system!\n");
++ if (mpc->mpc_oemptr)
++ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
++ mpc->mpc_oemsize);
++}
++#endif /* CONFIG_X86_NUMAQ */
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ char oem[10];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
++ *(u32 *)mpc->mpc_signature);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk(KERN_ERR "SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(oem,mpc->mpc_oem,8);
++ oem[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",oem);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ mps_oem_check(mpc, oem, str);
++
++ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++
++ /*
++ * Save the local APIC address (it might be non-default) -- but only
++ * if we're not using ACPI.
++ */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ mpc_record = 0;
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ /* ACPI may have already provided this data */
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ default:
++ {
++ count = mpc->mpc_length;
++ break;
++ }
++ }
++ ++mpc_record;
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
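++/*
++ * The EISA Edge/Level Control Register is a pair of I/O ports at
++ * 0x4d0/0x4d1 with one bit per ISA IRQ; a set bit means the IRQ is
++ * level-triggered rather than edge-triggered.
++ */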
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * If true, we have an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk("???\n");
++ printk(KERN_ERR "Unknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++ * processors, where MPS only supports physical.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ unsigned long *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ printk("Error: MPF size\n");
++
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
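++		/*
++		 * A valid floating pointer has the "_MP_" signature, a
++		 * length of one 16-byte paragraph, a checksum of zero and
++		 * an MP spec revision of 1.1 or 1.4.
++		 */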
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++#ifndef CONFIG_XEN
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ virt_to_phys(mpf));
++ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
++ if (mpf->mpf_physptr) {
++ /*
++				 * We cannot access the MPC table to compute
++				 * its size yet, as only a few megabytes from
++				 * the bottom are mapped at this point.
++				 * The PC-9800's MPC table is placed at the
++				 * very end of physical memory, so blindly
++				 * reserving PAGE_SIZE from mpf->mpf_physptr
++				 * would trigger the BUG() in reserve_bootmem.
++ */
++ unsigned long size = PAGE_SIZE;
++ unsigned long end = max_low_pfn * PAGE_SIZE;
++ if (mpf->mpf_physptr + size > end)
++ size = end - mpf->mpf_physptr;
++ reserve_bootmem(mpf->mpf_physptr, size);
++ }
++#else
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
++
++ mpf_found = mpf;
++ return 1;
++ }
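++		/* bp is an unsigned long *, so bp += 4 advances 16 bytes,
++		   matching the spec's 16-byte alignment of the structure. */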
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
++
++void __init find_smp_config (void)
++{
++#ifndef CONFIG_XEN
++ unsigned int address;
++#endif
++
++ /*
++ * FIXME: Linux assumes you have 640K of base ram..
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E, calculate and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ *
++ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
++ */
++
++#ifndef CONFIG_XEN
++ address = get_bios_ebda();
++ if (address)
++ smp_scan_config(address, 0x400);
++#endif
++}
++
++int es7000_plat;
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_physical_apicid == -1U)
++ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __devinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (MAX_APICS - id <= 0) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_base;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_base)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++ int tmpid;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ tmpid = io_apic_get_unique_id(idx, id);
++ else
++ tmpid = id;
++ if (tmpid == -1) {
++ nr_ioapics--;
++ return;
++ }
++ mp_ioapics[idx].mpc_apicid = tmpid;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_base = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_base,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Older generations of ES7000 have no legacy identity mappings
++ */
++ if (es7000_plat == 1)
++ return;
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++ /*
++	 * Use the default configuration for IRQs 0-15, unless
++	 * overridden by (MADT) interrupt source override entries.
++ */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++ /* Do we already have a mapping for this IOAPIC pin */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi (u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++ /*
++	 * Mapping between Global System Interrupts, which
++ * represent all possible interrupts, and IRQs
++ * assigned to actual devices.
++ */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ if (ioapic_renumber_irq)
++ gsi = ioapic_renumber_irq(ioapic, gsi);
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
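++	/* pin_programmed[] is four 32-bit words, one bit per pin 0-127. */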
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++			 * avoid a problem where the 8254 timer (IRQ0) is set up
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/pci-dma-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/pci-dma-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/pci-dma-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/pci-dma-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,415 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
++ */
++
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
++#include <asm/swiotlb.h>
++#include <asm/tlbflush.h>
++#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
++#include <asm/bug.h>
++
++#ifdef __x86_64__
++#include <asm/proto.h>
++
++int iommu_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_merge);
++
++dma_addr_t bad_dma_address __read_mostly;
++EXPORT_SYMBOL(bad_dma_address);
++
++/* This tells the BIO block layer to assume merging. Default to off
++ because we cannot guarantee merging later. */
++int iommu_bio_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int force_iommu __read_mostly= 0;
++
++__init int iommu_setup(char *p)
++{
++ return 1;
++}
++
++void __init pci_iommu_alloc(void)
++{
++#ifdef CONFIG_SWIOTLB
++ pci_swiotlb_init();
++#endif
++}
++
++static int __init pci_iommu_init(void)
++{
++ no_iommu_init();
++ return 0;
++}
++
++/* Must execute after PCI subsystem */
++fs_initcall(pci_iommu_init);
++#endif
++
++struct dma_coherent_mem {
++ void *virt_base;
++ u32 device_base;
++ int size;
++ int flags;
++ unsigned long *bitmap;
++};
++
++#define IOMMU_BUG_ON(test) \
++do { \
++ if (unlikely(test)) { \
++ printk(KERN_ALERT "Fatal DMA error! " \
++ "Please use 'swiotlb=force'\n"); \
++ BUG(); \
++ } \
++} while (0)
++
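++/*
++ * Under Xen, pages that are contiguous in pseudo-physical address space
++ * need not be contiguous in machine address space, so a multi-page DMA
++ * range is only safe when the underlying machine frames are consecutive.
++ */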
++static int check_pages_physically_contiguous(unsigned long pfn,
++ unsigned int offset,
++ size_t length)
++{
++ unsigned long next_mfn;
++ int i;
++ int nr_pages;
++
++ next_mfn = pfn_to_mfn(pfn);
++ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
++
++ for (i = 1; i < nr_pages; i++) {
++ if (pfn_to_mfn(++pfn) != ++next_mfn)
++ return 0;
++ }
++ return 1;
++}
++
++int range_straddles_page_boundary(paddr_t p, size_t size)
++{
++ extern unsigned long *contiguous_bitmap;
++ unsigned long pfn = p >> PAGE_SHIFT;
++ unsigned int offset = p & ~PAGE_MASK;
++
++ if (offset + size <= PAGE_SIZE)
++ return 0;
++ if (test_bit(pfn, contiguous_bitmap))
++ return 0;
++ if (check_pages_physically_contiguous(pfn, offset, size))
++ return 0;
++ return 1;
++}
++
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i, rc;
++	 * will show us if this assumption is false.
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(nents == 0 || sg[0].length == 0);
++
++ if (swiotlb) {
++ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++ } else {
++ for (i = 0; i < nents; i++ ) {
++ BUG_ON(!sg[i].page);
++ sg[i].dma_address =
++ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
++ sg[i].dma_length = sg[i].length;
++ IOMMU_BUG_ON(address_needs_mapping(
++ hwdev, sg[i].dma_address));
++ IOMMU_BUG_ON(range_straddles_page_boundary(
++ page_to_pseudophys(sg[i].page) + sg[i].offset,
++ sg[i].length));
++ }
++ rc = nents;
++ }
++
++ flush_write_buffers();
++ return rc;
++}
++EXPORT_SYMBOL(dma_map_sg);
++
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i;
++
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_sg(hwdev, sg, nents, direction);
++ else {
++ for (i = 0; i < nents; i++ )
++ gnttab_dma_unmap_page(sg[i].dma_address);
++ }
++}
++EXPORT_SYMBOL(dma_unmap_sg);
++
++#ifdef CONFIG_HIGHMEM
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction)
++{
++ dma_addr_t dma_addr;
++
++ BUG_ON(direction == DMA_NONE);
++
++ if (swiotlb) {
++ dma_addr = swiotlb_map_page(
++ dev, page, offset, size, direction);
++ } else {
++ dma_addr = gnttab_dma_map_page(page) + offset;
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++ }
++
++ return dma_addr;
++}
++EXPORT_SYMBOL(dma_map_page);
++
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_page(dev, dma_address, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
++}
++EXPORT_SYMBOL(dma_unmap_page);
++#endif /* CONFIG_HIGHMEM */
++
++int
++dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (swiotlb)
++ return swiotlb_dma_mapping_error(dma_addr);
++ return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
++
++int
++dma_supported(struct device *dev, u64 mask)
++{
++ if (swiotlb)
++ return swiotlb_dma_supported(dev, mask);
++ /*
++	 * By default we BUG() when an infeasible DMA is requested and
++	 * ask the user to boot with swiotlb=force (see IOMMU_BUG_ON).
++ */
++ return 1;
++}
++EXPORT_SYMBOL(dma_supported);
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ void *ret;
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ unsigned int order = get_order(size);
++ unsigned long vstart;
++ u64 mask;
++
++ /* ignore region specifiers */
++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++ if (mem) {
++ int page = bitmap_find_free_region(mem->bitmap, mem->size,
++ order);
++ if (page >= 0) {
++ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
++ ret = mem->virt_base + (page << PAGE_SHIFT);
++ memset(ret, 0, size);
++ return ret;
++ }
++ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++ return NULL;
++ }
++
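++	/* An unknown device, or one that cannot address the full 32-bit
++	   space, gets its memory from the low DMA zone. */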
++ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++ gfp |= GFP_DMA;
++
++ vstart = __get_free_pages(gfp, order);
++ ret = (void *)vstart;
++
++ if (dev != NULL && dev->coherent_dma_mask)
++ mask = dev->coherent_dma_mask;
++ else
++ mask = 0xffffffff;
++
++ if (ret != NULL) {
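++		/*
++		 * Exchange the pages for a machine-contiguous region that
++		 * fits below the device's coherent DMA mask.
++		 */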
++ if (xen_create_contiguous_region(vstart, order,
++ fls64(mask)) != 0) {
++ free_pages(vstart, order);
++ return NULL;
++ }
++ memset(ret, 0, size);
++ *dma_handle = virt_to_bus(ret);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle)
++{
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ int order = get_order(size);
++
++ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++
++ bitmap_release_region(mem->bitmap, page, order);
++ } else {
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++ }
++}
++EXPORT_SYMBOL(dma_free_coherent);
++
++#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags)
++{
++ void __iomem *mem_base;
++ int pages = size >> PAGE_SHIFT;
++ int bitmap_size = (pages + 31)/32;
++
++ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++ goto out;
++ if (!size)
++ goto out;
++ if (dev->dma_mem)
++ goto out;
++
++ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++
++ mem_base = ioremap(bus_addr, size);
++ if (!mem_base)
++ goto out;
++
++ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++ if (!dev->dma_mem)
++ goto out;
++ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
++ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
++ if (!dev->dma_mem->bitmap)
++ goto free1_out;
++ memset(dev->dma_mem->bitmap, 0, bitmap_size);
++
++ dev->dma_mem->virt_base = mem_base;
++ dev->dma_mem->device_base = device_addr;
++ dev->dma_mem->size = pages;
++ dev->dma_mem->flags = flags;
++
++ if (flags & DMA_MEMORY_MAP)
++ return DMA_MEMORY_MAP;
++
++ return DMA_MEMORY_IO;
++
++ free1_out:
++ kfree(dev->dma_mem->bitmap);
++ out:
++ return 0;
++}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
++
++void dma_release_declared_memory(struct device *dev)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++
++ if(!mem)
++ return;
++ dev->dma_mem = NULL;
++ iounmap(mem->virt_base);
++ kfree(mem->bitmap);
++ kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
++
++void *dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ int pos, err;
++
++ if (!mem)
++ return ERR_PTR(-EINVAL);
++
++ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++ if (err != 0)
++ return ERR_PTR(err);
++ return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_addr_t dma;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(size == 0);
++
++ if (swiotlb) {
++ dma = swiotlb_map_single(dev, ptr, size, direction);
++ } else {
++ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
++ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++ }
++
++ flush_write_buffers();
++ return dma;
++}
++EXPORT_SYMBOL(dma_map_single);
++
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction)
++{
++ if (direction == DMA_NONE)
++ BUG();
++ if (swiotlb)
++ swiotlb_unmap_single(dev, dma_addr, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_addr);
++}
++EXPORT_SYMBOL(dma_unmap_single);
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/process-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/process-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/process-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/process-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,877 @@
++/*
++ * linux/arch/i386/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/desc.h>
++#include <asm/vm86.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
++#endif
++
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++
++#include <linux/err.h>
++
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++static int hlt_counter;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++ return ((unsigned long *)tsk->thread.esp)[3];
++}
++
++/*
++ * Powermanagement idle function, if any..
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++ hlt_counter++;
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++ hlt_counter--;
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->work.need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
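++	/* Spin on the need_resched flag; "rep; nop" is the PAUSE hint
++	   that tells the CPU this is a busy-wait loop. */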
++ asm volatile(
++ "2:"
++ "testl %0, %1;"
++ "rep; nop;"
++ "je 2b;"
++ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(default_idle);
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern cpumask_t cpu_initialized;
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle(void)
++{
++ int cpu = smp_processor_id();
++
++ current_thread_info()->status |= TS_POLLING;
++
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ rmb();
++ idle = xen_idle; /* no alternatives */
++
++ if (cpu_is_offline(cpu))
++ play_dead();
++
++ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
++ idle();
++ }
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++void show_regs(struct pt_regs * regs)
++{
++ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
++
++ printk("\n");
++ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++ print_symbol("EIP is at %s\n", regs->eip);
++
++ if (user_mode_vm(regs))
++ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
++ regs->eflags, print_tainted(), system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++ regs->eax,regs->ebx,regs->ecx,regs->edx);
++ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++ regs->esi, regs->edi, regs->ebp);
++ printk(" DS: %04x ES: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes);
++
++ cr0 = read_cr0();
++ cr2 = read_cr2();
++ cr3 = read_cr3();
++ cr4 = read_cr4_safe();
++ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
++ show_trace(NULL, regs, &regs->esp);
++}
++
++/*
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
++ */
++extern void kernel_thread_helper(void);
++__asm__(".section .text\n"
++ ".align 4\n"
++ "kernel_thread_helper:\n\t"
++ "movl %edx,%eax\n\t"
++ "pushl %edx\n\t"
++ "call *%ebx\n\t"
++ "pushl %eax\n\t"
++ "call do_exit\n"
++ ".previous");
++
++/*
++ * Create a kernel thread
++ */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++ struct pt_regs regs;
++
++ memset(&regs, 0, sizeof(regs));
++
++ regs.ebx = (unsigned long) fn;
++ regs.edx = (unsigned long) arg;
++
++ regs.xds = __USER_DS;
++ regs.xes = __USER_DS;
++ regs.orig_eax = -1;
++ regs.eip = (unsigned long) kernel_thread_helper;
++ regs.xcs = GET_KERNEL_CS();
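++	/* X86_EFLAGS_IF enables interrupts; 0x2 is the reserved,
++	   always-one EFLAGS bit. */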
++ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++
++ /* Ok, create the new process.. */
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++EXPORT_SYMBOL(kernel_thread);
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ /* The process may have allocated an io port bitmap... nuke it. */
++ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
++ struct task_struct *tsk = current;
++ struct thread_struct *t = &tsk->thread;
++ struct physdev_set_iobitmap set_iobitmap;
++ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ clear_thread_flag(TIF_IO_BITMAP);
++ }
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++
++ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ clear_tsk_thread_flag(tsk, TIF_DEBUG);
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ BUG_ON(dead_task->mm);
++ release_vm86_irqs(dead_task);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ struct pt_regs * childregs;
++ struct task_struct *tsk;
++ int err;
++
++ childregs = task_pt_regs(p);
++ *childregs = *regs;
++ childregs->eax = 0;
++ childregs->esp = esp;
++
++ p->thread.esp = (unsigned long) childregs;
++ p->thread.esp0 = (unsigned long) (childregs+1);
++
++ p->thread.eip = (unsigned long) ret_from_fork;
++
++ savesegment(fs,p->thread.fs);
++ savesegment(gs,p->thread.gs);
++
++ tsk = current;
++ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ set_tsk_thread_flag(p, TIF_IO_BITMAP);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++ struct desc_struct *desc;
++ struct user_desc info;
++ int idx;
++
++ err = -EFAULT;
++ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++ goto out;
++ err = -EINVAL;
++ if (LDT_empty(&info))
++ goto out;
++
++ idx = info.entry_number;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ goto out;
++
++ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++ out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++/*
++ * fill in the user structure for a core dump..
++ */
++void dump_thread(struct pt_regs * regs, struct user * dump)
++{
++ int i;
++
++/* changed the size calculations - should hopefully work better. lbt */
++ dump->magic = CMAGIC;
++ dump->start_code = 0;
++ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++ dump->u_dsize -= dump->u_tsize;
++ dump->u_ssize = 0;
++ for (i = 0; i < 8; i++)
++ dump->u_debugreg[i] = current->thread.debugreg[i];
++
++ if (dump->start_stack < TASK_SIZE)
++ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++
++ dump->regs.ebx = regs->ebx;
++ dump->regs.ecx = regs->ecx;
++ dump->regs.edx = regs->edx;
++ dump->regs.esi = regs->esi;
++ dump->regs.edi = regs->edi;
++ dump->regs.ebp = regs->ebp;
++ dump->regs.eax = regs->eax;
++ dump->regs.ds = regs->xds;
++ dump->regs.es = regs->xes;
++ savesegment(fs,dump->regs.fs);
++ savesegment(gs,dump->regs.gs);
++ dump->regs.orig_eax = regs->orig_eax;
++ dump->regs.eip = regs->eip;
++ dump->regs.cs = regs->xcs;
++ dump->regs.eflags = regs->eflags;
++ dump->regs.esp = regs->esp;
++ dump->regs.ss = regs->xss;
++
++ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++}
++EXPORT_SYMBOL(dump_thread);
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs ptregs = *task_pt_regs(tsk);
++ ptregs.xcs &= 0xffff;
++ ptregs.xds &= 0xffff;
++ ptregs.xes &= 0xffff;
++ ptregs.xss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ return 1;
++}
++
++static noinline void __switch_to_xtra(struct task_struct *next_p)
++{
++ struct thread_struct *next;
++
++ next = &next_p->thread;
++
++ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++ set_debugreg(next->debugreg[0], 0);
++ set_debugreg(next->debugreg[1], 1);
++ set_debugreg(next->debugreg[2], 2);
++ set_debugreg(next->debugreg[3], 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg[6], 6);
++ set_debugreg(next->debugreg[7], 7);
++ }
++}
++
++/*
++ * This function selects if the context switch from prev to next
++ * has to tweak the TSC disable bit in the cr4.
++ */
++static inline void disable_tsc(struct task_struct *prev_p,
++ struct task_struct *next_p)
++{
++ struct thread_info *prev, *next;
++
++ /*
++ * gcc should eliminate the ->thread_info dereference if
++ * has_secure_computing returns 0 at compile time (SECCOMP=n).
++ */
++ prev = task_thread_info(prev_p);
++ next = task_thread_info(next_p);
++
++ if (has_secure_computing(prev) || has_secure_computing(next)) {
++ /* slow path here */
++ if (has_secure_computing(prev) &&
++ !has_secure_computing(next)) {
++ write_cr4(read_cr4() & ~X86_CR4_TSD);
++ } else if (!has_secure_computing(prev) &&
++ has_secure_computing(next))
++ write_cr4(read_cr4() | X86_CR4_TSD);
++ }
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPUs, and this simplifies a lot of things (SMP
++ * and UP become the same).
++ *
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
++ *
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
++ *
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
++ */
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++#if CONFIG_XEN_COMPAT > 0x030002
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++#else
++ struct physdev_op _pdo[2], *pdo = _pdo;
++#define iopl_op pdo->u.set_iopl
++#define iobmp_op pdo->u.set_iobitmap
++#endif
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++#if 0 /* lazy fpu sanity check */
++ else BUG_ON(!(read_cr0() & 8));
++#endif
++
++ /*
++ * Reload esp0.
++ * This is load_esp0(tss, next) with a multicall.
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->esp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
++ next->tls_array[i].b != prev->tls_array[i].b)) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ *(u64 *)&mcl->args[0] = virt_to_machine( \
++ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
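++		/* thread.iopl caches the EFLAGS IOPL field (bits 12-13);
++		   a value of 0 is handed to Xen as IOPL 1. */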
++ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iopl;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iobitmap;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
++#endif
++ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++ BUG();
++
++ /*
++ * Restore %fs and %gs if needed.
++ *
++ * Glibc normally makes %fs be zero, and %gs is one of
++ * the TLS segments.
++ */
++ if (unlikely(next->fs))
++ loadsegment(fs, next->fs);
++
++ if (next->gs)
++ loadsegment(gs, next->gs);
++
++ /*
++ * Now maybe handle debug registers
++ */
++ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++ __switch_to_xtra(next_p);
++
++ disable_tsc(prev_p, next_p);
++
++ return prev_p;
++}
++
++asmlinkage int sys_fork(struct pt_regs regs)
++{
++ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++asmlinkage int sys_clone(struct pt_regs regs)
++{
++ unsigned long clone_flags;
++ unsigned long newsp;
++ int __user *parent_tidptr, *child_tidptr;
++
++ clone_flags = regs.ebx;
++ newsp = regs.ecx;
++ parent_tidptr = (int __user *)regs.edx;
++ child_tidptr = (int __user *)regs.edi;
++ if (!newsp)
++ newsp = regs.esp;
++ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(struct pt_regs regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++ int error;
++ char * filename;
++
++ filename = getname((char __user *) regs.ebx);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ goto out;
++ error = do_execve(filename,
++ (char __user * __user *) regs.ecx,
++ (char __user * __user *) regs.edx,
++ &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ /* Make sure we don't return using sysenter.. */
++ set_thread_flag(TIF_IRET);
++ }
++ putname(filename);
++out:
++ return error;
++}
++
++#define top_esp (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long ebp, esp, eip;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++ stack_page = (unsigned long)task_stack_page(p);
++ esp = p->thread.esp;
++ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++ return 0;
++ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
++ ebp = *(unsigned long *) esp;
++ do {
++ if (ebp < stack_page || ebp > top_ebp+stack_page)
++ return 0;
++ eip = *(unsigned long *) (ebp+4);
++ if (!in_sched_functions(eip))
++ return eip;
++ ebp = *(unsigned long *) ebp;
++ } while (count++ < 16);
++ return 0;
++}
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++ struct thread_struct *t = &current->thread;
++ int idx;
++
++ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++ if (desc_empty(t->tls_array + idx))
++ return idx + GDT_ENTRY_TLS_MIN;
++ return -ESRCH;
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++ struct thread_struct *t = &current->thread;
++ struct user_desc info;
++ struct desc_struct *desc;
++ int cpu, idx;
++
++ if (copy_from_user(&info, u_info, sizeof(info)))
++ return -EFAULT;
++ idx = info.entry_number;
++
++ /*
++ * index -1 means the kernel should try to find and
++ * allocate an empty descriptor:
++ */
++ if (idx == -1) {
++ idx = get_free_idx();
++ if (idx < 0)
++ return idx;
++ if (put_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ }
++
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ /*
++ * We must not get preempted while modifying the TLS.
++ */
++ cpu = get_cpu();
++
++ if (LDT_empty(&info)) {
++ desc->a = 0;
++ desc->b = 0;
++ } else {
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++ load_TLS(t, cpu);
++
++ put_cpu();
++
++ return 0;
++}
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
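++/*
++ * A GDT descriptor is two 32-bit words ('a' = low, 'b' = high). The
++ * helpers below reassemble the scattered fields: the base lives in
++ * a[31:16], b[7:0] and b[31:24]; the limit in a[15:0] and b[19:16].
++ */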
++#define GET_BASE(desc) ( \
++ (((desc)->a >> 16) & 0x0000ffff) | \
++ (((desc)->b << 16) & 0x00ff0000) | \
++ ( (desc)->b & 0xff000000) )
++
++#define GET_LIMIT(desc) ( \
++ ((desc)->a & 0x0ffff) | \
++ ((desc)->b & 0xf0000) )
++
++#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
++#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
++
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++ struct user_desc info;
++ struct desc_struct *desc;
++ int idx;
++
++ if (get_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ memset(&info, 0, sizeof(info));
++
++ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ info.entry_number = idx;
++ info.base_addr = GET_BASE(desc);
++ info.limit = GET_LIMIT(desc);
++ info.seg_32bit = GET_32BIT(desc);
++ info.contents = GET_CONTENTS(desc);
++ info.read_exec_only = !GET_WRITABLE(desc);
++ info.limit_in_pages = GET_LIMIT_PAGES(desc);
++ info.seg_not_present = !GET_PRESENT(desc);
++ info.useable = GET_USEABLE(desc);
++
++ if (copy_to_user(u_info, &info, sizeof(info)))
++ return -EFAULT;
++ return 0;
++}
++
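++/*
++ * Randomize the initial stack top by up to 8KB when VA-space
++ * randomization is enabled, keeping it 16-byte aligned.
++ */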
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/quirks-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/quirks-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/quirks-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/quirks-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,47 @@
++/*
++ * This file contains work-arounds for x86 and x86_64 platform bugs.
++ */
++#include <linux/pci.h>
++#include <linux/irq.h>
++
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
++
++static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++{
++ u8 config, rev;
++ u32 word;
++
++ /* BIOS may enable hardware IRQ balancing for
++	 * E7520/E7320/E7525 (revision ID 0x9 and below)

++ * based platforms.
++ * Disable SW irqbalance/affinity on those platforms.
++ */
++ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
++ if (rev > 0x9)
++ return;
++
++	printk(KERN_INFO "Intel E7520/7320/7525 detected.\n");
++
++	/* enable access to config space */
++ pci_read_config_byte(dev, 0xf4, &config);
++ pci_write_config_byte(dev, 0xf4, config|0x2);
++
++ /* read xTPR register */
++ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
++
++ if (!(word & (1 << 13))) {
++ struct xen_platform_op op;
++ printk(KERN_INFO "Disabling irq balancing and affinity\n");
++ op.cmd = XENPF_platform_quirk;
++ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++ }
++
++	/* put back the original value for config space */
++ if (!(config & 0x2))
++ pci_write_config_byte(dev, 0xf4, config);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/relocate_kernel.S linux-2.6.18-xen-3.3.0/arch/i386/kernel/relocate_kernel.S
+--- linux-2.6.18.8/arch/i386/kernel/relocate_kernel.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/relocate_kernel.S 2008-08-21 11:36:07.000000000 +0200
+@@ -7,16 +7,138 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/page.h>
++#include <asm/kexec.h>
++
++/*
++ * Must be relocatable PIC code callable as a C function
++ */
++
++#define PTR(x) (x << 2)
++#define PAGE_ALIGNED (1 << PAGE_SHIFT)
++#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
++#define PAE_PGD_ATTR 0x01 /* _PAGE_PRESENT */
++
++ .text
++ .align PAGE_ALIGNED
++ .globl relocate_kernel
++relocate_kernel:
++ movl 8(%esp), %ebp /* list of pages */
++
++#ifdef CONFIG_X86_PAE
++ /* map the control page at its virtual address */
++
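++	/*
++	 * Each and/shr pair below extracts the PAE table index from the
++	 * address and scales it by the 8-byte entry size: bits 31:30 for
++	 * the PGD, bits 29:21 for the PMD, bits 20:12 for the PTE.
++	 */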
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xc0000000, %eax
++ shrl $27, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PMD_0)(%ebp), %edx
++ orl $PAE_PGD_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PMD_0)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x3fe00000, %eax
++ shrl $18, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_0)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_0)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x001ff000, %eax
++ shrl $9, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ /* identity map the control page at its physical address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xc0000000, %eax
++ shrl $27, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PMD_1)(%ebp), %edx
++ orl $PAE_PGD_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PMD_1)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x3fe00000, %eax
++ shrl $18, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_1)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_1)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x001ff000, %eax
++ shrl $9, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++#else
++ /* map the control page at its virtual address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xffc00000, %eax
++ shrl $20, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_0)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_0)(%ebp), %edi
++ movl PTR(VA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x003ff000, %eax
++ shrl $10, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ /* identity map the control page at its physical address */
++
++ movl PTR(VA_PGD)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0xffc00000, %eax
++ shrl $20, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_PTE_1)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++
++ movl PTR(VA_PTE_1)(%ebp), %edi
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %eax
++ andl $0x003ff000, %eax
++ shrl $10, %eax
++ addl %edi, %eax
++
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edx
++ orl $PAGE_ATTR, %edx
++ movl %edx, (%eax)
++#endif
+
+- /*
+- * Must be relocatable PIC code callable as a C function, that once
+- * it starts can not use the previous processes stack.
+- */
+- .globl relocate_new_kernel
+ relocate_new_kernel:
+ /* read the arguments and say goodbye to the stack */
+ movl 4(%esp), %ebx /* page_list */
+- movl 8(%esp), %ebp /* reboot_code_buffer */
++ movl 8(%esp), %ebp /* list of pages */
+ movl 12(%esp), %edx /* start address */
+ movl 16(%esp), %ecx /* cpu_has_pae */
+
+@@ -24,11 +146,57 @@ relocate_new_kernel:
+ pushl $0
+ popfl
+
+- /* set a new stack at the bottom of our page... */
+- lea 4096(%ebp), %esp
++ /* get physical address of control page now */
++ /* this is impossible after page table switch */
++ movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
+
+- /* store the parameters back on the stack */
+- pushl %edx /* store the start address */
++ /* switch to new set of page tables */
++ movl PTR(PA_PGD)(%ebp), %eax
++ movl %eax, %cr3
++
++ /* setup idt */
++ movl %edi, %eax
++ addl $(idt_48 - relocate_kernel), %eax
++ lidtl (%eax)
++
++ /* setup gdt */
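++	/*
++	 * Patch the run-time GDT base into the gdt_48 pseudo-descriptor
++	 * (the +2 skips its 16-bit limit field) before loading it.
++	 */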
++ movl %edi, %eax
++ addl $(gdt - relocate_kernel), %eax
++ movl %edi, %esi
++ addl $((gdt_48 - relocate_kernel) + 2), %esi
++ movl %eax, (%esi)
++
++ movl %edi, %eax
++ addl $(gdt_48 - relocate_kernel), %eax
++ lgdtl (%eax)
++
++ /* setup data segment registers */
++ mov $(gdt_ds - gdt), %eax
++ mov %eax, %ds
++ mov %eax, %es
++ mov %eax, %fs
++ mov %eax, %gs
++ mov %eax, %ss
++
++ /* setup a new stack at the end of the physical control page */
++ lea 4096(%edi), %esp
++
++ /* load new code segment and jump to identity mapped page */
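++	/*
++	 * This is PIC code, so the branch target must be computed at run
++	 * time: the final three pushes build an iret frame (EFLAGS, the
++	 * new %cs selector, the EIP of identity_mapped) and iretl then
++	 * performs the far transfer.
++	 */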
++ movl %edi, %esi
++ xorl %eax, %eax
++ pushl %eax
++ pushl %esi
++ pushl %eax
++ movl $(gdt_cs - gdt), %eax
++ pushl %eax
++ movl %edi, %eax
++ addl $(identity_mapped - relocate_kernel),%eax
++ pushl %eax
++ iretl
++
++identity_mapped:
++ /* store the start address on the stack */
++ pushl %edx
+
+ /* Set cr0 to a known state:
+ * 31 0 == Paging disabled
+@@ -113,8 +281,20 @@ relocate_new_kernel:
+ xorl %edi, %edi
+ xorl %ebp, %ebp
+ ret
+-relocate_new_kernel_end:
+
+- .globl relocate_new_kernel_size
+-relocate_new_kernel_size:
+- .long relocate_new_kernel_end - relocate_new_kernel
++ .align 16
++gdt:
++ .quad 0x0000000000000000 /* NULL descriptor */
++gdt_cs:
++ .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
++gdt_ds:
++ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++gdt_end:
++
++gdt_48:
++ .word gdt_end - gdt - 1 /* limit */
++ .long 0 /* base - filled in by code above */
++
++idt_48:
++ .word 0 /* limit */
++ .long 0 /* base */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/setup.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/setup.c
+--- linux-2.6.18.8/arch/i386/kernel/setup.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/setup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -261,7 +261,12 @@ static struct resource standard_io_resou
+ }, {
+ .name = "keyboard",
+ .start = 0x0060,
+- .end = 0x006f,
++ .end = 0x0060,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0064,
++ .end = 0x0064,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "dma page reg",
+@@ -956,6 +961,28 @@ efi_memory_present_wrapper(unsigned long
+ return 0;
+ }
+
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ const struct e820entry *ei = &e820.map[i];
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/setup-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/setup-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/setup-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/setup-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1924 @@
++/*
++ * linux/arch/i386/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ * Memory region support
++ * David Parsons <orc@pell.chi.il.us>, July-August 1999
++ *
++ * Added E820 sanitization routine (removes overlapping memory regions);
++ * Brian Moyle <bmoyle@mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ * Patrick Mochel <mochel@osdl.org>, March 2002
++ *
++ * Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ *
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mmzone.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/platform_device.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/kernel.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++
++#include <video/edid.h>
++
++#include <asm/apic.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/firmware.h>
++#include <xen/xencons.h>
++#include <setup_arch.h>
++#include <bios_ebda.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++/* Forward Declaration. */
++void __init find_max_pfn(void);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++int disable_pse __devinitdata = 0;
++
++/*
++ * Machine setup..
++ */
++
++#ifdef CONFIG_EFI
++int efi_enabled = 0;
++EXPORT_SYMBOL(efi_enabled);
++#endif
++
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++#ifdef CONFIG_ACPI
++ int acpi_disabled = 0;
++#else
++ int acpi_disabled = 1;
++#endif
++EXPORT_SYMBOL(acpi_disabled);
++
++#ifdef CONFIG_ACPI
++int __initdata acpi_force = 0;
++extern acpi_interrupt_flags acpi_sci_flags;
++#endif
++
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
++
++/* For PCI or other memory-mapped resources */
++unsigned long pci_mem_start = 0x10000000;
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
++ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++EXPORT_SYMBOL(drive_info);
++#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct apm_info apm_info;
++EXPORT_SYMBOL(apm_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++#ifndef CONFIG_XEN
++#define copy_edid() (edid_info = EDID_INFO)
++#endif
++struct ist_info ist_info;
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
++ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern void early_cpu_init(void);
++extern void generic_apic_probe(char *);
++extern int root_mountflags;
++
++unsigned long saved_videomode;
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++static char command_line[COMMAND_LINE_SIZE];
++
++unsigned char __initdata boot_params[PARAM_SIZE];
++
++static struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++ .name = "Adapter ROM",
++ .start = 0xc8000,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource standard_io_resources[] = { {
++ .name = "dma1",
++ .start = 0x0000,
++ .end = 0x001f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic1",
++ .start = 0x0020,
++ .end = 0x0021,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer0",
++ .start = 0x0040,
++ .end = 0x0043,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer1",
++ .start = 0x0050,
++ .end = 0x0053,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0060,
++ .end = 0x0060,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0064,
++ .end = 0x0064,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma page reg",
++ .start = 0x0080,
++ .end = 0x008f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic2",
++ .start = 0x00a0,
++ .end = 0x00a1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma2",
++ .start = 0x00c0,
++ .end = 0x00df,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "fpu",
++ .start = 0x00f0,
++ .end = 0x00ff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++} };
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
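++/* An option ROM is valid when all of its bytes sum to zero (mod 256). */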
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/*
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
++ */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
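++/*
++ * The p2m table translates pseudo-physical page frames to machine
++ * frames; pfn_to_mfn_frame_list_list is the top level of the
++ * three-level structure that publishes it to the hypervisor (used
++ * by save/restore).
++ */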
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type)
++{
++ int x;
++
++ if (!efi_enabled) {
++ x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++ }
++} /* add_memory_region */
++
++static void __init limit_regions(unsigned long long size)
++{
++ unsigned long long current_addr = 0;
++ int i;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map, i = 0; p < memmap.map_end;
++ p += memmap.desc_size, i++) {
++ md = p;
++ current_addr = md->phys_addr + (md->num_pages << 12);
++ if (md->type == EFI_CONVENTIONAL_MEMORY) {
++ if (current_addr >= size) {
++ md->num_pages -=
++ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
++ memmap.nr_map = i + 1;
++ return;
++ }
++ }
++ }
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ current_addr = e820.map[i].addr + e820.map[i].size;
++ if (current_addr < size)
++ continue;
++
++ if (e820.map[i].type != E820_RAM)
++ continue;
++
++ if (e820.map[i].addr >= size) {
++ /*
++ * This region starts past the end of the
++ * requested size, skip it completely.
++ */
++ e820.nr_map = i;
++ } else {
++ e820.nr_map = i + 1;
++ e820.map[i].size -= current_addr - size;
++ }
++ return;
++ }
++#ifdef CONFIG_XEN
++ if (i==e820.nr_map && current_addr < size) {
++ /*
++ * The e820 map finished before our requested size so
++ * extend the final entry to the requested address.
++ */
++ --i;
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size -= current_addr - size;
++ else
++ add_memory_region(current_addr, size - current_addr, E820_RAM);
++ }
++#endif
++}
++
++#define E820_DEBUG 1
++
++static void __init print_memory_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ e820.map[i].addr,
++ e820.map[i].addr + e820.map[i].size);
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %lu\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++};
++static struct change_member change_point_list[2*E820MAX] __initdata;
++static struct change_member *change_point[2*E820MAX] __initdata;
++static struct e820entry *overlap_list[E820MAX] __initdata;
++static struct e820entry new_bios[E820MAX] __initdata;
++
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx; /* true number of change-points */
++
++ /* sort change-point list by memory addresses (low -> high) */
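++	/* (a simple bubble sort -- the list holds at most 2*E820MAX entries) */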
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++	/* loop through change-points, determining the effect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long long start = biosmap->addr;
++ unsigned long long size = biosmap->size;
++ unsigned long long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++ } else
++ machine_e820 = e820;
++#endif
++
++ return 0;
++}
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++#ifndef CONFIG_XEN
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#endif
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++static void __init parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = saved_command_line;
++ int len = 0, max_cmdline;
++ int userdef = 0;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ /* Save unparsed command line copy for /proc/cmdline */
++ saved_command_line[max_cmdline-1] = '\0';
++
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++ /*
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem= [also see Documentation/i386/boot.txt]
++ */
++ if (!memcmp(from, "mem=", 4)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+4, "nopentium", 9)) {
++ from += 9+4;
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long mem_size;
++
++ mem_size = memparse(from+4, &from);
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++
++ else if (!memcmp(from, "memmap=", 7)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ find_max_pfn();
++ saved_max_pfn = max_pfn;
++#endif
++ from += 8+7;
++ e820.nr_map = 0;
++ userdef = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(from+7, &from);
++ if (*from == '@') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*from == '#') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*from == '$') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++ }
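++		/*
++		 * Examples: "memmap=exactmap" discards the BIOS map;
++		 * "memmap=64M@16M" adds RAM, "...#addr" adds ACPI data and
++		 * "...$addr" adds a reserved region at the given address.
++		 */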
++
++ else if (!memcmp(from, "noexec=", 7))
++ noexec_setup(from + 7);
++
++
++#ifdef CONFIG_X86_MPPARSE
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter */
++ else if (!memcmp(from, "acpi=off", 8)) {
++ disable_acpi();
++ }
++
++ /* acpi=force to over-ride black-list */
++ else if (!memcmp(from, "acpi=force", 10)) {
++ acpi_force = 1;
++ acpi_ht = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (!memcmp(from, "acpi=strict", 11)) {
++ acpi_strict = 1;
++ }
++
++ /* Limit ACPI just to boot-time to enable HT */
++ else if (!memcmp(from, "acpi=ht", 7)) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++
++ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
++ else if (!memcmp(from, "pci=noacpi", 10)) {
++ acpi_disable_pci();
++ }
++ /* "acpi=noirq" disables ACPI interrupt routing */
++ else if (!memcmp(from, "acpi=noirq", 10)) {
++ acpi_noirq_set();
++ }
++
++ else if (!memcmp(from, "acpi_sci=edge", 13))
++ acpi_sci_flags.trigger = 1;
++
++ else if (!memcmp(from, "acpi_sci=level", 14))
++ acpi_sci_flags.trigger = 3;
++
++ else if (!memcmp(from, "acpi_sci=high", 13))
++ acpi_sci_flags.polarity = 1;
++
++ else if (!memcmp(from, "acpi_sci=low", 12))
++ acpi_sci_flags.polarity = 3;
++
++#ifdef CONFIG_X86_IO_APIC
++ else if (!memcmp(from, "acpi_skip_timer_override", 24))
++ acpi_skip_timer_override = 1;
++
++ if (!memcmp(from, "disable_timer_pin_1", 19))
++ disable_timer_pin_1 = 1;
++ if (!memcmp(from, "enable_timer_pin_1", 18))
++ disable_timer_pin_1 = -1;
++
++ /* disable IO-APIC */
++ else if (!memcmp(from, "noapic", 6))
++ disable_ioapic_setup();
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /* enable local APIC */
++ else if (!memcmp(from, "lapic", 5))
++ lapic_enable();
++
++ /* disable local APIC */
++ else if (!memcmp(from, "nolapic", 6))
++ lapic_disable();
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++	 * that linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++		printk("Ignoring crashkernel command line; "
++			"parameter will be supplied by Xen\n");
++#endif
++ }
++#endif
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++ else if (!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++ /*
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
++ */
++ else if (!memcmp(from, "highmem=", 8))
++ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
++
++ /*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++ else if (!memcmp(from, "vmalloc=", 8))
++ __VMALLOC_RESERVE = memparse(from+8, &from);
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ print_memory_map("user");
++ }
++}
++
++/*
++ * Callback for efi_memory_walk.
++ */
++static int __init
++efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++{
++ unsigned long *max_pfn = arg, pfn;
++
++ if (start < end) {
++ pfn = PFN_UP(end -1);
++ if (pfn > *max_pfn)
++ *max_pfn = pfn;
++ }
++ return 0;
++}
++
++static int __init
++efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
++{
++ memory_present(0, start, end);
++ return 0;
++}
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ const struct e820entry *ei = &e820.map[i];
++#else
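++	/* DomU has no view of the machine memory map; only dom0 can tell. */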
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
++ /*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted and
++ * non-overlapping, which is the case
++ */
++int __init
++e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
++{
++ u64 start = s;
++ u64 end = e;
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++ if (type && ei->type != type)
++ continue;
++		/* does the region (or part of it) overlap <start,end>? */
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ /* if the region is at the beginning of <start,end> we move
++ * start to the end of the region since it's ok until there
++ */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full
++ * coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++void __init find_max_pfn(void)
++{
++ int i;
++
++ max_pfn = 0;
++ if (efi_enabled) {
++ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
++ efi_memmap_walk(efi_memory_present_wrapper, NULL);
++ return;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long start, end;
++ /* RAM? */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ start = PFN_UP(e820.map[i].addr);
++ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++ if (start >= end)
++ continue;
++ if (end > max_pfn)
++ max_pfn = end;
++ memory_present(0, start, end);
++ }
++}
++
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++ unsigned long max_low_pfn;
++
++ max_low_pfn = max_pfn;
++ if (max_low_pfn > MAXMEM_PFN) {
++ if (highmem_pages == -1)
++ highmem_pages = max_pfn - MAXMEM_PFN;
++ if (highmem_pages + MAXMEM_PFN < max_pfn)
++ max_pfn = MAXMEM_PFN + highmem_pages;
++ if (highmem_pages + MAXMEM_PFN > max_pfn) {
++ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++ /* Maximum memory usable is what is directly addressable */
++		printk(KERN_WARNING "Warning: only %ldMB will be used.\n",
++ MAXMEM>>20);
++ if (max_pfn > MAX_NONPAE_PFN)
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ else
++ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++ max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++ if (max_pfn > MAX_NONPAE_PFN) {
++ max_pfn = MAX_NONPAE_PFN;
++			printk(KERN_WARNING "Warning: only 4GB will be used.\n");
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ }
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++ } else {
++ if (highmem_pages == -1)
++ highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++ if (highmem_pages >= max_pfn) {
++			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++ highmem_pages = 0;
++ }
++ if (highmem_pages) {
++ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn -= highmem_pages;
++ }
++#else
++ if (highmem_pages)
++ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
++ }
++ return max_low_pfn;
++}
++
++/*
++ * Free all available memory for boot time allocation. Used
++ * as a callback function by efi_memory_walk()
++ */
++
++static int __init
++free_available_memory(unsigned long start, unsigned long end, void *arg)
++{
++ /* check max_low_pfn */
++ if (start >= (max_low_pfn << PAGE_SHIFT))
++ return 0;
++ if (end >= (max_low_pfn << PAGE_SHIFT))
++ end = max_low_pfn << PAGE_SHIFT;
++ if (start < end)
++ free_bootmem(start, end - start);
++
++ return 0;
++}
++/*
++ * Register fully available low RAM pages with the bootmem allocator.
++ */
++static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
++{
++ int i;
++
++ if (efi_enabled) {
++ efi_memmap_walk(free_available_memory, NULL);
++ return;
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long curr_pfn, last_pfn, size;
++ /*
++ * Reserve usable low memory
++ */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ /*
++ * We are rounding up the start address of usable memory:
++ */
++ curr_pfn = PFN_UP(e820.map[i].addr);
++ if (curr_pfn >= max_low_pfn)
++ continue;
++ /*
++ * ... and at the end of the usable range downwards:
++ */
++ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++
++#ifdef CONFIG_XEN
++ /*
++ * Truncate to the number of actual pages currently
++ * present.
++ */
++ if (last_pfn > xen_start_info->nr_pages)
++ last_pfn = xen_start_info->nr_pages;
++#endif
++
++ if (last_pfn > max_low_pfn)
++ last_pfn = max_low_pfn;
++
++ /*
++ * .. finally, did all the rounding and playing
++ * around just make the area go away?
++ */
++ if (last_pfn <= curr_pfn)
++ continue;
++
++ size = last_pfn - curr_pfn;
++ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++ }
++}
++
++#ifndef CONFIG_XEN
++/*
++ * workaround for Dell systems that neglect to reserve EBDA
++ */
++static void __init reserve_ebda_region(void)
++{
++ unsigned int addr;
++ addr = get_bios_ebda();
++ if (addr)
++ reserve_bootmem(addr, PAGE_SIZE);
++}
++#endif
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames;
++
++ find_max_pfn();
++
++ max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++ highstart_pfn = highend_pfn = max_pfn;
++ if (max_pfn > max_low_pfn) {
++ highstart_pfn = max_low_pfn;
++ }
++ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++ pages_to_mb(highend_pfn - highstart_pfn));
++#endif
++ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++ pages_to_mb(max_low_pfn));
++
++ setup_bootmem_allocator();
++
++ return max_low_pfn;
++}
++
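++/*
++ * Split low memory into ZONE_DMA (below MAX_DMA_ADDRESS, i.e. the
++ * first 16MB on x86) and ZONE_NORMAL; everything above max_low_pfn
++ * becomes ZONE_HIGHMEM.
++ */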
++void __init zone_sizes_init(void)
++{
++ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
++ unsigned int max_dma, low;
++
++ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
++ low = max_low_pfn;
++
++ if (low < max_dma)
++ zones_size[ZONE_DMA] = low;
++ else {
++ zones_size[ZONE_DMA] = max_dma;
++ zones_size[ZONE_NORMAL] = low - max_dma;
++#ifdef CONFIG_HIGHMEM
++ zones_size[ZONE_HIGHMEM] = highend_pfn - low;
++#endif
++ }
++ free_area_init(zones_size);
++}
++#else
++extern unsigned long __init setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_NEED_MULTIPLE_NODES */
++
++void __init setup_bootmem_allocator(void)
++{
++ unsigned long bootmap_size;
++ /*
++ * Initialize the boot-time allocator (with low memory only):
++ */
++ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
++
++ register_bootmem_low_pages(max_low_pfn);
++
++ /*
++ * Reserve the bootmem bitmap itself as well. We do this in two
++ * steps (first step was init_bootmem()) because this catches
++ * the (very unlikely) case of us accidentally initializing the
++ * bootmem allocator with an invalid RAM area.
++ */
++ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
++ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
++
++#ifndef CONFIG_XEN
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem(0, PAGE_SIZE);
++
++ /* reserve EBDA region, it's a 4K region */
++ reserve_ebda_region();
++
++ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
++	   PCI prefetch into it (errata #56). Usually the page is reserved anyway,
++ unless you have no PS/2 mouse plugged in. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 == 6)
++ reserve_bootmem(0xa0000 - 4096, 4096);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ INITRD_START + INITRD_SIZE,
++ max_low_pfn << PAGE_SHIFT);
++ initrd_start = 0;
++ }
++ }
++#endif
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++#endif
++#endif
++}
++
++/*
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem. node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
++ */
++void __init remapped_pgdat_init(void)
++{
++ int nid;
++
++ for_each_online_node(nid) {
++ if (nid != 0)
++ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++ }
++}
++
++/*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++static void __init
++legacy_init_iomem_resources(struct e820entry *e820, int nr_map,
++ struct resource *code_resource,
++ struct resource *data_resource)
++{
++ int i;
++
++ probe_roms();
++
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++#ifndef CONFIG_RESOURCES_64BIT
++ if (e820[i].addr + e820[i].size > 0x100000000ULL)
++ continue;
++#endif
++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ if (request_resource(&iomem_resource, res)) {
++ kfree(res);
++ continue;
++ }
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, code_resource);
++ request_resource(res, data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Locate an unused range of the physical address space below 4G which
++ * can be used for PCI mappings.
++ */
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long long last;
++ int i;
++
++ /*
++	 * Search for the biggest gap in the low 32 bits of the e820
++ * memory space.
++ */
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
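++	/* -round is the two's-complement mask of the power-of-two 'round',
++	 * so this advances pci_mem_start to the next 'round'-aligned
++	 * boundary above gapstart. */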
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
++
++/*
++ * Request address space for all standard resources
++ *
++ * This is called just before pcibios_init(), which is also a
++ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
++ */
++static int __init request_standard_resources(void)
++{
++ int i;
++
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return 0;
++
++ printk("Setting up standard PCI resources\n");
++#ifdef CONFIG_XEN
++ legacy_init_iomem_resources(machine_e820.map, machine_e820.nr_map,
++ &code_resource, &data_resource);
++#else
++ if (efi_enabled)
++ efi_initialize_iomem_resources(&code_resource, &data_resource);
++ else
++ legacy_init_iomem_resources(e820.map, e820.nr_map,
++ &code_resource, &data_resource);
++#endif
++
++ /* EFI systems may still have VGA */
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ return 0;
++}
++
++subsys_initcall(request_standard_resources);
++
++static void __init register_memory(void)
++{
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++ else
++#endif
++ e820_setup_gap(e820.map, e820.nr_map);
++}
++
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++ MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/*
++ * Determine if we were loaded by an EFI loader. If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization. Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
++ */
++void __init setup_arch(char **cmdline_p)
++{
++ int i, j, k, fpp;
++ struct physdev_set_iopl set_iopl;
++ unsigned long max_low_pfn;
++ unsigned long p2m_pages;
++
++ /* Force a quick death if the kernel panics (not domain 0). */
++ extern int panic_timeout;
++ if (!panic_timeout && !is_initial_xendomain())
++ panic_timeout = 1;
++
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_4gb_segments));
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables));
++
++ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++ pre_setup_arch_hook();
++ early_cpu_init();
++#ifdef CONFIG_SMP
++ prefill_possible_map();
++#endif
++
++ /*
++ * FIXME: This isn't an official loader_type right
++ * now but does currently work with elilo.
++ * If we were configured as an EFI kernel, check to make
++ * sure that we were loaded correctly from elilo and that
++ * the system table is valid. If not, then initialize normally.
++ */
++#ifdef CONFIG_EFI
++ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++ efi_enabled = 1;
++#endif
++
++ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++ */
++ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ drive_info = DRIVE_INFO;
++ screen_info = SCREEN_INFO;
++ copy_edid();
++ apm_info.bios = APM_BIOS_INFO;
++ ist_info = IST_INFO;
++ saved_videomode = VIDEO_MODE;
++ if( SYS_DESC_TABLE.length != 0 ) {
++ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++ machine_id = SYS_DESC_TABLE.table[0];
++ machine_submodel_id = SYS_DESC_TABLE.table[1];
++ BIOS_revision = SYS_DESC_TABLE.table[2];
++ }
++ bootloader_type = LOADER_TYPE;
++
++ if (is_initial_xendomain()) {
++ const struct dom0_vga_console_info *info =
++ (void *)((char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++
++ dom0_init_screen_info(info,
++ xen_start_info->console.dom0.info_size);
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++
++ ARCH_SETUP
++ if (efi_enabled)
++ efi_init();
++ else {
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ print_memory_map(machine_specific_memory_setup());
++ }
++
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++
++ code_resource.start = virt_to_phys(_text);
++ code_resource.end = virt_to_phys(_etext)-1;
++ data_resource.start = virt_to_phys(_etext);
++ data_resource.end = virt_to_phys(_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++#ifdef CONFIG_EARLY_PRINTK
++ {
++ char *s = strstr(*cmdline_p, "earlyprintk=");
++ if (s) {
++ setup_early_printk(strchr(s, '=') + 1);
++ printk("early console enabled\n");
++ }
++ }
++#endif
++
++ max_low_pfn = setup_memory();
++
++ /*
++	 * NOTE: before this point _nobody_ is allowed to allocate
++	 * any memory using the bootmem allocator.  Although the
++	 * allocator is now initialised, only the first 8MB of the kernel
++	 * virtual address space has been mapped.  All allocations before
++	 * paging_init() has completed must use the alloc_bootmem_low_pages()
++	 * variant (which allocates DMA'able memory), and care must be taken
++	 * not to exceed the 8MB limit.
++ */
++
++#ifdef CONFIG_SMP
++ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
++#endif
++ paging_init();
++ remapped_pgdat_init();
++ sparse_init();
++ zone_sizes_init();
++
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++
++ p2m_pages = max_pfn;
++ if (xen_start_info->nr_pages > max_pfn) {
++ /*
++ * the max_pfn was shrunk (probably by mem= or highmem=
++ * kernel parameter); shrink reservation with the HV
++ */
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ unsigned int difference;
++ int ret;
++
++ difference = xen_start_info->nr_pages - max_pfn;
++
++ set_xen_guest_handle(reservation.extent_start,
++ ((unsigned long *)xen_start_info->mfn_list) + max_pfn);
++ reservation.nr_extents = difference;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON (ret != difference);
++ }
++ else if (max_pfn > xen_start_info->nr_pages)
++ p2m_pages = xen_start_info->nr_pages;
++
++ /* Make sure we have a correctly sized P->M table. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping = alloc_bootmem_low_pages(
++ max_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ max_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ p2m_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++		 * Initialise the list of frames that specifies the frames
++		 * making up the p2m table. Used by save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
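++		/*
++		 * With 4KB pages and 4-byte longs, fpp = 1024: each p2m page
++		 * holds 1024 entries (4MB of pseudo-physical space), each
++		 * frame-list page references 1024 p2m pages (4GB), and the
++		 * 16-entry list-of-lists therefore covers up to 64GB.
++		 */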
++ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=16);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_low_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
++
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
++
++ /*
++ * NOTE: at this point the bootmem allocator is fully available.
++ */
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_apic_probe(*cmdline_p);
++#endif
++ if (efi_enabled)
++ efi_map_memmap();
++
++ set_iopl.iopl = 1;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++#ifdef CONFIG_ACPI
++ if (!is_initial_xendomain()) {
++ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++ acpi_disabled = 1;
++ acpi_ht = 0;
++ }
++
++ /*
++ * Parse the ACPI tables for possible boot-time SMP configuration.
++ */
++ acpi_boot_table_init();
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ check_acpi_pci(); /* Checks more than just ACPI actually */
++#endif
++
++#ifdef CONFIG_ACPI
++ acpi_boot_init();
++
++#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
++ if (def_to_bigsmp)
++ printk(KERN_WARNING "More than 8 CPUs detected and "
++ "CONFIG_X86_PC cannot handle it.\nUse "
++ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
++#endif
++#endif
++#ifdef CONFIG_X86_LOCAL_APIC
++ if (smp_found_config)
++ get_smp_config();
++#endif
++
++ register_memory();
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ if (!efi_enabled ||
++ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ tsc_init();
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/smp-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/smp-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/smp-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/smp-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,605 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <xen/evtchn.h>
++
++/*
++ * Some notes on x86 processor bugs affecting SMP operation:
++ *
++ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ * The Linux implications for SMP are handled as follows:
++ *
++ * Pentium III / [Xeon]
++ * None of the E1AP-E3AP errata are visible to the user.
++ *
++ * E1AP. see PII A1AP
++ * E2AP. see PII A2AP
++ * E3AP. see PII A3AP
++ *
++ * Pentium II / [Xeon]
++ * None of the A1AP-A3AP errata are visible to the user.
++ *
++ * A1AP. see PPro 1AP
++ * A2AP. see PPro 2AP
++ * A3AP. see PPro 7AP
++ *
++ * Pentium Pro
++ *		None of the 1AP-9AP errata are visible to the normal user,
++ * except occasional delivery of 'spurious interrupt' as trap #15.
++ * This is very rare and a non-problem.
++ *
++ * 1AP. Linux maps APIC as non-cacheable
++ * 2AP. worked around in hardware
++ * 3AP. fixed in C0 and above steppings microcode update.
++ * Linux does not use excessive STARTUP_IPIs.
++ * 4AP. worked around in hardware
++ * 5AP. symmetric IO mode (normal Linux operation) not affected.
++ * 'noapic' mode has vector 0xf filled out properly.
++ * 6AP. 'noapic' mode might be affected - fixed in later steppings
++ *	7AP.	We do not assume writes to the LVT deasserting IRQs
++ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
++ * 9AP. We do not use mixed mode
++ *
++ * Pentium
++ * There is a marginal case where REP MOVS on 100MHz SMP
++ * machines with B stepping processors can fail. XXX should provide
++ * an L1cache=Writethrough or L1cache=off option.
++ *
++ *		B stepping CPUs may hang. There are hardware workarounds
++ *	for this. We warn about it in case your board doesn't have the
++ *	workarounds. Basically that's so I can tell anyone with a B stepping
++ *	CPU and SMP problems "tough".
++ *
++ * Specific items [From Pentium Processor Specification Update]
++ *
++ * 1AP. Linux doesn't use remote read
++ * 2AP. Linux doesn't trust APIC errors
++ * 3AP. We work around this
++ * 4AP. Linux never generated 3 interrupts of the same priority
++ * to cause a lost local interrupt.
++ * 5AP. Remote read is never used
++ * 6AP. not affected - worked around in hardware
++ * 7AP. not affected - worked around in hardware
++ * 8AP. worked around in hardware - we get explicit CS errors if not
++ * 9AP. only 'noapic' mode affected. Might generate spurious
++ * interrupts, we log only the first one and count the
++ * rest silently.
++ * 10AP. not affected - worked around in hardware
++ * 11AP. Linux reads the APIC between writes to avoid this, as per
++ * the documentation. Make sure you preserve this as it affects
++ * the C stepping chips too.
++ * 12AP. not affected - worked around in hardware
++ * 13AP. not affected - worked around in hardware
++ * 14AP. we always deassert INIT during bootup
++ * 15AP. not affected - worked around in hardware
++ * 16AP. not affected - worked around in hardware
++ * 17AP. not affected - worked around in hardware
++ * 18AP. not affected - worked around in hardware
++ * 19AP. not affected - worked around in BIOS
++ *
++ * If this sounds worrying, believe me these bugs are either ___RARE___
++ * or are signal timing bugs worked around in hardware, and there's
++ * nothing of note with C stepping upwards.
++ */
++
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
++{
++ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++
++ switch (vector) {
++ default:
++ icr |= APIC_DM_FIXED | vector;
++ break;
++ case NMI_VECTOR:
++ icr |= APIC_DM_NMI;
++ break;
++ }
++ return icr;
++}
++
++static inline int __prepare_ICR2 (unsigned int mask)
++{
++ return SET_APIC_DEST_FIELD(mask);
++}
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
++
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++void fastcall send_IPI_self(int vector)
++{
++ __send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
++
++/*
++ * This is only used on smaller machines.
++ */
++void send_IPI_mask_bitmask(cpumask_t mask, int vector)
++{
++ unsigned long flags;
++ unsigned int cpu;
++
++ local_irq_save(flags);
++ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, mask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++
++ local_irq_restore(flags);
++}
++
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
++{
++ send_IPI_mask_bitmask(mask, vector);
++}
++
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway.)
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL 0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context;
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * 	tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ *	1a5) change cr3.
++ * 1b) thread switch without mm change
++ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ unsigned long cpu;
++
++ cpu = get_cpu();
++
++ if (!cpu_isset(cpu, flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++	 * multiple CPUs is retried _only_ on the erroring CPUs,
++	 * it's staying as a return
++ *
++ * BUG();
++ */
++
++ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++ if (flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(flush_va);
++ } else
++ leave_mm(cpu);
++ }
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, flush_cpumask);
++ smp_mb__after_clear_bit();
++out:
++ put_cpu_no_resched();
++
++ return IRQ_HANDLED;
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ /*
++ * A couple of (to be removed) sanity checks:
++ *
++ * - current CPU must not be in mask
++ * - mask must exist :)
++ */
++ BUG_ON(cpus_empty(cpumask));
++ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++ BUG_ON(!mm);
++
++ /* If a CPU which we ran on has gone down, OK. */
++ cpus_and(cpumask, cpumask, cpu_online_map);
++ if (cpus_empty(cpumask))
++ return;
++
++ /*
++	 * I'm not happy about this global shared spinlock in the
++ * MM hot path, but we'll see how contended it is.
++ * Temporarily this turns IRQs off, so that lockups are
++ * detected by the NMI watchdog.
++ */
++ spin_lock(&tlbstate_lock);
++
++ flush_mm = mm;
++ flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++ atomic_set_mask(cpumask, &flush_cpumask);
++#else
++ {
++ int k;
++ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++ unsigned long *cpu_mask = (unsigned long *)&cpumask;
++ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++ }
++#endif
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++ while (!cpus_empty(flush_cpumask))
++ /* nothing. lockup detection does not belong here */
++ mb();
++
++ flush_mm = NULL;
++ flush_va = 0;
++ spin_unlock(&tlbstate_lock);
++}
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if(current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++
++#endif /* XEN */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
++{
++ WARN_ON(cpu_is_offline(cpu));
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++static struct call_data_struct *call_data;
++
++/**
++ * smp_call_function(): Run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ struct call_data_struct data;
++ int cpus;
++
++ /* Holding any lock stops cpus from going down. */
++ spin_lock(&call_lock);
++ cpus = num_online_cpus() - 1;
++ if (!cpus) {
++ spin_unlock(&call_lock);
++ return 0;
++ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ mb();
++
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (wait)
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++ spin_unlock(&call_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
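++
++/*
++ * Illustrative usage sketch (not part of the original code;
++ * drain_local_stats is a made-up handler):
++ *
++ *	static void drain_local_stats(void *unused)
++ *	{
++ *		... fast, non-blocking work; runs in IPI context ...
++ *	}
++ *
++ *	smp_call_function(drain_local_stats, NULL, 0, 1);
++ *
++ * runs the handler on every other online CPU and spins until all of
++ * them have finished (wait == 1).
++ */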
++
++static void stop_this_cpu (void * dummy)
++{
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_disable();
++ disable_all_local_evtchn();
++ if (cpu_data[smp_processor_id()].hlt_works_ok)
++ for(;;) halt();
++ for (;;);
++}
++
++/*
++ * this function calls the 'stop' function on all other CPUs in the system.
++ */
++
++void smp_send_stop(void)
++{
++ smp_call_function(stop_this_cpu, NULL, 1, 0);
++
++ local_irq_disable();
++ disable_all_local_evtchn();
++ local_irq_enable();
++}
++
++/*
++ * Reschedule callback. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ return IRQ_HANDLED;
++}
++
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++
++ return IRQ_HANDLED;
++}
++
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/sysenter.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/sysenter.c
+--- linux-2.6.18.8/arch/i386/kernel/sysenter.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/sysenter.c 2008-08-21 11:36:07.000000000 +0200
+@@ -23,6 +23,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/unistd.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ /*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+@@ -44,6 +48,7 @@ extern asmlinkage void sysenter_entry(vo
+
+ void enable_sep_cpu(void)
+ {
++#ifndef CONFIG_XEN
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+@@ -57,7 +62,36 @@ void enable_sep_cpu(void)
+ wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+ wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+- put_cpu();
++#else
++ extern asmlinkage void sysenter_entry_pv(void);
++ static struct callback_register sysenter = {
++ .type = CALLBACKTYPE_sysenter,
++ .address = { __KERNEL_CS, (unsigned long)sysenter_entry_pv },
++ };
++
++ if (!boot_cpu_has(X86_FEATURE_SEP))
++ return;
++
++ get_cpu();
++
++ if (xen_feature(XENFEAT_supervisor_mode_kernel))
++ sysenter.address.eip = (unsigned long)sysenter_entry;
++
++ switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
++ case 0:
++ break;
++#if CONFIG_XEN_COMPAT < 0x030200
++ case -ENOSYS:
++ sysenter.type = CALLBACKTYPE_sysenter_deprecated;
++ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
++ break;
++#endif
++ default:
++ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++ break;
++ }
++#endif
++ put_cpu();
+ }
+
+ /*
+@@ -75,11 +109,6 @@ int __init sysenter_setup(void)
+ #ifdef CONFIG_COMPAT_VDSO
+ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
+ printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+-#else
+- /*
+- * In the non-compat case the ELF coredumping code needs the fixmap:
+- */
+- __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+ #endif
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+@@ -142,6 +171,13 @@ int arch_setup_additional_pages(struct l
+ vma->vm_end = addr + PAGE_SIZE;
+ /* MAYWRITE to allow gdb to COW and set breakpoints */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++ /*
++ * Make sure the vDSO gets into every core dump.
++ * Dumping its contents makes post-mortem fully interpretable later
++ * without matching up the same kernel and hardware config to see
++ * what PC values meant.
++ */
++ vma->vm_flags |= VM_ALWAYSDUMP;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_ops = &syscall_vm_ops;
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/time-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/time-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/time-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/time-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1209 @@
++/*
++ * linux/arch/i386/kernel/time.c
++ *
++ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02 Alan Modra
++ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26 Markus Kuhn
++ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ * precision CMOS clock update
++ * 1996-05-03 Ingo Molnar
++ * fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
++ * "A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05 (Various)
++ * More robust do_fast_gettimeoffset() algorithm implemented
++ * (works with APM, Cyrix 6x86MX and Centaur C6),
++ * monotonic gettimeofday() with fast_get_timeoffset(),
++ * drift-proof precision TSC calibration on boot
++ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
++ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
++ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
++ * 1998-12-16 Andrea Arcangeli
++ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ * because it was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
++ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ * serialize accesses to xtime/lost_ticks).
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++#include <linux/cpufreq.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/sections.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++
++#include <asm/hpet.h>
++
++#include <asm/arch_hooks.h>
++
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++
++#if defined (__i386__)
++#include <asm/i8259.h>
++#endif
++
++int pit_latch_buggy; /* extern */
++
++#if defined(__x86_64__)
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++#endif
++
++unsigned int cpu_khz; /* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
++
++extern unsigned long wall_jiffies;
++
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
++
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
++
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++ u64 tsc_timestamp; /* TSC at last update of time vals. */
++ u64 system_timestamp; /* Time, in nanosecs, since boot. */
++ u32 tsc_to_nsec_mul;
++ u32 tsc_to_usec_mul;
++ int tsc_shift;
++ u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++static struct timeval monotonic_tv;
++static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time; /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static void __clock_was_set(void *unused)
++{
++ clock_was_set();
++}
++static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
++
++/*
++ * GCC 4.3 can turn loops over an induction variable into division. We do
++ * not support arbitrary 64-bit division, and so must break the induction.
++ */
++#define clobber_induction_variable(v) asm ( "" : "+r" (v) )
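++/*
++ * The empty asm with a "+r" constraint forces the value through a
++ * register and hides it from the optimizer, so the subtraction loops
++ * in __normalize_time() below cannot be rewritten as a 64-bit
++ * division/modulo (which would pull in libgcc helpers such as
++ * __umoddi3).
++ */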
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++ while (*nsec >= NSEC_PER_SEC) {
++ clobber_induction_variable(*nsec);
++ (*nsec) -= NSEC_PER_SEC;
++ (*sec)++;
++ }
++ while (*nsec < 0) {
++ clobber_induction_variable(*nsec);
++ (*nsec) += NSEC_PER_SEC;
++ (*sec)--;
++ }
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++ independent_wallclock = 1;
++ return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
++{
++ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
++
++#if 0
++static void delay_tsc(unsigned long loops)
++{
++ unsigned long bclock, now;
++
++ rdtscl(bclock);
++ do {
++ rep_nop();
++ rdtscl(now);
++ } while ((now - bclock) < loops);
++}
++
++struct timer_opts timer_tsc = {
++ .name = "tsc",
++ .delay = delay_tsc,
++};
++#endif
++
++/*
++ * Scale a 64-bit delta by shifting it and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++ u64 product;
++#ifdef __i386__
++ u32 tmp1, tmp2;
++#endif
++
++ if (shift < 0)
++ delta >>= -shift;
++ else
++ delta <<= shift;
++
++#ifdef __i386__
++ __asm__ (
++ "mul %5 ; "
++ "mov %4,%%eax ; "
++ "mov %%edx,%4 ; "
++ "mul %5 ; "
++ "xor %5,%5 ; "
++ "add %4,%%eax ; "
++ "adc %5,%%edx ; "
++ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
++ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
++#else
++ __asm__ (
++ "mul %%rdx ; shrd $32,%%rdx,%%rax"
++ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
++#endif
++
++ return product;
++}
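++
++/*
++ * Worked example: Xen's tsc_to_system_mul is a 32.32 fixed-point
++ * count of nanoseconds per (pre-shifted) TSC tick.  On a 2 GHz CPU
++ * with tsc_shift == 0, mul_frac = (10^9 << 32) / (2 * 10^9)
++ * = 0x80000000, so scale_delta(delta, mul_frac, 0) == delta / 2,
++ * i.e. 0.5ns per cycle.
++ */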
++
++#if 0 /* defined (__i386__) */
++int read_current_timer(unsigned long *timer_val)
++{
++ rdtscl(*timer_val);
++ return 0;
++}
++#endif
++
++void init_cpu_khz(void)
++{
++ u64 __cpu_khz = 1000000ULL << 32;
++ struct vcpu_time_info *info = &vcpu_info(0)->time;
++ do_div(__cpu_khz, info->tsc_to_system_mul);
++ if (info->tsc_shift < 0)
++ cpu_khz = __cpu_khz << -info->tsc_shift;
++ else
++ cpu_khz = __cpu_khz >> info->tsc_shift;
++}
++
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
++
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
++
++static void __update_wallclock(time_t sec, long nsec)
++{
++ long wtm_nsec, xtime_nsec;
++ time_t wtm_sec, xtime_sec;
++ u64 tmp, wc_nsec;
++
++ /* Adjust wall-clock time base based on wall_jiffies ticks. */
++ wc_nsec = processed_system_time;
++ wc_nsec += sec * (u64)NSEC_PER_SEC;
++ wc_nsec += nsec;
++ wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++
++ /* Split wallclock base into seconds and nanoseconds. */
++ tmp = wc_nsec;
++ xtime_nsec = do_div(tmp, 1000000000);
++ xtime_sec = (time_t)tmp;
++
++ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++ ntp_clear();
++}
++
++static void update_wallclock(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ do {
++ shadow_tv_version = s->wc_version;
++ rmb();
++ shadow_tv.tv_sec = s->wc_sec;
++ shadow_tv.tv_nsec = s->wc_nsec;
++ rmb();
++ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++ if (!independent_wallclock)
++ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++}
++
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(unsigned int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++ unsigned long flags;
++ u32 pre_version, post_version;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ local_irq_save(flags);
++
++ do {
++ pre_version = dst->version = src->version;
++ rmb();
++ dst->tsc_timestamp = src->tsc_timestamp;
++ dst->system_timestamp = src->system_time;
++ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
++ dst->tsc_shift = src->tsc_shift;
++ rmb();
++ post_version = src->version;
++ } while ((pre_version & 1) | (pre_version ^ post_version));
++
++ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++
++ local_irq_restore(flags);
++}
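++
++/*
++ * The retry condition above mirrors Xen's update protocol: the
++ * hypervisor increments src->version before and after touching the
++ * record, so an odd pre_version means an update was in flight and
++ * pre_version != post_version means we raced with one; either makes
++ * the expression non-zero and we go round again.
++ */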
++
++static inline int time_values_up_to_date(unsigned int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ rmb();
++ return (dst->version == src->version);
++}
++
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with. It is required for NMI access to the
++ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++ unsigned char val;
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ val = inb_p(RTC_PORT(1));
++ lock_cmos_suffix(addr);
++ return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
++
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ outb_p(val, RTC_PORT(1));
++ lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
++
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
++{
++ unsigned long seq;
++ unsigned long usec, sec;
++ unsigned long flags;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ u32 local_time_version;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ do {
++ unsigned long lost;
++
++ local_time_version = shadow->version;
++ seq = read_seqbegin(&xtime_lock);
++
++ usec = get_usec_offset(shadow);
++ lost = jiffies - wall_jiffies;
++
++ if (unlikely(lost))
++ usec += lost * (USEC_PER_SEC / HZ);
++
++ sec = xtime.tv_sec;
++ usec += (xtime.tv_nsec / NSEC_PER_USEC);
++
++ nsec = shadow->system_timestamp - processed_system_time;
++ __normalize_time(&sec, &nsec);
++ usec += (long)nsec / NSEC_PER_USEC;
++
++ if (unlikely(!time_values_up_to_date(cpu))) {
++ /*
++ * We may have blocked for a long time,
++ * rendering our calculations invalid
++ * (e.g. the time delta may have
++ * overflowed). Detect that and recalculate
++ * with fresh values.
++ */
++ get_time_values_from_xen(cpu);
++ continue;
++ }
++ } while (read_seqretry(&xtime_lock, seq) ||
++ (local_time_version != shadow->version));
++
++ put_cpu();
++
++ while (usec >= USEC_PER_SEC) {
++ usec -= USEC_PER_SEC;
++ sec++;
++ }
++
++ spin_lock_irqsave(&monotonic_lock, flags);
++ if ((sec > monotonic_tv.tv_sec) ||
++ ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec)))
++ {
++ monotonic_tv.tv_sec = sec;
++ monotonic_tv.tv_usec = usec;
++ } else {
++ sec = monotonic_tv.tv_sec;
++ usec = monotonic_tv.tv_usec;
++ }
++ spin_unlock_irqrestore(&monotonic_lock, flags);
++
++ tv->tv_sec = sec;
++ tv->tv_usec = usec;
++}
++
++EXPORT_SYMBOL(do_gettimeofday);
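++
++/*
++ * The monotonic_tv bookkeeping above clamps successive gettimeofday()
++ * results so the returned time never steps backwards (e.g. across a
++ * shadow-time refresh); do_settimeofday() below resets the saved value
++ * so that deliberate clock steps still take effect.
++ */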
++
++int do_settimeofday(struct timespec *tv)
++{
++ time_t sec;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ struct xen_platform_op op;
++
++ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ return -EINVAL;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ write_seqlock_irq(&xtime_lock);
++
++ /*
++	 * Ensure we don't get blocked for so long that our time delta
++	 * overflows. If that were to happen then our shadow time values would
++	 * be stale, so retry with fresh ones.
++ */
++ for (;;) {
++ nsec = tv->tv_nsec - get_nsec_offset(shadow);
++ if (time_values_up_to_date(cpu))
++ break;
++ get_time_values_from_xen(cpu);
++ }
++ sec = tv->tv_sec;
++ __normalize_time(&sec, &nsec);
++
++ if (is_initial_xendomain() && !independent_wallclock) {
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = shadow->system_timestamp;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++ update_wallclock();
++ } else if (independent_wallclock) {
++ nsec -= shadow->system_timestamp;
++ __normalize_time(&sec, &nsec);
++ __update_wallclock(sec, nsec);
++ }
++
++ /* Reset monotonic gettimeofday() timeval. */
++ spin_lock(&monotonic_lock);
++ monotonic_tv.tv_sec = 0;
++ monotonic_tv.tv_usec = 0;
++ spin_unlock(&monotonic_lock);
++
++ write_sequnlock_irq(&xtime_lock);
++
++ put_cpu();
++
++ clock_was_set();
++ return 0;
++}
++
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++ time_t sec;
++ s64 nsec;
++ struct xen_platform_op op;
++
++ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++ return;
++
++ write_seqlock_irq(&xtime_lock);
++
++ sec = xtime.tv_sec;
++ nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++ __normalize_time(&sec, &nsec);
++
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = processed_system_time;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++
++ update_wallclock();
++
++ write_sequnlock_irq(&xtime_lock);
++
++ /* Once per minute. */
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
++
++static int set_rtc_mmss(unsigned long nowtime)
++{
++ int retval;
++ unsigned long flags;
++
++ if (independent_wallclock || !is_initial_xendomain())
++ return 0;
++
++ /* gets recalled with irq locally disabled */
++ /* XXX - does irqsave resolve this? -johnstul */
++ spin_lock_irqsave(&rtc_lock, flags);
++ if (efi_enabled)
++ retval = efi_set_rtc_mmss(nowtime);
++ else
++ retval = mach_set_rtc_mmss(nowtime);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ * Note: This function is required to return accurate
++ * time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
++{
++ unsigned int cpu = get_cpu();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ u64 time;
++ u32 local_time_version;
++
++ do {
++ local_time_version = shadow->version;
++ barrier();
++ time = shadow->system_timestamp + get_nsec_offset(shadow);
++ if (!time_values_up_to_date(cpu))
++ get_time_values_from_xen(cpu);
++ barrier();
++ } while (local_time_version != shadow->version);
++
++ put_cpu();
++
++ return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
++
++#ifdef __x86_64__
++unsigned long long sched_clock(void)
++{
++ return monotonic_clock();
++}
++#endif
++
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++unsigned long profile_pc(struct pt_regs *regs)
++{
++ unsigned long pc = instruction_pointer(regs);
++
++#ifdef __x86_64__
++	/* Assume the lock function has either no stack frame or only a
++	   single-word stack frame.  This checks whether the address on the
++	   stack looks like a kernel text address.  There is a small window
++	   for false hits, but in that case the tick is just accounted to
++	   the spinlock function.  It would be better to write these
++	   functions in assembler again and check exactly. */
++ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ char *v = *(char **)regs->rsp;
++ if ((v >= _stext && v <= _etext) ||
++ (v >= _sinittext && v <= _einittext) ||
++ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
++ return (unsigned long)v;
++ return ((unsigned long *)regs->rsp)[1];
++ }
++#else
++ if (!user_mode_vm(regs) && in_lock_functions(pc))
++ return *(unsigned long *)(regs->ebp + 4);
++#endif
++
++ return pc;
++}
++EXPORT_SYMBOL(profile_pc);
++#endif
++
++/*
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we later on can estimate the time of day more exactly.
++ */
++irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++ s64 delta, delta_cpu, stolen, blocked;
++ u64 sched_time;
++ unsigned int i, cpu = smp_processor_id();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ /*
++ * Here we are in the timer irq handler. We just have irqs locally
++ * disabled but we don't know if the timer_bh is running on the other
++	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++	 * the irq version of write_lock because, as just said, we have irqs
++	 * locally disabled. -arca
++ */
++ write_seqlock(&xtime_lock);
++
++ do {
++ get_time_values_from_xen(cpu);
++
++ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
++ delta = delta_cpu =
++ shadow->system_timestamp + get_nsec_offset(shadow);
++ delta -= processed_system_time;
++ delta_cpu -= per_cpu(processed_system_time, cpu);
++
++ /*
++ * Obtain a consistent snapshot of stolen/blocked cycles. We
++ * can use state_entry_time to detect if we get preempted here.
++ */
++ do {
++ sched_time = runstate->state_entry_time;
++ barrier();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ barrier();
++ } while (sched_time != runstate->state_entry_time);
++ } while (!time_values_up_to_date(cpu));
++
++ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++ && printk_ratelimit()) {
++ printk("Timer ISR/%u: Time went backwards: "
++ "delta=%lld delta_cpu=%lld shadow=%lld "
++ "off=%lld processed=%lld cpu_processed=%lld\n",
++ cpu, delta, delta_cpu, shadow->system_timestamp,
++ (s64)get_nsec_offset(shadow),
++ processed_system_time,
++ per_cpu(processed_system_time, cpu));
++ for (i = 0; i < num_online_cpus(); i++)
++ printk(" %d: %lld\n", i,
++ per_cpu(processed_system_time, i));
++ }
++
++ /* System-wide jiffy work. */
++ while (delta >= NS_PER_TICK) {
++ delta -= NS_PER_TICK;
++ processed_system_time += NS_PER_TICK;
++ do_timer(regs);
++ }
++
++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++ update_wallclock();
++ if (keventd_up())
++ schedule_work(&clock_was_set_work);
++ }
++
++ write_sequnlock(&xtime_lock);
++
++ /*
++ * Account stolen ticks.
++ * HACK: Passing NULL to account_steal_time()
++ * ensures that the ticks are accounted as stolen.
++ */
++ if ((stolen > 0) && (delta_cpu > 0)) {
++ delta_cpu -= stolen;
++ if (unlikely(delta_cpu < 0))
++ stolen += delta_cpu; /* clamp local-time progress */
++ do_div(stolen, NS_PER_TICK);
++ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++ account_steal_time(NULL, (cputime_t)stolen);
++ }
++
++ /*
++ * Account blocked ticks.
++ * HACK: Passing idle_task to account_steal_time()
++ * ensures that the ticks are accounted as idle/wait.
++ */
++ if ((blocked > 0) && (delta_cpu > 0)) {
++ delta_cpu -= blocked;
++ if (unlikely(delta_cpu < 0))
++ blocked += delta_cpu; /* clamp local-time progress */
++ do_div(blocked, NS_PER_TICK);
++ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
++ account_steal_time(idle_task(cpu), (cputime_t)blocked);
++ }
++
++ /* Account user/system ticks. */
++ if (delta_cpu > 0) {
++ do_div(delta_cpu, NS_PER_TICK);
++ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++ if (user_mode_vm(regs))
++ account_user_time(current, (cputime_t)delta_cpu);
++ else
++ account_system_time(current, HARDIRQ_OFFSET,
++ (cputime_t)delta_cpu);
++ }
++
++ /* Offlined for more than a few seconds? Avoid lockup warnings. */
++ if (stolen > 5*HZ)
++ touch_softlockup_watchdog();
++
++ /* Local timer processing (see update_process_times()). */
++ run_local_timers();
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode_vm(regs));
++ scheduler_tick();
++ run_posix_cpu_timers(current);
++ profile_tick(CPU_PROFILING, regs);
++
++ return IRQ_HANDLED;
++}
++
++static void init_missing_ticks_accounting(unsigned int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++ int rc;
++
++ memset(runstate, 0, sizeof(*runstate));
++
++ area.addr.v = runstate;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++ WARN_ON(rc && rc != -ENOSYS);
++
++ per_cpu(processed_blocked_time, cpu) =
++ runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) =
++ runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline];
++}
++
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++ unsigned long retval;
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (efi_enabled)
++ retval = efi_get_time();
++ else
++ retval = mach_get_cmos_time();
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++EXPORT_SYMBOL(get_cmos_time);
++
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++ struct timeval now, next;
++ int fail = 1;
++
++ /*
++ * If we have an externally synchronized Linux clock, then update
++ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++ * called as close as possible to 500 ms before the new second starts.
++ * This code is run on a timer. If the clock is set, that timer
++ * may not expire at the correct time. Thus, we adjust...
++ */
++ if (!ntp_synced())
++ /*
++ * Not synced, exit, do not restart a timer (if one is
++ * running, let it run out).
++ */
++ return;
++
++ do_gettimeofday(&now);
++ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++ fail = set_rtc_mmss(now.tv_sec);
++
++ next.tv_usec = USEC_AFTER - now.tv_usec;
++ if (next.tv_usec <= 0)
++ next.tv_usec += USEC_PER_SEC;
++
++ if (!fail)
++ next.tv_sec = 659;
++ else
++ next.tv_sec = 0;
++
++ if (next.tv_usec >= USEC_PER_SEC) {
++ next.tv_sec++;
++ next.tv_usec -= USEC_PER_SEC;
++ }
++ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
++}
++
++void notify_arch_cmos_timer(void)
++{
++ mod_timer(&sync_cmos_timer, jiffies + 1);
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++ extern void time_resume(void);
++ time_resume();
++ return 0;
++}
++
++static struct sysdev_class timer_sysclass = {
++ .resume = timer_resume,
++ set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++ .id = 0,
++ .cls = &timer_sysclass,
++};
++
++static int time_init_device(void)
++{
++ int error = sysdev_class_register(&timer_sysclass);
++ if (!error)
++ error = sysdev_register(&device_timer);
++ return error;
++}
++
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++ xtime.tv_sec = get_cmos_time();
++ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++ set_normalized_timespec(&wall_to_monotonic,
++ -xtime.tv_sec, -xtime.tv_nsec);
++
++ if ((hpet_enable() >= 0) && hpet_use_timer) {
++ printk("Using HPET for base-timer\n");
++ }
++
++ time_init_hook();
++}
++#endif
++
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
++{
++ per_cpu(timer_irq, 0) =
++ bind_virq_to_irqhandler(
++ VIRQ_TIMER,
++ 0,
++ timer_interrupt,
++ SA_INTERRUPT,
++ "timer0",
++ NULL);
++ BUG_ON(per_cpu(timer_irq, 0) < 0);
++}
++
++static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
++ .period_ns = NS_PER_TICK
++};
++
++void __init time_init(void)
++{
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_capable()) {
++ /*
++		 * HPET initialization needs to do memory-mapped I/O. So, let
++ * us do a late initialization after mem_init().
++ */
++ late_time_init = hpet_time_init;
++ return;
++ }
++#endif
++
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
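++	/*
++	 * -ENOSYS is tolerated for CONFIG_XEN_COMPAT <= 0x030004:
++	 * hypervisors that old lack VCPUOP_set_periodic_timer and are
++	 * presumed to deliver the periodic VIRQ_TIMER tick by default.
++	 */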
++
++ get_time_values_from_xen(0);
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++ per_cpu(processed_system_time, 0) = processed_system_time;
++ init_missing_ticks_accounting(0);
++
++ update_wallclock();
++
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
++
++#if defined(__x86_64__)
++ vxtime.mode = VXTIME_TSC;
++ vxtime.quot = (1000000L << 32) / vxtime_hz;
++ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
++ sync_core();
++ rdtscll(vxtime.last_tsc);
++#endif
++
++ /* Cannot request_irq() until kmem is initialised. */
++ late_time_init = setup_cpu0_timer_irq;
++}
++
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
++{
++ unsigned long seq;
++ long delta;
++ u64 st;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ delta = j - jiffies;
++ if (delta < 1) {
++ /* Triggers in some wrap-around cases, but that's okay:
++ * we just end up with a shorter timeout. */
++ st = processed_system_time + NS_PER_TICK;
++ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++ /* Very long timeout means there is no pending timer.
++ * We indicate this to Xen by passing zero timeout. */
++ st = 0;
++ } else {
++ st = processed_system_time + delta * (u64)NS_PER_TICK;
++ }
++ } while (read_seqretry(&xtime_lock, seq));
++
++ return st;
++}
++EXPORT_SYMBOL(jiffies_to_st);
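++
++/*
++ * Usage note: stop_hz_timer() below feeds this into a one-shot Xen
++ * timer, e.g.
++ *
++ *	singleshot.timeout_abs_ns = jiffies_to_st(j) + NS_PER_TICK/2;
++ *
++ * where a zero return (very long timeout) tells Xen not to arm a
++ * timer at all.
++ */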
++
++/*
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
++ */
++static void stop_hz_timer(void)
++{
++ struct vcpu_set_singleshot_timer singleshot;
++ unsigned int cpu = smp_processor_id();
++ unsigned long j;
++ int rc;
++
++ cpu_set(cpu, nohz_cpu_mask);
++
++	/*
++	 * See matching smp_mb in rcu_start_batch in rcupdate.c.  These mbs
++	 * ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a
++	 * value of rcp->cur that matches rdp->quiescbatch and allows us to
++	 * stop the hz timer, then the cpumasks created for subsequent values
++	 * of cur in rcu_start_batch are guaranteed to pick up the updated
++	 * nohz_cpu_mask and so will not depend on this cpu.
++	 */
++
++ smp_mb();
++
++ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++ cpu_clear(cpu, nohz_cpu_mask);
++ j = jiffies + 1;
++ }
++
++ singleshot.timeout_abs_ns = jiffies_to_st(j) + NS_PER_TICK/2;
++ singleshot.flags = 0;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
++#if CONFIG_XEN_COMPAT <= 0x030004
++ if (rc) {
++ BUG_ON(rc != -ENOSYS);
++ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++ }
++#endif
++ BUG_ON(rc);
++}
++
++static void start_hz_timer(void)
++{
++ cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
++
++void raw_safe_halt(void)
++{
++ stop_hz_timer();
++ /* Blocking includes an implicit local_irq_enable(). */
++ HYPERVISOR_block();
++ start_hz_timer();
++}
++EXPORT_SYMBOL(raw_safe_halt);
++
++void halt(void)
++{
++ if (irqs_disabled())
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++}
++EXPORT_SYMBOL(halt);
++
++/* No locking required. Interrupts are disabled on all CPUs. */
++void time_resume(void)
++{
++ unsigned int cpu;
++
++ init_cpu_khz();
++
++ for_each_online_cpu(cpu) {
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++ get_time_values_from_xen(cpu);
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ }
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++
++ update_wallclock();
++}
++
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++int __cpuinit local_setup_timer(unsigned int cpu)
++{
++ int seq, irq;
++
++ BUG_ON(cpu == 0);
++
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ } while (read_seqretry(&xtime_lock, seq));
++
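++ /* Bind this CPU's VIRQ_TIMER to its own interrupt handler. */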
++ sprintf(timer_name[cpu], "timer%u", cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
++ cpu,
++ timer_interrupt,
++ SA_INTERRUPT,
++ timer_name[cpu],
++ NULL);
++ if (irq < 0)
++ return irq;
++ per_cpu(timer_irq, cpu) = irq;
++
++ return 0;
++}
++
++void __cpuexit local_teardown_timer(unsigned int cpu)
++{
++ BUG_ON(cpu == 0);
++ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
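++/*
++ * Forward completed CPU frequency transitions to the hypervisor
++ * (XENPF_change_freq) so it can rescale time for CPUs whose TSC
++ * rate varies with frequency; constant-TSC CPUs are skipped.
++ */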
++static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
++ void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ struct xen_platform_op op;
++
++ if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
++ return 0;
++
++ if (val == CPUFREQ_PRECHANGE)
++ return 0;
++
++ op.cmd = XENPF_change_freq;
++ op.u.change_freq.flags = 0;
++ op.u.change_freq.cpu = freq->cpu;
++ op.u.change_freq.freq = (u64)freq->new * 1000;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++
++ return 0;
++}
++
++static struct notifier_block time_cpufreq_notifier_block = {
++ .notifier_call = time_cpufreq_notifier
++};
++
++static int __init cpufreq_time_setup(void)
++{
++ if (cpufreq_register_notifier(&time_cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER)) {
++ printk(KERN_ERR "failed to set up cpufreq notifier\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++core_initcall(cpufreq_time_setup);
++#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
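++/*
++ * These entries appear as /proc/sys/xen/independent_wallclock (boolean)
++ * and /proc/sys/xen/permitted_clock_jitter (nanoseconds).
++ */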
++static ctl_table xen_subtable[] = {
++ {
++ .ctl_name = 1,
++ .procname = "independent_wallclock",
++ .data = &independent_wallclock,
++ .maxlen = sizeof(independent_wallclock),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
++ .ctl_name = 2,
++ .procname = "permitted_clock_jitter",
++ .data = &permitted_clock_jitter,
++ .maxlen = sizeof(permitted_clock_jitter),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax
++ },
++ { 0 }
++};
++static ctl_table xen_table[] = {
++ {
++ .ctl_name = 123,
++ .procname = "xen",
++ .mode = 0555,
++ .child = xen_subtable
++ },
++ { 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++ (void)register_sysctl_table(xen_table, 0);
++ return 0;
++}
++__initcall(xen_sysctl_init);
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/traps.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/traps.c
+--- linux-2.6.18.8/arch/i386/kernel/traps.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/traps.c 2008-08-21 11:36:07.000000000 +0200
+@@ -642,18 +642,11 @@ static void mem_parity_error(unsigned ch
+
+ static void io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+- unsigned long i;
+-
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+- reason = (reason & 0xf) | 8;
+- outb(reason, 0x61);
+- i = 2000;
+- while (--i) udelay(1000);
+- reason &= ~8;
+- outb(reason, 0x61);
++ clear_io_check_error(reason);
+ }
+
+ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/traps-xen.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/traps-xen.c
+--- linux-2.6.18.8/arch/i386/kernel/traps-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/traps-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1190 @@
++/*
++ * linux/arch/i386/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
++
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++
++#include <linux/module.h>
++
++#include "mach_traps.h"
++
++asmlinkage int system_call(void);
++
++struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++ { 0, 0 }, { 0, 0 } };
++
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
++
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
++
++static int kstack_depth_to_print = 24;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used by modular kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used by modular kdb */
++
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++ return p > (void *)tinfo &&
++ p < (void *)tinfo + THREAD_SIZE - 3;
++}
++
++/*
++ * Print one address/symbol entry per line.
++ */
++static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
++{
++ printk(" [<%08lx>] ", addr);
++
++ print_symbol("%s\n", addr);
++}
++
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++ unsigned long *stack, unsigned long ebp,
++ char *log_lvl)
++{
++ unsigned long addr;
++
++#ifdef CONFIG_FRAME_POINTER
++ while (valid_stack_ptr(tinfo, (void *)ebp)) {
++ addr = *(unsigned long *)(ebp + 4);
++ print_addr_and_symbol(addr, log_lvl);
++ /*
++ * break out of recursive entries (such as
++ * end_of_stack_stop_unwind_function):
++ */
++ if (ebp == *(unsigned long *)ebp)
++ break;
++ ebp = *(unsigned long *)ebp;
++ }
++#else
++ while (valid_stack_ptr(tinfo, stack)) {
++ addr = *stack++;
++ if (__kernel_text_address(addr))
++ print_addr_and_symbol(addr, log_lvl);
++ }
++#endif
++ return ebp;
++}
++
++static asmlinkage int
++show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ print_addr_and_symbol(UNW_PC(info), log_lvl);
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
++static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *stack, char *log_lvl)
++{
++ unsigned long ebp;
++
++ if (!task)
++ task = current;
++
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, task, regs) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ } else if (task == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
++ else {
++ if (unwind_init_blocked(&info, task) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if (UNW_SP(&info) >= PAGE_OFFSET) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (void *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ if (task == current) {
++ /* Grab ebp right from our regs */
++ asm ("movl %%ebp, %0" : "=r" (ebp) : );
++ } else {
++ /* ebp is the last reg pushed by switch_to */
++ ebp = *(unsigned long *) task->thread.esp;
++ }
++
++ while (1) {
++ struct thread_info *context;
++ context = (struct thread_info *)
++ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
++ ebp = print_context_stack(context, stack, ebp, log_lvl);
++ stack = (unsigned long*)context->previous_esp;
++ if (!stack)
++ break;
++ printk("%s =======================\n", log_lvl);
++ }
++}
++
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++{
++ show_trace_log_lvl(task, regs, stack, "");
++}
++
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *esp, char *log_lvl)
++{
++ unsigned long *stack;
++ int i;
++
++ if (esp == NULL) {
++ if (task)
++ esp = (unsigned long*)task->thread.esp;
++ else
++ esp = (unsigned long *)&esp;
++ }
++
++ stack = esp;
++ for(i = 0; i < kstack_depth_to_print; i++) {
++ if (kstack_end(stack))
++ break;
++ if (i && ((i % 8) == 0))
++ printk("\n%s ", log_lvl);
++ printk("%08lx ", *stack++);
++ }
++ printk("\n%sCall Trace:\n", log_lvl);
++ show_trace_log_lvl(task, regs, esp, log_lvl);
++}
++
++void show_stack(struct task_struct *task, unsigned long *esp)
++{
++ printk(" ");
++ show_stack_log_lvl(task, NULL, esp, "");
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long stack;
++
++ show_trace(current, NULL, &stack);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = 1;
++ unsigned long esp;
++ unsigned short ss;
++
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode_vm(regs)) {
++ in_kernel = 0;
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ print_modules();
++ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
++ "EFLAGS: %08lx (%s %.*s) \n",
++ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++ print_tainted(), regs->eflags, system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
++ regs->eax, regs->ebx, regs->ecx, regs->edx);
++ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
++ regs->esi, regs->edi, regs->ebp, esp);
++ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
++ regs->xds & 0xffff, regs->xes & 0xffff, ss);
++ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++ TASK_COMM_LEN, current->comm, current->pid,
++ current_thread_info(), current, current->thread_info);
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++ u8 __user *eip;
++
++ printk("\n" KERN_EMERG "Stack: ");
++ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++
++ printk(KERN_EMERG "Code: ");
++
++ eip = (u8 __user *)regs->eip - 43;
++ for (i = 0; i < 64; i++, eip++) {
++ unsigned char c;
++
++ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ printk(" Bad EIP value.");
++ break;
++ }
++ if (eip == (u8 __user *)regs->eip)
++ printk("<%02x> ", c);
++ else
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
++static void handle_BUG(struct pt_regs *regs)
++{
++ unsigned long eip = regs->eip;
++ unsigned short ud2;
++
++ if (eip < PAGE_OFFSET)
++ return;
++ if (__get_user(ud2, (unsigned short __user *)eip))
++ return;
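++ /* ud2 is the two bytes 0x0f 0x0b; read as a little-endian
++ * u16 that is 0x0b0f. */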
++ if (ud2 != 0x0b0f)
++ return;
++
++ printk(KERN_EMERG "------------[ cut here ]------------\n");
++
++#ifdef CONFIG_DEBUG_BUGVERBOSE
++ do {
++ unsigned short line;
++ char *file;
++ char c;
++
++ if (__get_user(line, (unsigned short __user *)(eip + 2)))
++ break;
++ if (__get_user(file, (char * __user *)(eip + 4)) ||
++ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++ file = "<bad filename>";
++
++ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
++ return;
++ } while (0);
++#endif
++ printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++}
++
++/* This is gone through when something in the kernel
++ * has done something bad and is about to be terminated.
++*/
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ static struct {
++ spinlock_t lock;
++ u32 lock_owner;
++ int lock_owner_depth;
++ } die = {
++ .lock = SPIN_LOCK_UNLOCKED,
++ .lock_owner = -1,
++ .lock_owner_depth = 0
++ };
++ static int die_counter;
++ unsigned long flags;
++
++ oops_enter();
++
++ if (die.lock_owner != raw_smp_processor_id()) {
++ console_verbose();
++ spin_lock_irqsave(&die.lock, flags);
++ die.lock_owner = smp_processor_id();
++ die.lock_owner_depth = 0;
++ bust_spinlocks(1);
++ }
++ else
++ local_save_flags(flags);
++
++ if (++die.lock_owner_depth < 3) {
++ int nl = 0;
++ unsigned long esp;
++ unsigned short ss;
++
++ handle_BUG(regs);
++ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk(KERN_EMERG "PREEMPT ");
++ nl = 1;
++#endif
++#ifdef CONFIG_SMP
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("SMP ");
++ nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("DEBUG_PAGEALLOC");
++ nl = 1;
++#endif
++ if (nl)
++ printk("\n");
++ if (notify_die(DIE_OOPS, str, regs, err,
++ current->thread.trap_no, SIGSEGV) !=
++ NOTIFY_STOP) {
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode(regs)) {
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++ print_symbol("%s", regs->eip);
++ printk(" SS:ESP %04x:%08lx\n", ss, esp);
++ }
++ else
++ regs = NULL;
++ } else
++ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++
++ bust_spinlocks(0);
++ die.lock_owner = -1;
++ spin_unlock_irqrestore(&die.lock, flags);
++
++ if (!regs)
++ return;
++
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++
++ if (in_interrupt())
++ panic("Fatal exception in interrupt");
++
++ if (panic_on_oops)
++ panic("Fatal exception");
++
++ oops_exit();
++ do_exit(SIGSEGV);
++}
++
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++ if (!user_mode_vm(regs))
++ die(str, regs, err);
++}
++
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (regs->eflags & VM_MASK) {
++ if (vm86)
++ goto vm86_trap;
++ goto trap_signal;
++ }
++
++ if (!user_mode(regs))
++ goto kernel_trap;
++
++ trap_signal: {
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++ kernel_trap: {
++ if (!fixup_exception(regs))
++ die(str, regs, error_code);
++ return;
++ }
++
++ vm86_trap: {
++ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++ if (ret) goto trap_signal;
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
++
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
++
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
++
++DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++
++ if (regs->eflags & VM_MASK)
++ goto gp_in_vm86;
++
++ if (!user_mode(regs))
++ goto gp_in_kernel;
++
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++ force_sig(SIGSEGV, current);
++ return;
++
++gp_in_vm86:
++ local_irq_enable();
++ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++ return;
++
++gp_in_kernel:
++ if (!fixup_exception(regs)) {
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
++ "to continue\n");
++ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
++ "chips\n");
++
++ /* Clear and disable the memory parity error line. */
++ clear_mem_error(reason);
++}
++
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++ /* Re-enable the IOCK line, wait for a few seconds */
++ clear_io_check_error(reason);
++}
++
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++ /* Might actually be able to figure out what the guilty party
++ * is. */
++ if( MCA_bus ) {
++ mca_handle_nmi();
++ return;
++ }
++#endif
++ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
++ reason, smp_processor_id());
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++static DEFINE_SPINLOCK(nmi_print_lock);
++
++void die_nmi (struct pt_regs *regs, const char *msg)
++{
++ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++ NOTIFY_STOP)
++ return;
++
++ spin_lock(&nmi_print_lock);
++ /*
++ * We are in trouble anyway, let's at least try
++ * to get a message out.
++ */
++ bust_spinlocks(1);
++ printk(KERN_EMERG "%s", msg);
++ printk(" on CPU%d, eip %08lx, registers:\n",
++ smp_processor_id(), regs->eip);
++ show_registers(regs);
++ printk(KERN_EMERG "console shuts up ...\n");
++ console_silent();
++ spin_unlock(&nmi_print_lock);
++ bust_spinlocks(0);
++
++ /* If we are in kernel we are probably nested up pretty bad
++ * and might as well get out now while we still can.
++ */
++ if (!user_mode_vm(regs)) {
++ current->thread.trap_no = 2;
++ crash_kexec(regs);
++ }
++
++ do_exit(SIGSEGV);
++}
++
++static void default_do_nmi(struct pt_regs * regs)
++{
++ unsigned char reason = 0;
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!smp_processor_id())
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog) {
++ nmi_watchdog_tick(regs);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++ /*
++ * Reassert NMI in case it became active meanwhile
++ * as it's edge-triggered.
++ */
++ reassert_nmi();
++}
++
++static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
++{
++ return 0;
++}
++
++static nmi_callback_t nmi_callback = dummy_nmi_callback;
++
++fastcall void do_nmi(struct pt_regs * regs, long error_code)
++{
++ int cpu;
++
++ nmi_enter();
++
++ cpu = smp_processor_id();
++
++ ++nmi_count(cpu);
++
++ if (!rcu_dereference(nmi_callback)(regs, cpu))
++ default_do_nmi(regs);
++
++ nmi_exit();
++}
++
++void set_nmi_callback(nmi_callback_t callback)
++{
++ vmalloc_sync_all();
++ rcu_assign_pointer(nmi_callback, callback);
++}
++EXPORT_SYMBOL_GPL(set_nmi_callback);
++
++void unset_nmi_callback(void)
++{
++ nmi_callback = dummy_nmi_callback;
++}
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
++
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++ == NOTIFY_STOP)
++ return;
++ /* This is an interrupt gate, because kprobes wants interrupts
++ disabled. Normal trap handlers don't. */
++ restore_interrupts(regs);
++ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif
++
++/*
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ *
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
++ */
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++{
++ unsigned int condition;
++ struct task_struct *tsk = current;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++ /* It's safe to allow irq's after DR6 has been saved */
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg[7])
++ goto clear_dr7;
++ }
++
++ if (regs->eflags & VM_MASK)
++ goto debug_vm86;
++
++ /* Save debug status register where ptrace can see it */
++ tsk->thread.debugreg[6] = condition;
++
++ /*
++ * Single-stepping through TF: make sure we ignore any events in
++ * kernel space (but re-enable TF when returning to user mode).
++ */
++ if (condition & DR_STEP) {
++ /*
++ * We already checked v86 mode above, so we can
++ * check for kernel mode by just checking the CPL
++ * of CS.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ }
++
++ /* Ok, finally something we can handle */
++ send_sigtrap(tsk, regs, error_code);
++
++ /* Disable additional traps. They'll be re-enabled when
++ * the signal is delivered.
++ */
++clear_dr7:
++ set_debugreg(0, 7);
++ return;
++
++debug_vm86:
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ return;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++ * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000: /* No unmasked exception */
++ return;
++ default: /* Multiple exceptions */
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++ ignore_fpu_irq = 1;
++ math_error((void __user *)regs->eip);
++}
++
++static void simd_math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++ long error_code)
++{
++ if (cpu_has_xmm) {
++ /* Handle SIMD FPU exceptions on PIII+ processors. */
++ ignore_fpu_irq = 1;
++ simd_math_error((void __user *)regs->eip);
++ } else {
++ /*
++ * Handle strange cache flush from user space exception
++ * in all other cases. This is undocumented behaviour.
++ */
++ if (regs->eflags & VM_MASK) {
++ handle_vm86_fault((struct kernel_vm86_regs *)regs,
++ error_code);
++ return;
++ }
++ current->thread.trap_no = 19;
++ current->thread.error_code = error_code;
++ die_if_kernel("cache flush denied", regs, error_code);
++ force_sig(SIGSEGV, current);
++ }
++}
++
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++ long error_code)
++{
++#if 0
++ /* No need to warn about this any longer. */
++ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
++}
++
++fastcall void setup_x86_bogus_stack(unsigned char * stk)
++{
++ unsigned long *switch16_ptr, *switch32_ptr;
++ struct pt_regs *regs;
++ unsigned long stack_top, stack_bot;
++ unsigned short iret_frame16_off;
++ int cpu = smp_processor_id();
++ /* reserve the space on 32bit stack for the magic switch16 pointer */
++ memmove(stk, stk + 8, sizeof(struct pt_regs));
++ switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
++ regs = (struct pt_regs *)stk;
++ /* now the switch32 on 16bit stack */
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
++ /* copy iret frame on 16bit stack */
++ memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
++ /* fill in the switch pointers */
++ switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
++ switch16_ptr[1] = __ESPFIX_SS;
++ switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
++ 8 - CPU_16BIT_STACK_SIZE;
++ switch32_ptr[1] = __KERNEL_DS;
++}
++
++fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
++{
++ unsigned long *switch32_ptr;
++ unsigned char *stack16, *stack32;
++ unsigned long stack_top, stack_bot;
++ int len;
++ int cpu = smp_processor_id();
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ /* copy the data from 16bit stack to 32bit stack */
++ len = CPU_16BIT_STACK_SIZE - 8 - sp;
++ stack16 = (unsigned char *)(stack_bot + sp);
++ stack32 = (unsigned char *)
++ (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
++ memcpy(stack32, stack16, len);
++ return stack32;
++}
++#endif
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
++ */
++asmlinkage void math_state_restore(struct pt_regs regs)
++{
++ struct thread_info *thread = current_thread_info();
++ struct task_struct *tsk = thread->task;
++
++ /* NB. 'clts' is done for us by Xen during virtual trap. */
++ if (!tsk_used_math(tsk))
++ init_fpu(tsk);
++ restore_fpu(tsk);
++ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
++}
++
++#ifndef CONFIG_MATH_EMULATION
++
++asmlinkage void math_emulate(long arg)
++{
++ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++ printk(KERN_EMERG "killing %s.\n",current->comm);
++ force_sig(SIGFPE,current);
++ schedule();
++}
++
++#endif /* CONFIG_MATH_EMULATION */
++
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++
++ /*
++ * Update the IDT descriptor and reload the IDT so that
++ * it uses the read-only mapped virtual address.
++ */
++ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ load_idt(&idt_descr);
++}
++#endif
++
++
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t __cpuinitdata trap_table[] = {
++ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
++ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ int ret;
++
++ ret = HYPERVISOR_set_trap_table(trap_table);
++ if (ret)
++ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
++
++ if (cpu_has_fxsr) {
++ /*
++ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++ * Generates a compile-time "error: zero width for bit-field" if
++ * the alignment is wrong.
++ */
++ struct fxsrAlignAssert {
++ int _:!(offsetof(struct task_struct,
++ thread.i387.fxsave) & 15);
++ };
++
++ printk(KERN_INFO "Enabling fast FPU save and restore... ");
++ set_in_cr4(X86_CR4_OSFXSR);
++ printk("done.\n");
++ }
++ if (cpu_has_xmm) {
++ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++ "support... ");
++ set_in_cr4(X86_CR4_OSXMMEXCPT);
++ printk("done.\n");
++ }
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
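++/* Copy the static trap table into a secondary CPU's vcpu context. */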
++void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
++{
++ const trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++ else if (strcmp(s, "new") == 2)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/vm86.c linux-2.6.18-xen-3.3.0/arch/i386/kernel/vm86.c
+--- linux-2.6.18.8/arch/i386/kernel/vm86.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/vm86.c 2008-08-21 11:36:07.000000000 +0200
+@@ -97,7 +97,9 @@
+ struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ struct pt_regs *ret;
+ unsigned long tmp;
+
+@@ -122,12 +124,16 @@ struct pt_regs * fastcall save_v86_state
+ do_exit(SIGSEGV);
+ }
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ current->thread.esp0 = current->thread.saved_esp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_esp0(tss, &current->thread);
+ current->thread.saved_esp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ loadsegment(fs, current->thread.saved_fs);
+ loadsegment(gs, current->thread.saved_gs);
+@@ -251,7 +257,9 @@ out:
+
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ long eax;
+ /*
+ * make sure the vm86() system call doesn't try to do anything silly
+@@ -296,12 +304,16 @@ static void do_sys_vm86(struct kernel_vm
+ savesegment(fs, tsk->thread.saved_fs);
+ savesegment(gs, tsk->thread.saved_gs);
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+ load_esp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ tsk->thread.screen_bitmap = info->screen_bitmap;
+ if (info->flags & VM86_SCREEN_BITMAP)
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/vmlinux.lds.S linux-2.6.18-xen-3.3.0/arch/i386/kernel/vmlinux.lds.S
+--- linux-2.6.18.8/arch/i386/kernel/vmlinux.lds.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/vmlinux.lds.S 2008-08-21 11:36:07.000000000 +0200
+@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf32-i386", "elf32-i386"
+ OUTPUT_ARCH(i386)
+ ENTRY(phys_startup_32)
+ jiffies = jiffies_64;
++
++PHDRS {
++ text PT_LOAD FLAGS(5); /* R_E */
++ data PT_LOAD FLAGS(7); /* RWE */
++ note PT_NOTE FLAGS(4); /* R__ */
++}
+ SECTIONS
+ {
+ . = __KERNEL_START;
+@@ -26,7 +32,7 @@ SECTIONS
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- } = 0x9090
++ } :text = 0x9090
+
+ _etext = .; /* End of text section */
+
+@@ -45,10 +51,11 @@ SECTIONS
+ __tracedata_end = .;
+
+ /* writeable */
++ . = ALIGN(4096);
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
+ *(.data)
+ CONSTRUCTORS
+- }
++ } :data
+
+ . = ALIGN(4096);
+ __nosave_begin = .;
+@@ -184,4 +191,6 @@ SECTIONS
+ STABS_DEBUG
+
+ DWARF_DEBUG
++
++ NOTES
+ }
+diff -rpuN linux-2.6.18.8/arch/i386/kernel/vsyscall-note-xen.S linux-2.6.18-xen-3.3.0/arch/i386/kernel/vsyscall-note-xen.S
+--- linux-2.6.18.8/arch/i386/kernel/vsyscall-note-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/kernel/vsyscall-note-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
++
++#include "vsyscall-note.S"
++
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently. This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg. Note that a file in /etc/ld.so.conf.d
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ * hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++ ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++ .long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++ .byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(0, "nosegneg")
++NOTE_KERNELCAP_END
+diff -rpuN linux-2.6.18.8/arch/i386/lib/Makefile linux-2.6.18-xen-3.3.0/arch/i386/lib/Makefile
+--- linux-2.6.18.8/arch/i386/lib/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/lib/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -7,3 +7,4 @@ lib-y = checksum.o delay.o usercopy.o ge
+ bitops.o
+
+ lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
++lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
+diff -rpuN linux-2.6.18.8/arch/i386/lib/scrub.c linux-2.6.18-xen-3.3.0/arch/i386/lib/scrub.c
+--- linux-2.6.18.8/arch/i386/lib/scrub.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/lib/scrub.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,21 @@
++#include <asm/cpufeature.h>
++#include <asm/page.h>
++#include <asm/processor.h>
++
++void scrub_pages(void *v, unsigned int count)
++{
++ if (likely(cpu_has_xmm2)) {
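++ /* Zero with non-temporal stores (movnti), four longs per loop,
++ * so page scrubbing does not displace useful cache lines. */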
++ unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
++
++ for (; n--; v += sizeof(long) * 4)
++ asm("movnti %1,(%0)\n\t"
++ "movnti %1,%c2(%0)\n\t"
++ "movnti %1,2*%c2(%0)\n\t"
++ "movnti %1,3*%c2(%0)\n\t"
++ : : "r" (v), "r" (0L), "i" (sizeof(long))
++ : "memory");
++ asm volatile("sfence" : : : "memory");
++ } else
++ for (; count--; v += PAGE_SIZE)
++ clear_page(v);
++}
+diff -rpuN linux-2.6.18.8/arch/i386/mach-xen/Makefile linux-2.6.18-xen-3.3.0/arch/i386/mach-xen/Makefile
+--- linux-2.6.18.8/arch/i386/mach-xen/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mach-xen/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := setup.o
+diff -rpuN linux-2.6.18.8/arch/i386/mach-xen/setup.c linux-2.6.18-xen-3.3.0/arch/i386/mach-xen/setup.c
+--- linux-2.6.18.8/arch/i386/mach-xen/setup.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mach-xen/setup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,158 @@
++/*
++ * Machine specific setup for generic
++ */
++
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI (1)
++#else
++#define DEFAULT_SEND_IPI (0)
++#endif
++
++int no_broadcast=DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
++{
++ get_option(&str, &no_broadcast);
++ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
++ "IPI Broadcast");
++ return 1;
++}
++
++__setup("no_ipi_broadcast", no_ipi_broadcast);
++
++static int __init print_ipi_mode(void)
++{
++ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
++ "Shortcut");
++ return 0;
++}
++
++late_initcall(print_ipi_mode);
++
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++char * __init machine_specific_memory_setup(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++ * the boot process we know we have plenty slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
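++ /* Hypervisors predating XENMEM_memory_map return -ENOSYS; fall
++ * back to a single E820 RAM entry sized from nr_pages. */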
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8ULL << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ return "Xen";
++}
++
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
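++/*
++ * Runs before setup_arch(): adopt the hypervisor-built page tables,
++ * the fixmap base and the machine-to-physical translation tables.
++ */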
++void __init pre_setup_arch_hook(void)
++{
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ struct xen_platform_parameters pp;
++
++ init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
++
++ setup_xen_features();
++
++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
++ set_fixaddr_top(pp.virt_start);
++
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ } else
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++}
++
++void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++ };
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = { __KERNEL_CS, (unsigned long)nmi },
++ };
++
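++ /* Register the event and failsafe callbacks; hypervisors older
++ * than 3.0.3 lack CALLBACKOP_register, hence the fallbacks below. */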
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address.cs, event.address.eip,
++ failsafe.address.cs, failsafe.address.eip);
++#endif
++ BUG_ON(ret);
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++}
+diff -rpuN linux-2.6.18.8/arch/i386/Makefile linux-2.6.18-xen-3.3.0/arch/i386/Makefile
+--- linux-2.6.18.8/arch/i386/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -71,6 +71,10 @@ mcore-$(CONFIG_X86_BIGSMP) := mach-defau
+ mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
+ mcore-$(CONFIG_X86_SUMMIT) := mach-default
+
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-i386/mach-xen
++mcore-$(CONFIG_X86_XEN) := mach-xen
++
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
+ mcore-$(CONFIG_X86_GENERICARCH) := mach-default
+@@ -102,9 +106,20 @@ AFLAGS += $(mflags-y)
+
+ boot := arch/i386/boot
+
+-PHONY += zImage bzImage compressed zlilo bzlilo \
++PHONY += zImage bzImage vmlinuz compressed zlilo bzlilo \
+ zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+
++ifdef CONFIG_XEN
++CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
++ -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++all: vmlinuz
++
++# KBUILD_IMAGE specifies the target image being built
++KBUILD_IMAGE := $(boot)/vmlinuz
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
++else
+ all: bzImage
+
+ # KBUILD_IMAGE specify target image being built
+@@ -124,6 +139,7 @@ zdisk bzdisk: vmlinux
+
+ fdimage fdimage144 fdimage288 isoimage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
++endif
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+diff -rpuN linux-2.6.18.8/arch/i386/mm/fault-xen.c linux-2.6.18-xen-3.3.0/arch/i386/mm/fault-xen.c
+--- linux-2.6.18.8/arch/i386/mm/fault-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/fault-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,779 @@
++/*
++ * linux/arch/i386/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
++
++
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out
++ */
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++
++ if (yes) {
++ oops_in_progress = 1;
++ return;
++ }
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++}
++
++/*
++ * Return EIP plus the CS segment base. The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ *
++ * This is slow, but is very rarely executed.
++ */
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++ unsigned long *eip_limit)
++{
++ unsigned long eip = regs->eip;
++ unsigned seg = regs->xcs & 0xffff;
++ u32 seg_ar, seg_limit, base, *desc;
++
++ /* Unlikely, but must come before segment checks. */
++ if (unlikely(regs->eflags & VM_MASK)) {
++ base = seg << 4;
++ *eip_limit = base + 0xffff;
++ return base + (eip & 0xffff);
++ }
++
++ /* The standard kernel/user address space limit. */
++ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++
++ /* By far the most common cases. */
++ if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
++ return eip;
++
++ /* Check the segment exists, is within the current LDT/GDT size,
++ that kernel/user (ring 0..3) has the appropriate privilege,
++ that it's a code segment, and get the limit. */
++ __asm__ ("larl %3,%0; lsll %3,%1"
++ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++ if ((~seg_ar & 0x9800) || eip > seg_limit) {
++ *eip_limit = 0;
++ return 1; /* So that returned eip > *eip_limit. */
++ }
++
++ /* Get the GDT/LDT descriptor base.
++ When you look for races in this code remember that
++ LDT and other horrors are only used in user space. */
++ if (seg & (1<<2)) {
++ /* Must lock the LDT while reading it. */
++ down(&current->mm->context.sem);
++ desc = current->mm->context.ldt;
++ desc = (void *)desc + (seg & ~7);
++ } else {
++ /* Must disable preemption while reading the GDT. */
++ desc = (u32 *)get_cpu_gdt_table(get_cpu());
++ desc = (void *)desc + (seg & ~7);
++ }
++
++ /* Decode the code segment base from the descriptor */
++ base = get_desc_base((unsigned long *)desc);
++
++ if (seg & (1<<2)) {
++ up(&current->mm->context.sem);
++ } else
++ put_cpu();
++
++ /* Adjust EIP and segment limit, and clamp at the kernel limit.
++ It's legitimate for segments to wrap at 0xffffffff. */
++ seg_limit += base;
++ if (seg_limit < *eip_limit && seg_limit >= base)
++ *eip_limit = seg_limit;
++ return eip + base;
++}
++
++/*
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{
++ unsigned long limit;
++ unsigned long instr = get_segment_eip (regs, &limit);
++ int scan_more = 1;
++ int prefetch = 0;
++ int i;
++
++ for (i = 0; scan_more && i < 15; i++) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 6)) {
++ /* Catch an obscure case of prefetch inside an NX page. */
++ if (nx_enabled && (error_code & 16))
++ return 0;
++ return __is_prefetch(regs, addr);
++ }
++ return 0;
++}
++
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++ unsigned long address, struct task_struct *tsk)
++{
++ siginfo_t info;
++
++ info.si_signo = si_signo;
++ info.si_errno = 0;
++ info.si_code = si_code;
++ info.si_addr = (void __user *)address;
++ force_sig_info(si_signo, &info, tsk);
++}
++
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long *p, page;
++ unsigned long mfn;
++
++ page = read_cr3();
++ p = (unsigned long *)__va(page);
++ p += (address >> 30) * 2;
++ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++ if (p[0] & _PAGE_PRESENT) {
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *)__va(page);
++ address &= 0x3fffffff;
++ p += (address >> 21) * 2;
++ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++#ifdef CONFIG_HIGHPTE
++ if (mfn_to_pfn(mfn) >= highstart_pfn)
++ return;
++#endif
++ if (p[0] & _PAGE_PRESENT) {
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *) __va(page);
++ address &= 0x001fffff;
++ p += (address >> 12) * 2;
++ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ }
++ }
++}
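++
++/*
++ * Worked example of the PAE address split used above (illustrative):
++ * for address 0xc0401000, bits 31:30 = 3 index the page directory
++ * pointer table (address >> 30), bits 29:21 = 2 index the page
++ * directory (address & 0x3fffffff, then >> 21), and bits 20:12 = 1
++ * index the page table (address & 0x001fffff, then >> 12). Entries
++ * are 8 bytes wide, hence the "* 2" when stepping an unsigned long
++ * pointer, and the hi:lo halves printed as %08lx:%08lx pairs.
++ */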
++#else
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long page;
++
++ page = read_cr3();
++ page = ((unsigned long *) __va(page))[address >> 22];
++ if (oops_may_print())
++ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ /*
++ * We must not directly access the pte in the highpte
++ * case if the page table is located in highmem.
++ * And let's rather not kmap-atomic the pte, just in case
++ * it's allocated already.
++ */
++#ifdef CONFIG_HIGHPTE
++ if ((page >> PAGE_SHIFT) >= highstart_pfn)
++ return;
++#endif
++ if ((page & 1) && oops_may_print()) {
++ page &= PAGE_MASK;
++ address &= 0x003ff000;
++ page = machine_to_phys(page);
++ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ }
++}
++#endif
++
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & 0x0c)
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & 0x02) && !pte_write(*pte))
++ return 0;
++#ifdef CONFIG_X86_PAE
++ if ((error_code & 0x10) && (__pte_val(*pte) & _PAGE_NX))
++ return 0;
++#endif
++
++ return 1;
++}
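++
++/*
++ * Example of the case handled above (hypothetical timing): under Xen a
++ * kernel mapping may change from read-only to read-write while this
++ * CPU still holds a stale read-only TLB entry. A write then faults
++ * with error_code 0x003 (present, write, kernel), yet the init_mm walk
++ * above finds a present, writable PTE, so the fault is spurious and
++ * the access can simply be retried.
++ */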
++
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++ unsigned index = pgd_index(address);
++ pgd_t *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++
++ pgd += index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ return NULL;
++
++ /*
++ * set_pgd(pgd, *pgd_k); here would be useless on PAE
++ * and redundant with the set_pmd() on non-PAE. As would
++ * set_pud.
++ */
++
++ pud = pud_offset(pgd, address);
++ pud_k = pud_offset(pgd_k, address);
++ if (!pud_present(*pud_k))
++ return NULL;
++
++ pmd = pmd_offset(pud, address);
++ pmd_k = pmd_offset(pud_k, address);
++ if (!pmd_present(*pmd_k))
++ return NULL;
++ if (!pmd_present(*pmd))
++#if CONFIG_XEN_COMPAT > 0x030002
++ set_pmd(pmd, *pmd_k);
++#else
++ /*
++ * When running on older Xen we must launder *pmd_k through
++ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++ */
++ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++ else
++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ return pmd_k;
++}
++
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static inline int vmalloc_fault(unsigned long address)
++{
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch.
++ */
++ pgd_paddr = read_cr3();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++ return 0;
++}
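++
++/*
++ * Illustrative scenario: a driver vmalloc()s a buffer, which installs
++ * new pmd entries in init_mm only. When some other task first touches
++ * that address, its private page directory still lacks the entry, the
++ * access faults, and vmalloc_fault() copies the pmd from init_mm so
++ * the access can be retried without ever taking mmap_sem.
++ */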
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ * bit 2 == 0 means kernel, 1 means user-mode
++ * bit 3 == 1 means use of reserved bit detected
++ * bit 4 == 1 means fault was an instruction fetch
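++ *
++ * For example (illustrative): error_code == 6 is a user-mode write to
++ * a not-present page (an ordinary demand fault), while error_code == 7
++ * is a user-mode write that violates the protection of a present page.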
++ */
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ int write, si_code;
++
++ /* get the address */
++ address = read_cr2();
++
++ /* Set the "privileged fault" bit to something sane. */
++ error_code &= ~4;
++ error_code |= (regs->xcs & 2) << 1;
++ if (regs->eflags & X86_EFLAGS_VM)
++ error_code |= 4;
++
++ tsk = current;
++
++ si_code = SEGV_MAPERR;
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area can never be patched up. */
++ if (address >= hypervisor_virt_start)
++ goto bad_area_nosemaphore;
++#endif
++ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++ return;
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ /* It's safe to allow IRQs after cr2 has been saved and the vmalloc
++ fault has been handled. */
++ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++ local_irq_enable();
++
++ mm = tsk->mm;
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault..
++ */
++ if (in_atomic() || !mm)
++ goto bad_area_nosemaphore;
++
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++ * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++ * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & 4) == 0 &&
++ !search_exception_tables(regs->eip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (vma->vm_start <= address)
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & 4) {
++ /*
++ * Accessing the stack below %esp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535,$31" pushes
++ * 32 pointers and then decrements %esp by 65535.)
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & 3) {
++ default: /* 3: write, present */
++#ifdef TEST_VERIFY_AREA
++ if (regs->cs == GET_KERNEL_CS())
++ printk("WP fault at %08lx\n", regs->eip);
++#endif
++ /* fall through */
++ case 2: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case 1: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ survive:
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ case VM_FAULT_OOM:
++ goto out_of_memory;
++ default:
++ BUG();
++ }
++
++ /*
++ * Did it hit the DOS screen memory VA from vm86 mode?
++ */
++ if (regs->eflags & VM_MASK) {
++ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++ if (bit < 32)
++ tsk->thread.screen_bitmap |= 1 << bit;
++ }
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & 4) {
++ /*
++ * Valid to do another page fault here because this one came
++ * from user space.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++ return;
++ }
++
++#ifdef CONFIG_X86_F00F_BUG
++ /*
++ * Pentium F0 0F C7 C8 bug workaround.
++ */
++ if (boot_cpu_data.f00f_bug) {
++ unsigned long nr;
++
++ nr = (address - idt_descr.address) >> 3;
++
++ if (nr == 6) {
++ do_invalid_op(regs, 0);
++ return;
++ }
++ }
++#endif
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs))
++ return;
++
++ /*
++ * Valid to do another page fault here, because if this fault
++ * had been triggered by is_prefetch, fixup_exception would have
++ * handled it.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ bust_spinlocks(1);
++
++ if (oops_may_print()) {
++ #ifdef CONFIG_X86_PAE
++ if (error_code & 16) {
++ pte_t *pte = lookup_address(address);
++
++ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++ printk(KERN_CRIT "kernel tried to execute "
++ "NX-protected page - exploit attempt? "
++ "(uid: %d)\n", current->uid);
++ }
++ #endif
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++ "pointer dereference");
++ else
++ printk(KERN_ALERT "BUG: unable to handle kernel paging"
++ " request");
++ printk(" at virtual address %08lx\n",address);
++ printk(KERN_ALERT " printing eip:\n");
++ printk("%08lx\n", regs->eip);
++ }
++ dump_fault_path(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ die("Oops", regs, error_code);
++ bust_spinlocks(0);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (tsk->pid == 1) {
++ yield();
++ down_read(&mm->mmap_sem);
++ goto survive;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & 4)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & 4))
++ goto no_context;
++
++ /* User space => ok to do another page fault */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++#if !HAVE_SHARED_KERNEL_PMD
++void vmalloc_sync_all(void)
++{
++ /*
++ * Note that races in the updates of insync and start aren't
++ * problematic: insync can only get set bits added, and updates to
++ * start are only improving performance (without affecting correctness
++ * if undone).
++ * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
++ * This change works just fine with 2-level paging too.
++ */
++#define sync_index(a) ((a) >> PMD_SHIFT)
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
++ static unsigned long start = TASK_SIZE;
++ unsigned long address;
++
++ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++ for (address = start;
++ address >= TASK_SIZE && address < hypervisor_virt_start;
++ address += 1UL << PMD_SHIFT) {
++ if (!test_bit(sync_index(address), insync)) {
++ unsigned long flags;
++ struct page *page;
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ /* XEN: failure path assumes non-empty pgd_list. */
++ if (unlikely(!pgd_list)) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ return;
++ }
++ for (page = pgd_list; page; page =
++ (struct page *)page->index)
++ if (!vmalloc_sync_one(page_address(page),
++ address)) {
++ BUG_ON(page != pgd_list);
++ break;
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ if (!page)
++ set_bit(sync_index(address), insync);
++ }
++ if (address == start && test_bit(sync_index(address), insync))
++ start = address + (1UL << PMD_SHIFT);
++ }
++}
++#endif
+diff -rpuN linux-2.6.18.8/arch/i386/mm/highmem-xen.c linux-2.6.18-xen-3.3.0/arch/i386/mm/highmem-xen.c
+--- linux-2.6.18.8/arch/i386/mm/highmem-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/highmem-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,136 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
++
++void *kmap(struct page *page)
++{
++ might_sleep();
++ if (!PageHighMem(page))
++ return page_address(page);
++ return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++ if (in_interrupt())
++ BUG();
++ if (!PageHighMem(page))
++ return;
++ kunmap_high(page);
++}
++
++/*
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However, when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
++ */
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++ inc_preempt_count();
++ if (!PageHighMem(page))
++ return page_address(page);
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++#ifdef CONFIG_DEBUG_HIGHMEM
++ if (!pte_none(*(kmap_pte-idx)))
++ BUG();
++#endif
++ set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++ return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type, kmap_prot);
++}
++
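++/*
++ * Typical usage sketch (illustrative only, callers live elsewhere):
++ *
++ *	void *dst = kmap_atomic(page, KM_USER0);
++ *	memcpy(dst, src, PAGE_SIZE);
++ *	kunmap_atomic(dst, KM_USER0);
++ *
++ * Sleeping is not allowed between the two calls, and the KM_* slot
++ * must match since it selects the per-CPU fixmap entry used above.
++ */
++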
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type,
++ test_bit(PG_pinned, &page->flags)
++ ? PAGE_KERNEL_RO : kmap_prot);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
++ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++ if (vaddr < FIXADDR_START) { // FIXME
++ dec_preempt_count();
++ preempt_check_resched();
++ return;
++ }
++#endif
++
++#if defined(CONFIG_DEBUG_HIGHMEM)
++ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
++ BUG();
++
++ /*
++ * force other mappings to Oops if they try to access
++ * this pte without first remapping it
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++ __flush_tlb_one(vaddr);
++#elif defined(CONFIG_XEN)
++ /*
++ * We must ensure there are no dangling pagetable references when
++ * returning memory to Xen (decrease_reservation).
++ * XXX TODO: We could make this faster by only zapping when
++ * kmap_flush_unused is called but that is trickier and more invasive.
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++#endif
++
++ dec_preempt_count();
++ preempt_check_resched();
++}
++
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
++ */
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ inc_preempt_count();
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++ __flush_tlb_one(vaddr);
++
++ return (void*) vaddr;
++}
++
++struct page *kmap_atomic_to_page(void *ptr)
++{
++ unsigned long idx, vaddr = (unsigned long)ptr;
++ pte_t *pte;
++
++ if (vaddr < FIXADDR_START)
++ return virt_to_page(ptr);
++
++ idx = virt_to_fix(vaddr);
++ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++ return pte_page(*pte);
++}
++
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_pte);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
+diff -rpuN linux-2.6.18.8/arch/i386/mm/hypervisor.c linux-2.6.18-xen-3.3.0/arch/i386/mm/hypervisor.c
+--- linux-2.6.18.8/arch/i386/mm/hypervisor.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/hypervisor.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,602 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ *
++ * Update page tables via the hypervisor.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++#include <linux/highmem.h>
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++ mmu_update_t u;
++#ifdef CONFIG_HIGHPTE
++ u.ptr = ((unsigned long)ptr >= (unsigned long)high_memory) ?
++ arbitrary_virt_to_machine(ptr) : virt_to_machine(ptr);
++#else
++ u.ptr = virt_to_machine(ptr);
++#endif
++ u.val = __pte_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL_GPL(xen_l1_entry_update);
++
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pmd_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pud_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif
++
++#ifdef CONFIG_X86_64
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pgd_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++
++void xen_pt_switch(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_new_user_pt(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_USER_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
++
++void xen_invlpg(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_LOCAL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
++
++#ifdef CONFIG_SMP
++
++void xen_tlb_flush_all(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_ALL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush_mask(cpumask_t *mask)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_all(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_ALL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_INVLPG_MULTI;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#endif /* CONFIG_SMP */
++
++void xen_pgd_pin(unsigned long ptr)
++{
++ struct mmuext_op op;
++#ifdef CONFIG_X86_64
++ op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++ op.cmd = MMUEXT_PIN_L3_TABLE;
++#else
++ op.cmd = MMUEXT_PIN_L2_TABLE;
++#endif
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_UNPIN_TABLE;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_set_ldt(const void *ptr, unsigned int ents)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_SET_LDT;
++ op.arg1.linear_addr = (unsigned long)ptr;
++ op.arg2.nr_ents = ents;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
++
++static void contiguous_bitmap_set(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] |=
++ ((1UL<<end_off)-1) & -(1UL<<start_off);
++ } else {
++ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++ while ( ++curr_idx < end_idx )
++ contiguous_bitmap[curr_idx] = ~0UL;
++ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++ }
++}
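++
++/*
++ * Worked example of the single-word case above (assuming
++ * BITS_PER_LONG == 32): first_page = 3, nr_pages = 4 gives
++ * start_off = 3 and end_off = 7, so the mask is
++ * ((1UL << 7) - 1) & -(1UL << 3) = 0x7f & 0xfffffff8 = 0x78,
++ * i.e. bits 3..6 set, one per page.
++ */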
++
++static void contiguous_bitmap_clear(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] &=
++ -(1UL<<end_off) | ((1UL<<start_off)-1);
++ } else {
++ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++ while ( ++curr_idx != end_idx )
++ contiguous_bitmap[curr_idx] = 0;
++ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++ }
++}
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ unsigned long *in_frames = discontig_frames, out_frame;
++ unsigned long frame, flags;
++ unsigned int i;
++ int rc, success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ /*
++ * Currently an auto-translated guest will not perform I/O, nor will
++ * it require PAE page directories below 4GB. Therefore any calls to
++ * this function are redundant and can be ignored.
++ */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages((void *)vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs, remembering MFNs. */
++ for (i = 0; i < (1U<<order); i++) {
++ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 2. Get a new contiguous memory extent. */
++ out_frame = __pa(vstart) >> PAGE_SHIFT;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == (1UL << order));
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != (1UL << order))
++ BUG();
++ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) == 1);
++ if (!success) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < (1U<<order); i++)
++ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in) != (1UL<<order))
++ BUG();
++ }
++ }
++#endif
++
++ /* 3. Map the new extent in place of old pages. */
++ for (i = 0; i < (1U<<order); i++) {
++ frame = success ? (out_frame + i) : in_frames[i];
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ if (success)
++ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
++ 1UL << order);
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
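++
++/*
++ * Hypothetical usage sketch: a driver needing a 16KB buffer that is
++ * machine-contiguous and addressable below 4GB might do
++ *
++ *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
++ *	if (!buf || xen_create_contiguous_region(buf, 2, 32))
++ *		return -ENOMEM;
++ *
++ * order 2 covers 1 << 2 pages; address_bits 32 bounds the machine
++ * frames the hypervisor may hand back in the exchange.
++ */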
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long *out_frames = discontig_frames, in_frame;
++ unsigned long frame, flags;
++ unsigned int i;
++ int rc, success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap) ||
++ !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages((void *)vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++
++ /* 1. Find start MFN of contiguous extent. */
++ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++ /* 2. Zap current PTEs. */
++ for (i = 0; i < (1U<<order); i++) {
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 3. Do the exchange for non-contiguous MFNs. */
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != 1)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != (1UL << order))
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 4. Map new pages in place of old pages. */
++ for (i = 0; i < (1U<<order); i++) {
++ frame = success ? out_frames[i] : (in_frame + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ balloon_unlock(flags);
++}
++EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
++
++int xen_limit_pages_to_max_mfn(
++ struct page *pages, unsigned int order, unsigned int address_bits)
++{
++ unsigned long flags, frame;
++ unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
++ struct page *page;
++ unsigned int i, n, nr_mcl;
++ int rc, success;
++ DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
++
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .extent_order = 0,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ bitmap_zero(limit_map, 1U << order);
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ /* 0. Scrub the pages. */
++ for (i = 0, n = 0; i < 1U<<order ; i++) {
++ page = &pages[i];
++ if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
++ continue;
++ __set_bit(i, limit_map);
++
++ if (!PageHighMem(page))
++ scrub_pages(page_address(page), 1);
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ scrub_pages(kmap(page), 1);
++ kunmap(page);
++ ++n;
++ }
++#endif
++ }
++ if (bitmap_empty(limit_map, 1U << order))
++ return 0;
++
++ if (n)
++ kmap_flush_unused();
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs (if any), remembering MFNs. */
++ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++ if(!test_bit(i, limit_map))
++ continue;
++ page = &pages[i];
++
++ out_frames[n] = page_to_pfn(page);
++ in_frames[n] = pfn_to_mfn(out_frames[n]);
++
++ if (!PageHighMem(page))
++ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++ (unsigned long)page_address(page),
++ __pte_ma(0), 0);
++
++ set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
++ ++n;
++ }
++ if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++ BUG();
++
++ /* 2. Get new memory below the required limit. */
++ exchange.in.nr_extents = n;
++ exchange.out.nr_extents = n;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == n);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != n)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != n)
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 3. Map the new pages in place of old pages. */
++ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++ if(!test_bit(i, limit_map))
++ continue;
++ page = &pages[i];
++
++ frame = success ? out_frames[n] : in_frames[n];
++
++ if (!PageHighMem(page))
++ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++
++ set_phys_to_machine(page_to_pfn(page), frame);
++ ++n;
++ }
++ if (nr_mcl) {
++ cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++ BUG();
++ }
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
++
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
++{
++ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++ return HYPERVISOR_update_descriptor(
++ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
++
++#define MAX_BATCHED_FULL_PTES 32
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot)
++{
++ int rc = 0, i = 0;
++ mmu_update_t u[MAX_BATCHED_FULL_PTES];
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ if (!xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ do {
++ if (pte_present(*pte)) {
++ u[i].ptr = (__pmd_val(*pmd) & PHYSICAL_PAGE_MASK)
++ | ((unsigned long)pte & ~PAGE_MASK)
++ | MMU_PT_UPDATE_PRESERVE_AD;
++ u[i].val = __pte_val(pte_modify(*pte, newprot));
++ if (++i == MAX_BATCHED_FULL_PTES) {
++ if ((rc = HYPERVISOR_mmu_update(
++ &u[0], i, NULL, DOMID_SELF)) != 0)
++ break;
++ i = 0;
++ }
++ }
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++ if (i)
++ rc = HYPERVISOR_mmu_update( &u[0], i, NULL, DOMID_SELF);
++ pte_unmap_unlock(pte - 1, ptl);
++ BUG_ON(rc && rc != -ENOSYS);
++ return !rc;
++}
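++
++/*
++ * Design note: batching up to MAX_BATCHED_FULL_PTES updates per
++ * HYPERVISOR_mmu_update call amortises hypercall overhead across a
++ * whole mprotect() range, while MMU_PT_UPDATE_PRESERVE_AD asks the
++ * hypervisor to keep any accessed/dirty bits it set concurrently, so
++ * page-reclaim decisions based on those bits stay correct.
++ */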
+diff -rpuN linux-2.6.18.8/arch/i386/mm/init-xen.c linux-2.6.18-xen-3.3.0/arch/i386/mm/init-xen.c
+--- linux-2.6.18.8/arch/i386/mm/init-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/init-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,847 @@
++/*
++ * linux/arch/i386/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
++
++extern unsigned long *contiguous_bitmap;
++
++unsigned int __VMALLOC_RESERVE = 128 << 20;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++#ifdef CONFIG_X86_PAE
++ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++ pud = pud_offset(pgd, 0);
++ if (pmd_table != pmd_offset(pud, 0))
++ BUG();
++#else
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++#endif
++
++ return pmd_table;
++}
++
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
++{
++ if (pmd_none(*pmd)) {
++ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(page_table,
++ XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++ if (page_table != pte_offset_kernel(pmd, 0))
++ BUG();
++
++ return page_table;
++ }
++
++ return pte_offset_kernel(pmd, 0);
++}
++
++/*
++ * This function initializes a certain range of kernel virtual memory
++ * with new bootmem page tables, wherever page tables are missing in
++ * the given range.
++ */
++
++/*
++ * NOTE: The pagetables are allocated contiguously in physical space,
++ * so we can cache the place of the first one and move around without
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int pgd_idx, pmd_idx;
++ unsigned long vaddr;
++
++ vaddr = start;
++ pgd_idx = pgd_index(vaddr);
++ pmd_idx = pmd_index(vaddr);
++ pgd = pgd_base + pgd_idx;
++
++ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++ if (pgd_none(*pgd))
++ one_md_table_init(pgd);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++ if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++ one_page_table_init(pmd);
++
++ vaddr += PMD_SIZE;
++ }
++ pmd_idx = 0;
++ }
++}
++
++static inline int is_kernel_text(unsigned long addr)
++{
++ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++ return 1;
++ return 0;
++}
++
++/*
++ * This maps the physical memory to kernel virtual address space, a total
++ * of max_low_pfn pages, by creating page tables starting from address
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++{
++ unsigned long pfn;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ int pgd_idx, pmd_idx, pte_ofs;
++
++ unsigned long max_ram_pfn = xen_start_info->nr_pages;
++ if (max_ram_pfn > max_low_pfn)
++ max_ram_pfn = max_low_pfn;
++
++ pgd_idx = pgd_index(PAGE_OFFSET);
++ pgd = pgd_base + pgd_idx;
++ pfn = 0;
++ pmd_idx = pmd_index(PAGE_OFFSET);
++ pte_ofs = pte_index(PAGE_OFFSET);
++
++ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++ /*
++ * Native Linux doesn't have PAE paging enabled yet at
++ * this point. When running as a Xen domain we are
++ * already in PAE mode, so we can't simply hook an
++ * empty pmd. That would kill the mappings we are
++ * currently using ...
++ */
++ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++ pmd = one_md_table_init(pgd);
++#endif
++ if (pfn >= max_low_pfn)
++ continue;
++ pmd += pmd_idx;
++ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++ if (address >= hypervisor_virt_start)
++ continue;
++
++ /* Map with big pages if possible, otherwise create normal page tables. */
++ if (cpu_has_pse) {
++ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++ if (is_kernel_text(address) || is_kernel_text(address2))
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++ else
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++ pfn += PTRS_PER_PTE;
++ } else {
++ pte = one_page_table_init(pmd);
++
++ pte += pte_ofs;
++ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++ /* XEN: Only map initial RAM allocation. */
++ if ((pfn >= max_ram_pfn) || pte_present(*pte))
++ continue;
++ if (is_kernel_text(address))
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++ else
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++ }
++ pte_ofs = 0;
++ }
++ }
++ pmd_idx = 0;
++ }
++}
++
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
++{
++ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++ return 1;
++ return 0;
++}
++
++#else
++
++#define page_kills_ppro(p) 0
++
++#endif
++
++extern int is_available_memory(efi_memory_desc_t *);
++
++int page_is_ram(unsigned long pagenr)
++{
++ int i;
++ unsigned long addr, end;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ md = p;
++ if (!is_available_memory(md))
++ continue;
++ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++
++ if (e820.map[i].type != E820_RAM) /* not usable memory */
++ continue;
++ /*
++ * !!!FIXME!!! Some BIOSen report areas as RAM that
++ * are not. Notably the 640->1Mb area. We need a sanity
++ * check here.
++ */
++ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
++
++#define kmap_get_fixmap_pte(vaddr) \
++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++static void __init kmap_init(void)
++{
++ unsigned long kmap_vstart;
++
++ /* cache the first kmap pte */
++ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++
++ kmap_prot = PAGE_KERNEL;
++}
++
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long vaddr;
++
++ vaddr = PKMAP_BASE;
++ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ pte = pte_offset_kernel(pmd, vaddr);
++ pkmap_page_table = pte;
++}
++
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++ init_page_count(page);
++ if (pfn < xen_start_info->nr_pages)
++ __free_page(page);
++ totalhigh_pages++;
++}
++
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++ ClearPageReserved(page);
++ free_new_highpage(page, pfn);
++ } else
++ SetPageReserved(page);
++}
++
++static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++ free_new_highpage(page, pfn);
++ totalram_pages++;
++#ifdef CONFIG_FLATMEM
++ max_mapnr = max(pfn, max_mapnr);
++#endif
++ num_physpages++;
++ return 0;
++}
++
++/*
++ * Not currently handling the NUMA case.
++ * Assuming a single node; all memory that has been
++ * added dynamically and would be onlined here is in
++ * HIGHMEM.
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ add_one_highpage_hotplug(page, page_to_pfn(page));
++}
++
++
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++ int pfn;
++ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++ totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
++
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
++
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
++
++pgd_t *swapper_pg_dir;
++
++static void __init pagetable_init (void)
++{
++ unsigned long vaddr;
++ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++
++ /* Enable PSE if available */
++ if (cpu_has_pse) {
++ set_in_cr4(X86_CR4_PSE);
++ }
++
++ /* Enable PGE if available */
++ if (cpu_has_pge) {
++ set_in_cr4(X86_CR4_PGE);
++ __PAGE_KERNEL |= _PAGE_GLOBAL;
++ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++ }
++
++ kernel_physical_mapping_init(pgd_base);
++ remap_numa_kva();
++
++ /*
++ * Fixed mappings, only the page table structure has to be
++ * created - mappings will be set by set_fixmap():
++ */
++ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++ page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
++
++ permanent_kmaps_init(pgd_base);
++}
++
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++ __attribute__ ((aligned (PAGE_SIZE)));
++
++static inline void save_pg_dir(void)
++{
++ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++ int i;
++
++ save_pg_dir();
++
++ /*
++ * Zap initial low-memory mappings.
++ *
++ * Note that "pgd_clear()" doesn't do it for
++ * us, because pgd_clear() is a no-op on i386.
++ */
++ for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++ set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++ flush_tlb_all();
++}
++
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
++
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on Enable
++ * off Disable
++ */
++void __init noexec_setup(const char *str)
++{
++ if (!strncmp(str, "on",2) && cpu_has_nx) {
++ __supported_pte_mask |= _PAGE_NX;
++ disable_nx = 0;
++ } else if (!strncmp(str,"off",3)) {
++ disable_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
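++
++/*
++ * For example, booting with "noexec=off" on the kernel command line
++ * clears _PAGE_NX from __supported_pte_mask so no mapping is marked
++ * non-executable, while "noexec=on" keeps NX available on CPUs that
++ * support it.
++ */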
++
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
++
++static void __init set_nx(void)
++{
++ unsigned int v[4], l, h;
++
++ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++ if ((v[3] & (1 << 20)) && !disable_nx) {
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ nx_enabled = 1;
++ __supported_pte_mask |= _PAGE_NX;
++ }
++ }
++}
++
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++ pte_t *pte;
++ int ret = 1;
++
++ if (!nx_enabled)
++ goto out;
++
++ pte = lookup_address(vaddr);
++ BUG_ON(!pte);
++
++ if (!pte_exec_kernel(*pte))
++ ret = 0;
++
++ if (enable)
++ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++ else
++ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++ __flush_tlb_all();
++out:
++ return ret;
++}
++
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
++{
++ int i;
++
++#ifdef CONFIG_X86_PAE
++ set_nx();
++ if (nx_enabled)
++ printk("NX (Execute Disable) protection: active\n");
++#endif
++
++ pagetable_init();
++
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ /*
++ * We will bail out later - printk doesn't work right now so
++ * the user would just see a hanging kernel.
++ * when running as xen domain we are already in PAE mode at
++ * this point.
++ */
++ if (cpu_has_pae)
++ set_in_cr4(X86_CR4_PAE);
++#endif
++ __flush_tlb_all();
++
++ kmap_init();
++
++ /* Switch to the real shared_info page, and clear the
++ * dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Setup mapping of lower 1st MB */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_machine(empty_zero_page),
++ PAGE_KERNEL_RO);
++}
++
++/*
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
++ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
++ */
++
++static void __init test_wp_bit(void)
++{
++ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++
++ /* Any page-aligned address will do, the test is non-destructive */
++ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++ boot_cpu_data.wp_works_ok = do_test_wp_bit();
++ clear_fixmap(FIX_WP_TEST);
++
++ if (!boot_cpu_data.wp_works_ok) {
++ printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++ } else {
++ printk("Ok.\n");
++ }
++}
++
++static void __init set_max_mapnr_init(void)
++{
++#ifdef CONFIG_HIGHMEM
++ num_physpages = highend_pfn;
++#else
++ num_physpages = max_low_pfn;
++#endif
++#ifdef CONFIG_FLATMEM
++ max_mapnr = num_physpages;
++#endif
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc;
++
++void __init mem_init(void)
++{
++ extern int ppro_with_ram_bug(void);
++ int codesize, reservedpages, datasize, initsize;
++ int tmp;
++ int bad_ppro;
++ unsigned long pfn;
++
++ contiguous_bitmap = alloc_bootmem_low_pages(
++ (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++
++#if defined(CONFIG_SWIOTLB)
++ swiotlb_init();
++#endif
++
++#ifdef CONFIG_FLATMEM
++ if (!mem_map)
++ BUG();
++#endif
++
++ bad_ppro = ppro_with_ram_bug();
++
++#ifdef CONFIG_HIGHMEM
++ /* check that fixmap and pkmap do not overlap */
++ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++ BUG();
++ }
++#endif
++
++ set_max_mapnr_init();
++
++#ifdef CONFIG_HIGHMEM
++ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++ VMALLOC_START,VMALLOC_END,MAXMEM);
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++
++ /* this will put all low memory onto the freelists */
++ totalram_pages += free_all_bootmem();
++ /* XEN: init and count low-mem pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++
++ reservedpages = 0;
++ for (tmp = 0; tmp < max_low_pfn; tmp++)
++ /*
++ * Only count reserved RAM pages
++ */
++ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++ reservedpages++;
++
++ set_highmem_pages_init(bad_ppro);
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++
++ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ num_physpages << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10,
++ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++ );
++
++#ifdef CONFIG_X86_PAE
++ if (!cpu_has_pae)
++ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++ if (boot_cpu_data.wp_works_ok < 0)
++ test_wp_bit();
++
++ /*
++ * Subtle. SMP is doing its boot stuff late (because it has to
++ * fork idle threads) - but it also needs low mappings for the
++ * protected-mode entry to work. We zap these entries only after
++ * the WP-bit has been tested.
++ */
++#ifndef CONFIG_SMP
++ zap_low_mappings();
++#endif
++
++ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
++
++/*
++ * This is for the non-NUMA, single-node SMP system case.
++ * Specifically, in the case of x86, we will always add
++ * memory to highmem for now.
++ */
++#ifdef CONFIG_MEMORY_HOTPLUG
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdata = &contig_page_data;
++ struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++
++ return __add_pages(zone, start_pfn, nr_pages);
++}
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++#endif
++#endif
++
++kmem_cache_t *pgd_cache;
++kmem_cache_t *pmd_cache;
++
++void __init pgtable_cache_init(void)
++{
++ if (PTRS_PER_PMD > 1) {
++ pmd_cache = kmem_cache_create("pmd",
++ PTRS_PER_PMD*sizeof(pmd_t),
++ PTRS_PER_PMD*sizeof(pmd_t),
++ 0,
++ pmd_ctor,
++ NULL);
++ if (!pmd_cache)
++ panic("pgtable_cache_init(): cannot create pmd cache");
++ }
++ pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++ PTRS_PER_PGD*sizeof(pgd_t),
++ PTRS_PER_PGD*sizeof(pgd_t),
++#else
++ PAGE_SIZE,
++ PAGE_SIZE,
++#endif
++ 0,
++ pgd_ctor,
++ PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
++ if (!pgd_cache)
++ panic("pgtable_cache_init(): Cannot create pgd cache");
++}
++
++/*
++ * This function cannot be __init, since exceptions don't work in that
++ * section. Put this after the callers, so that it cannot be inlined.
++ */
++static int noinline do_test_wp_bit(void)
++{
++ char tmp_reg;
++ int flag;
++
++ __asm__ __volatile__(
++ " movb %0,%1 \n"
++ "1: movb %1,%0 \n"
++ " xorl %2,%2 \n"
++ "2: \n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4 \n"
++ " .long 1b,2b \n"
++ ".previous \n"
++ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++ "=q" (tmp_reg),
++ "=r" (flag)
++ :"2" (1)
++ :"memory");
++
++ return flag;
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
++
++ printk("Write protecting the kernel read-only data: %uk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk gets out at least to give a better debug hint
++ * of who is the culprit.
++ */
++ global_flush_tlb();
++}
++#endif
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
++ free_page(addr);
++ totalram_pages++;
++ }
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++}
++
++void free_initmem(void)
++{
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
+diff -rpuN linux-2.6.18.8/arch/i386/mm/ioremap-xen.c linux-2.6.18-xen-3.3.0/arch/i386/mm/ioremap-xen.c
+--- linux-2.6.18.8/arch/i386/mm/ioremap-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/ioremap-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,443 @@
++/*
++ * arch/i386/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
++
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <asm/io.h>
++#include <asm/fixmap.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++
++#define ISA_START_ADDRESS 0x0
++#define ISA_END_ADDRESS 0x100000
++
++static int direct_remap_area_pte_fn(pte_t *pte,
++ struct page *pmd_page,
++ unsigned long address,
++ void *data)
++{
++ mmu_update_t **v = (mmu_update_t **)data;
++
++ BUG_ON(!pte_none(*pte));
++
++ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ (*v)++;
++
++ return 0;
++}
++
++static int __direct_remap_pfn_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int rc;
++ unsigned long i, start_address;
++ mmu_update_t *u, *v, *w;
++
++ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++ if (u == NULL)
++ return -ENOMEM;
++
++ start_address = address;
++
++ flush_cache_all();
++
++ for (i = 0; i < size; i += PAGE_SIZE) {
++ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
++ /* Flush a full batch after filling in the PTE ptrs. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
++ goto out;
++ v = w = u;
++ start_address = address;
++ }
++
++ /*
++ * Fill in the machine address: PTE ptr is done later by
++ * apply_to_page_range().
++ */
++ v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO;
++
++ mfn++;
++ address += PAGE_SIZE;
++ v++;
++ }
++
++ if (v != u) {
++ /* Final batch. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
++ goto out;
++ }
++
++ rc = 0;
++
++ out:
++ flush_tlb_all();
++
++ free_page((unsigned long)u);
++
++ return rc;
++}
++
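The loop above amortizes hypercall cost: it fills a page-sized array of mmu_update_t entries, flushes the whole batch in one hypercall whenever the array fills, then flushes the remainder at the end. A standalone sketch of the same fill-then-flush idiom, with a hypothetical apply_batch() standing in for the expensive flush:

```c
/*
 * Hedged, standalone sketch of the fill-then-flush batching idiom in
 * __direct_remap_pfn_range() above. apply_batch() is a hypothetical
 * stand-in for the expensive step (apply_to_page_range() plus the
 * HYPERVISOR_mmu_update() hypercall in the real code); the rest is
 * plain C99.
 */
#include <stdio.h>
#include <stddef.h>

#define BATCH_CAP 512	/* plays the role of PAGE_SIZE/sizeof(mmu_update_t) */

struct update { unsigned long ptr, val; };

static void apply_batch(const struct update *u, size_t n)
{
	printf("flushing %zu updates\n", n);	/* one "hypercall" per batch */
}

static void remap_range(unsigned long mfn, size_t npages)
{
	struct update batch[BATCH_CAP];
	size_t fill = 0;

	for (size_t i = 0; i < npages; i++) {
		if (fill == BATCH_CAP) {	/* batch full: flush, restart */
			apply_batch(batch, fill);
			fill = 0;
		}
		batch[fill].ptr = 0;		/* PTE pointer, patched later */
		batch[fill].val = mfn + i;	/* new PTE value */
		fill++;
	}
	if (fill)				/* final partial batch */
		apply_batch(batch, fill);
}

int main(void)
{
	remap_range(0x1000, 1300);	/* flushes of 512, 512, then 276 */
	return 0;
}
```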
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return remap_pfn_range(vma, address, mfn, size, prot);
++
++ if (domid == DOMID_SELF)
++ return -EINVAL;
++
++ vma->vm_flags |= VM_IO | VM_RESERVED;
++
++ vma->vm_mm->context.has_foreign_mappings = 1;
++
++ return __direct_remap_pfn_range(
++ vma->vm_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_remap_pfn_range);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ return __direct_remap_pfn_range(
++ &init_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
++
++static int lookup_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ uint64_t *ptep = (uint64_t *)data;
++ if (ptep)
++ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ return 0;
++}
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep)
++{
++ return apply_to_page_range(mm, address, PAGE_SIZE,
++ lookup_pte_fn, ptep);
++}
++
++EXPORT_SYMBOL(create_lookup_pte_addr);
++
++static int noop_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ return 0;
++}
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size)
++{
++ return apply_to_page_range(mm, address, size, noop_fn, NULL);
++}
++
++EXPORT_SYMBOL(touch_pte_range);
++
++/*
++ * Does @address reside within a non-highmem page that is local to this virtual
++ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
++ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
++ * why this works.
++ */
++static inline int is_local_lowmem(unsigned long address)
++{
++ extern unsigned long max_low_pfn;
++ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
++}
++
++/*
++ * Generic mapping function (not visible outside):
++ */
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++{
++ void __iomem * addr;
++ struct vm_struct * area;
++ unsigned long offset, last_addr;
++ domid_t domid = DOMID_IO;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return (void __iomem *) isa_bus_to_virt(phys_addr);
++
++ /*
++ * Don't allow anybody to remap normal RAM that we're using..
++ */
++ if (is_local_lowmem(phys_addr)) {
++ char *t_addr, *t_end;
++ struct page *page;
++
++ t_addr = bus_to_virt(phys_addr);
++ t_end = t_addr + (size - 1);
++
++ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
++ if(!PageReserved(page))
++ return NULL;
++
++ domid = DOMID_SELF;
++ }
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr+1) - phys_addr;
++
++ /*
++ * Ok, go for it..
++ */
++ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
++ if (!area)
++ return NULL;
++ area->phys_addr = phys_addr;
++ addr = (void __iomem *) area->addr;
++ flags |= _KERNPG_TABLE;
++ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
++ phys_addr>>PAGE_SHIFT,
++ size, __pgprot(flags), domid)) {
++ vunmap((void __force *) addr);
++ return NULL;
++ }
++ return (void __iomem *) (offset + (char __iomem *)addr);
++}
++EXPORT_SYMBOL(__ioremap);
++
++/**
++ * ioremap_nocache - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many
++ * busses. In particular, driver authors should read up on PCI writes.
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable.
++ *
++ * Must be freed with iounmap.
++ */
++
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++{
++ unsigned long last_addr;
++ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
++ if (!p)
++ return p;
++
++ /* Guaranteed to be > phys_addr, as per __ioremap() */
++ last_addr = phys_addr + size - 1;
++
++ if (is_local_lowmem(last_addr)) {
++ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
++ unsigned long npages;
++
++ phys_addr &= PAGE_MASK;
++
++ /* This might overflow and become zero.. */
++ last_addr = PAGE_ALIGN(last_addr);
++
++ /* .. but that's ok, because modulo-2**n arithmetic will make
++ * the page-aligned "last - first" come out right.
++ */
++ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
++
++ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
++ iounmap(p);
++ p = NULL;
++ }
++ global_flush_tlb();
++ }
++
++ return p;
++}
++EXPORT_SYMBOL(ioremap_nocache);
++
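For orientation, a hedged usage sketch of the mapping lifecycle these exports provide. MY_REG_PHYS and MY_REG_LEN are made-up placeholders rather than values from this patch, and accesses go through readl()/writel(), never raw pointer dereferences:

```c
/*
 * Hedged usage sketch only: a driver mapping a hypothetical MMIO
 * region with ioremap_nocache() and releasing it with iounmap().
 * MY_REG_PHYS/MY_REG_LEN are illustrative placeholders.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/io.h>

#define MY_REG_PHYS 0xfed00000UL	/* hypothetical register base */
#define MY_REG_LEN  0x1000UL

static void __iomem *regs;

static int __init my_probe(void)
{
	regs = ioremap_nocache(MY_REG_PHYS, MY_REG_LEN);
	if (!regs)
		return -ENOMEM;
	writel(0x1, regs);		/* use readl/writel on __iomem memory */
	return 0;
}

static void __exit my_remove(void)
{
	iounmap(regs);			/* exactly one iounmap per mapping */
}

module_init(my_probe);
module_exit(my_remove);
MODULE_LICENSE("GPL");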
++/**
++ * iounmap - Free an IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(volatile void __iomem *addr)
++{
++ struct vm_struct *p, *o;
++
++ if ((void __force *)addr <= high_memory)
++ return;
++
++ /*
++ * __ioremap special-cases the PCI/ISA range by not instantiating a
++ * vm_area and by simply returning an address into the kernel mapping
++ * of ISA space. So handle that here.
++ */
++ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++
++ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
++
++ /* Use the vm area unlocked, assuming the caller
++ ensures there isn't another iounmap for the same address
++ in parallel. Reuse of the virtual address is prevented by
++ leaving it in the global lists until we're done with it.
++ cpa takes care of the direct mappings. */
++ read_lock(&vmlist_lock);
++ for (p = vmlist; p; p = p->next) {
++ if (p->addr == addr)
++ break;
++ }
++ read_unlock(&vmlist_lock);
++
++ if (!p) {
++ printk("iounmap: bad address %p\n", addr);
++ dump_stack();
++ return;
++ }
++
++ /* Reset the direct mapping. Can block */
++ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
++ /* p->size includes the guard page, but cpa doesn't like that */
++ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
++ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
++ PAGE_KERNEL);
++ global_flush_tlb();
++ }
++
++ /* Finally remove it */
++ o = remove_vm_area((void *)addr);
++ BUG_ON(p != o || o == NULL);
++ kfree(p);
++}
++EXPORT_SYMBOL(iounmap);
++
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
++{
++ unsigned long offset, last_addr;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return isa_bus_to_virt(phys_addr);
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr) - phys_addr;
++
++ /*
++ * Mappings have to fit in the FIX_BTMAP area.
++ */
++ nrpages = size >> PAGE_SHIFT;
++ if (nrpages > NR_FIX_BTMAPS)
++ return NULL;
++
++ /*
++ * Ok, go for it..
++ */
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ set_fixmap(idx, phys_addr);
++ phys_addr += PAGE_SIZE;
++ --idx;
++ --nrpages;
++ }
++ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
++}
++
++void __init bt_iounmap(void *addr, unsigned long size)
++{
++ unsigned long virt_addr;
++ unsigned long offset;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ virt_addr = (unsigned long)addr;
++ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
++ return;
++ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++ offset = virt_addr & ~PAGE_MASK;
++ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ clear_fixmap(idx);
++ --idx;
++ --nrpages;
++ }
++}
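bt_ioremap()/bt_iounmap() cover the narrow boot-time window before the regular ioremap() machinery is available, mapping through the small FIX_BTMAP fixmap range. A hedged sketch of the intended call pattern; the table address and signature check are illustrative placeholders, not from this patch:

```c
/*
 * Hedged sketch: early-boot peek at a firmware table via bt_ioremap(),
 * using the prototypes defined in the file above. The '$' signature
 * check is a made-up example.
 */
static int __init peek_boot_table(unsigned long table_phys)
{
	unsigned char *p = bt_ioremap(table_phys, 16);
	int ok;

	if (!p)
		return -1;
	ok = (p[0] == '$');		/* hypothetical signature check */
	bt_iounmap(p, 16);		/* release the fixmap slots promptly */
	return ok ? 0 : -1;
}
```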
+diff -rpuN linux-2.6.18.8/arch/i386/mm/Makefile linux-2.6.18-xen-3.3.0/arch/i386/mm/Makefile
+--- linux-2.6.18.8/arch/i386/mm/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -8,3 +8,4 @@ obj-$(CONFIG_NUMA) += discontig.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
++obj-$(CONFIG_XEN) += hypervisor.o
+diff -rpuN linux-2.6.18.8/arch/i386/mm/pageattr.c linux-2.6.18-xen-3.3.0/arch/i386/mm/pageattr.c
+--- linux-2.6.18.8/arch/i386/mm/pageattr.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/pageattr.c 2008-08-21 11:36:07.000000000 +0200
+@@ -84,7 +84,7 @@ static void set_pmd_pte(pte_t *kpte, uns
+ unsigned long flags;
+
+ set_pte_atomic(kpte, pte); /* change init_mm */
+- if (PTRS_PER_PMD > 1)
++ if (HAVE_SHARED_KERNEL_PMD)
+ return;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+diff -rpuN linux-2.6.18.8/arch/i386/mm/pgtable.c linux-2.6.18-xen-3.3.0/arch/i386/mm/pgtable.c
+--- linux-2.6.18.8/arch/i386/mm/pgtable.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/pgtable.c 2008-08-21 11:36:07.000000000 +0200
+@@ -12,6 +12,7 @@
+ #include <linux/slab.h>
+ #include <linux/pagemap.h>
+ #include <linux/spinlock.h>
++#include <linux/module.h>
+
+ #include <asm/system.h>
+ #include <asm/pgtable.h>
+@@ -137,6 +138,10 @@ void set_pmd_pfn(unsigned long vaddr, un
+ __flush_tlb_one(vaddr);
+ }
+
++static int nr_fixmaps = 0;
++unsigned long __FIXADDR_TOP = 0xfffff000;
++EXPORT_SYMBOL(__FIXADDR_TOP);
++
+ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+ {
+ unsigned long address = __fix_to_virt(idx);
+@@ -146,6 +151,13 @@ void __set_fixmap (enum fixed_addresses
+ return;
+ }
+ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++ nr_fixmaps++;
++}
++
++void set_fixaddr_top(unsigned long top)
++{
++ BUG_ON(nr_fixmaps > 0);
++ __FIXADDR_TOP = top - PAGE_SIZE;
+ }
+
+ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+@@ -214,9 +226,10 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
+ spin_lock_irqsave(&pgd_lock, flags);
+ }
+
+- clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+- swapper_pg_dir + USER_PTRS_PER_PGD,
+- KERNEL_PGD_PTRS);
++ if (PTRS_PER_PMD == 1 || HAVE_SHARED_KERNEL_PMD)
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
+ if (PTRS_PER_PMD > 1)
+ return;
+
+@@ -248,6 +261,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ goto out_oom;
+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+ }
++
++ if (!HAVE_SHARED_KERNEL_PMD) {
++ unsigned long flags;
++
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd)
++ goto out_oom;
++ set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++ }
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++ pgd_t *kpgd = pgd_offset_k(v);
++ pud_t *kpud = pud_offset(kpgd, v);
++ pmd_t *kpmd = pmd_offset(kpud, v);
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ memcpy(pmd, kpmd, PAGE_SIZE);
++ }
++ pgd_list_add(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++
+ return pgd;
+
+ out_oom:
+@@ -262,9 +299,23 @@ void pgd_free(pgd_t *pgd)
+ int i;
+
+ /* in the PAE case user pgd entries are overwritten before usage */
+- if (PTRS_PER_PMD > 1)
+- for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+- kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
++ if (PTRS_PER_PMD > 1) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ kmem_cache_free(pmd_cache, pmd);
++ }
++ if (!HAVE_SHARED_KERNEL_PMD) {
++ unsigned long flags;
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++ kmem_cache_free(pmd_cache, pmd);
++ }
++ }
++ }
+ /* in the non-PAE case, free_pgtables() clears user pgd entries */
+ kmem_cache_free(pgd_cache, pgd);
+ }
+diff -rpuN linux-2.6.18.8/arch/i386/mm/pgtable-xen.c linux-2.6.18-xen-3.3.0/arch/i386/mm/pgtable-xen.c
+--- linux-2.6.18.8/arch/i386/mm/pgtable-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/mm/pgtable-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,725 @@
++/*
++ * linux/arch/i386/mm/pgtable.c
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++
++#include <xen/features.h>
++#include <asm/hypervisor.h>
++
++static void pgd_test_and_unpin(pgd_t *pgd);
++
++void show_mem(void)
++{
++ int total = 0, reserved = 0;
++ int shared = 0, cached = 0;
++ int highmem = 0;
++ struct page *page;
++ pg_data_t *pgdat;
++ unsigned long i;
++ unsigned long flags;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++ for_each_online_pgdat(pgdat) {
++ pgdat_resize_lock(pgdat, &flags);
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pgdat_page_nr(pgdat, i);
++ total++;
++ if (PageHighMem(page))
++ highmem++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ pgdat_resize_unlock(pgdat, &flags);
++ }
++ printk(KERN_INFO "%d pages of RAM\n", total);
++ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
++ printk(KERN_INFO "%d reserved pages\n", reserved);
++ printk(KERN_INFO "%d pages shared\n", shared);
++ printk(KERN_INFO "%d pages swap cached\n", cached);
++
++ printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
++ printk(KERN_INFO "%lu pages writeback\n",
++ global_page_state(NR_WRITEBACK));
++ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
++ printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
++ printk(KERN_INFO "%lu pages pagetables\n",
++ global_page_state(NR_PAGETABLE));
++}
++
++/*
++ * Associate a large virtual page frame with a given physical page frame
++ * and protection flags for that frame. pfn is for the base of the page,
++ * vaddr is what the page gets mapped to - both must be properly aligned.
++ * The pmd must already be instantiated. Assumes PAE mode.
++ */
++void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
++ return; /* BUG(); */
++ }
++ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
++ return; /* BUG(); */
++ }
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
++ return; /* BUG(); */
++ }
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ set_pmd(pmd, pfn_pmd(pfn, flags));
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static int nr_fixmaps = 0;
++unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
++unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
++EXPORT_SYMBOL(__FIXADDR_TOP);
++
++void __init set_fixaddr_top(unsigned long top)
++{
++ BUG_ON(nr_fixmaps > 0);
++ hypervisor_virt_start = top;
++ __FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
++}
++
++void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
++{
++ unsigned long address = __fix_to_virt(idx);
++ pte_t pte;
++
++ if (idx >= __end_of_fixed_addresses) {
++ BUG();
++ return;
++ }
++ switch (idx) {
++ case FIX_WP_TEST:
++ case FIX_VDSO:
++ pte = pfn_pte(phys >> PAGE_SHIFT, flags);
++ break;
++ default:
++ pte = pfn_pte_ma(phys >> PAGE_SHIFT, flags);
++ break;
++ }
++ if (HYPERVISOR_update_va_mapping(address, pte,
++ UVMF_INVLPG|UVMF_ALL))
++ BUG();
++ nr_fixmaps++;
++}
++
++pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++ if (pte)
++ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
++ return pte;
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++#ifdef CONFIG_HIGHPTE
++ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
++#else
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++#endif
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long pfn = page_to_pfn(pte);
++
++ if (!PageHighMem(pte)) {
++ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(pfn, PAGE_KERNEL), 0))
++ BUG();
++ } else
++ clear_bit(PG_pinned, &pte->flags);
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++
++void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
++{
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++}
++
++/*
++ * List of all pgd's needed for non-PAE so it can invalidate entries
++ * in both cached and uncached pgd's; not needed for PAE since the
++ * kernel pmd is shared. If PAE were not to share the pmd a similar
++ * tactic would be needed. This is essentially codepath-based locking
++ * against pageattr.c; it is the unique case in which a valid change
++ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
++ * vmalloc faults work because attached pagetables are never freed.
++ * The locking scheme was chosen on the basis of manfred's
++ * recommendations and having no core impact whatsoever.
++ * -- wli
++ */
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++ page->index = (unsigned long)pgd_list;
++ if (pgd_list)
++ set_page_private(pgd_list, (unsigned long)&page->index);
++ pgd_list = page;
++ set_page_private(page, (unsigned long)&pgd_list);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page_private(page);
++ *pprev = next;
++ if (next)
++ set_page_private(next, (unsigned long)pprev);
++}
++
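pgd_list_add()/pgd_list_del() above thread an intrusive list through struct page itself, using page->index as the next pointer and page_private() as a pointer to the previous element's next field, so deletion is O(1) with no head lookup or traversal. The same "pprev" idiom in standalone C:

```c
/*
 * Hedged, standalone model of the "pprev" intrusive-list idiom used
 * by pgd_list_add()/pgd_list_del() above: each node stores a pointer
 * to whatever pointer points at it. Plain C, no kernel types.
 */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* plays the role of page->index */
	struct node **pprev;	/* plays the role of page_private() */
};

static struct node *list;

static void list_add(struct node *n)
{
	n->next = list;
	if (list)
		list->pprev = &n->next;
	list = n;
	n->pprev = &list;
}

static void list_del(struct node *n)
{
	*n->pprev = n->next;	/* unlink in O(1), head not needed */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };

	list_add(&a);
	list_add(&b);		/* list: b -> a */
	list_del(&b);
	assert(list == &a && a.next == NULL);
	list_del(&a);
	assert(list == NULL);
	return 0;
}
```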
++void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++ unsigned long flags;
++
++ if (PTRS_PER_PMD > 1) {
++ if (HAVE_SHARED_KERNEL_PMD)
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ } else {
++ spin_lock_irqsave(&pgd_lock, flags);
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++ pgd_list_add(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++}
++
++/* never called when PTRS_PER_PMD > 1 */
++void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++ unsigned long flags; /* can be called from interrupt context */
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ pgd_test_and_unpin(pgd);
++}
++
++pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ int i;
++ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
++ pmd_t **pmd;
++ unsigned long flags;
++
++ pgd_test_and_unpin(pgd);
++
++ if (PTRS_PER_PMD == 1 || !pgd)
++ return pgd;
++
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd)
++ goto out_oom;
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++ }
++ return pgd;
++ }
++
++ /*
++ * We can race save/restore (if we sleep during a GFP_KERNEL memory
++ * allocation). We therefore store virtual addresses of pmds as they
++ * do not change across save/restore, and poke the machine addresses
++ * into the pgdir under the pgd_lock.
++ */
++ pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
++ if (!pmd) {
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++ }
++
++ /* Allocate pmds, remember virtual addresses. */
++ for (i = 0; i < PTRS_PER_PGD; ++i) {
++ pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd[i])
++ goto out_oom;
++ }
++
++ spin_lock_irqsave(&pgd_lock, flags);
++
++ /* Protect against save/restore: move below 4GB under pgd_lock. */
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
++ int rc = xen_create_contiguous_region(
++ (unsigned long)pgd, 0, 32);
++ if (rc) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ goto out_oom;
++ }
++ }
++
++ /* Copy kernel pmd contents and write-protect the new pmds. */
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++ pgd_t *kpgd = pgd_offset_k(v);
++ pud_t *kpud = pud_offset(kpgd, v);
++ pmd_t *kpmd = pmd_offset(kpud, v);
++ memcpy(pmd[i], kpmd, PAGE_SIZE);
++ make_lowmem_page_readonly(
++ pmd[i], XENFEAT_writable_page_tables);
++ }
++
++ /* It is safe to poke machine addresses of pmds under the pmd_lock. */
++ for (i = 0; i < PTRS_PER_PGD; i++)
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
++
++ /* Ensure this pgd gets picked up and pinned on save/restore. */
++ pgd_list_add(pgd);
++
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ kfree(pmd);
++
++ return pgd;
++
++out_oom:
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache,
++ (void *)__va(pgd_val(pgd[i])-1));
++ } else {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache, pmd[i]);
++ kfree(pmd);
++ }
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++}
++
++void pgd_free(pgd_t *pgd)
++{
++ int i;
++
++ /*
++ * After this the pgd should not be pinned for the duration of this
++ * function's execution. We should never sleep and thus never race:
++ * 1. User pmds will not become write-protected under our feet due
++ * to a concurrent mm_pin_all().
++ * 2. The machine addresses in PGD entries will not become invalid
++ * due to a concurrent save/restore.
++ */
++ pgd_test_and_unpin(pgd);
++
++ /* in the PAE case user pgd entries are overwritten before usage */
++ if (PTRS_PER_PMD > 1) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!HAVE_SHARED_KERNEL_PMD) {
++ unsigned long flags;
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ make_lowmem_page_writable(
++ pmd, XENFEAT_writable_page_tables);
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
++ xen_destroy_contiguous_region(
++ (unsigned long)pgd, 0);
++ }
++ }
++
++ /* in the non-PAE case, free_pgtables() clears user pgd entries */
++ kmem_cache_free(pgd_cache, pgd);
++}
++
++void make_lowmem_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_lowmem_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_wrprotect(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn >= highstart_pfn)
++ kmap_flush_unused(); /* flush stale writable kmaps */
++ else
++#endif
++ make_lowmem_page_readonly(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_mkwrite(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn < highstart_pfn)
++#endif
++ make_lowmem_page_writable(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_readonly(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_writable(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++static void _pin_lock(struct mm_struct *mm, int lock) {
++ if (lock)
++ spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++ /* While mm->page_table_lock protects us against insertions and
++ * removals of higher level page table pages, it doesn't protect
++ * against updates of pte-s. Such updates, however, require the
++ * pte pages to be in consistent state (unpinned+writable or
++ * pinned+readonly). The pinning and attribute changes, however
++ * cannot be done atomically, which is why such updates must be
++ * prevented from happening concurrently.
++ * Note that no pte lock can ever elsewhere be acquired nesting
++ * with an already acquired one in the same mm, or with the mm's
++ * page_table_lock already acquired, as that would break in the
++ * non-split case (where all these are actually resolving to the
++ * one page_table_lock). Thus acquiring all of them here is not
++ * going to result in deadlocks, and the order of acquires
++ * doesn't matter.
++ */
++ {
++ pgd_t *pgd = mm->pgd;
++ unsigned g;
++
++ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ pud_t *pud;
++ unsigned u;
++
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ pmd_t *pmd;
++ unsigned m;
++
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ spinlock_t *ptl;
++
++ if (pmd_none(*pmd))
++ continue;
++ ptl = pte_lockptr(0, pmd);
++ if (lock)
++ spin_lock(ptl);
++ else
++ spin_unlock(ptl);
++ }
++ }
++ }
++ }
++#endif
++ if (!lock)
++ spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
++#define PIN_BATCH 4
++static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
++
++static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
++ unsigned int cpu, unsigned seq)
++{
++ unsigned long pfn = page_to_pfn(page);
++
++ if (PageHighMem(page)) {
++ if (pgprot_val(flags) & _PAGE_RW)
++ clear_bit(PG_pinned, &page->flags);
++ else
++ set_bit(PG_pinned, &page->flags);
++ } else {
++ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (unlikely(++seq == PIN_BATCH)) {
++ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++ PIN_BATCH, NULL)))
++ BUG();
++ seq = 0;
++ }
++ }
++
++ return seq;
++}
++
++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
++{
++ pgd_t *pgd = pgd_base;
++ pud_t *pud;
++ pmd_t *pmd;
++ int g, u, m;
++ unsigned int cpu, seq;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return;
++
++ cpu = get_cpu();
++
++ for (g = 0, seq = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ seq = pgd_walk_set_prot(virt_to_page(pud), flags, cpu, seq);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ seq = pgd_walk_set_prot(virt_to_page(pmd), flags, cpu, seq);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ seq = pgd_walk_set_prot(pmd_page(*pmd), flags, cpu, seq);
++ }
++ }
++ }
++
++ if (likely(seq != 0)) {
++ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++ (unsigned long)pgd_base,
++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH);
++ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++ seq + 1, NULL)))
++ BUG();
++ } else if(HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH))
++ BUG();
++
++ put_cpu();
++}
++
++static void __pgd_pin(pgd_t *pgd)
++{
++ pgd_walk(pgd, PAGE_KERNEL_RO);
++ kmap_flush_unused();
++ xen_pgd_pin(__pa(pgd));
++ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void __pgd_unpin(pgd_t *pgd)
++{
++ xen_pgd_unpin(__pa(pgd));
++ pgd_walk(pgd, PAGE_KERNEL);
++ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void pgd_test_and_unpin(pgd_t *pgd)
++{
++ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
++ __pgd_unpin(pgd);
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ pin_lock(mm);
++ __pgd_pin(mm->pgd);
++ pin_unlock(mm);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ pin_lock(mm);
++ __pgd_unpin(mm->pgd);
++ pin_unlock(mm);
++}
++
++void mm_pin_all(void)
++{
++ struct page *page;
++ unsigned long flags;
++
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the pgd_list. Also protects
++ * __pgd_pin() by disabling preemption.
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (page = pgd_list; page; page = (struct page *)page->index) {
++ if (!test_bit(PG_pinned, &page->flags))
++ __pgd_pin((pgd_t *)page_address(page));
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
++ (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings)
++ mm_unpin(mm);
++}
+diff -rpuN linux-2.6.18.8/arch/i386/oprofile/Makefile linux-2.6.18-xen-3.3.0/arch/i386/oprofile/Makefile
+--- linux-2.6.18.8/arch/i386/oprofile/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/oprofile/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++ xenoprofile.o)
++oprofile-y := $(DRIVER_OBJS) \
++ $(XENOPROF_COMMON_OBJS) xenoprof.o
++else
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
+ op_model_ppro.o op_model_p4.o
+ oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
++endif
+diff -rpuN linux-2.6.18.8/arch/i386/oprofile/xenoprof.c linux-2.6.18-xen-3.3.0/arch/i386/oprofile/xenoprof.c
+--- linux-2.6.18.8/arch/i386/oprofile/xenoprof.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/oprofile/xenoprof.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,179 @@
++/**
++ * @file xenoprof.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * x86-specific part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/oprofile.h>
++#include <linux/sched.h>
++#include <asm/pgtable.h>
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/xenoprof.h>
++#include "op_counter.h"
++
++static unsigned int num_events = 0;
++
++void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
++{
++ num_events = init->num_events;
++ /* just in case - make sure we do not overflow event list
++ (i.e. counter_config list) */
++ if (num_events > OP_MAX_COUNTER) {
++ num_events = OP_MAX_COUNTER;
++ init->num_events = num_events;
++ }
++}
++
++void xenoprof_arch_counter(void)
++{
++ int i;
++ struct xenoprof_counter counter;
++
++ for (i=0; i<num_events; i++) {
++ counter.ind = i;
++ counter.count = (uint64_t)counter_config[i].count;
++ counter.enabled = (uint32_t)counter_config[i].enabled;
++ counter.event = (uint32_t)counter_config[i].event;
++ counter.kernel = (uint32_t)counter_config[i].kernel;
++ counter.user = (uint32_t)counter_config[i].user;
++ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_counter,
++ &counter));
++ }
++}
++
++void xenoprof_arch_start(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_stop(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
++{
++ if (sbuf->buffer) {
++ vunmap(sbuf->buffer);
++ sbuf->buffer = NULL;
++ }
++}
++
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int npages, ret;
++ struct vm_struct *area;
++
++ sbuf->buffer = NULL;
++ if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
++ return ret;
++
++ npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL)
++ return -ENOMEM;
++
++ if ( (ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ get_buffer->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
++ DOMID_SELF)) ) {
++ vunmap(area->addr);
++ return ret;
++ }
++
++ sbuf->buffer = area->addr;
++ return ret;
++}
++
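The npages expression above is the usual round-up ("ceiling") division, (len - 1)/PAGE_SIZE + 1, which for len > 0 matches what the kernel's later DIV_ROUND_UP() macro computes. A standalone check of the arithmetic:

```c
/* Hedged aside: round-up division as used for npages above (len > 0). */
#include <assert.h>

#define CEIL_DIV(len, sz) (((len) - 1) / (sz) + 1)

int main(void)
{
	assert(CEIL_DIV(1, 4096) == 1);		/* partial page -> 1 page  */
	assert(CEIL_DIV(4096, 4096) == 1);	/* exact page   -> 1 page  */
	assert(CEIL_DIV(4097, 4096) == 2);	/* spills over  -> 2 pages */
	return 0;
}
```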
++int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int ret;
++ int npages;
++ struct vm_struct *area;
++ pgprot_t prot = __pgprot(_KERNPG_TABLE);
++
++ sbuf->buffer = NULL;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
++ if (ret)
++ goto out;
++
++ npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ pdomain->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, prot, DOMID_SELF);
++ if (ret) {
++ vunmap(area->addr);
++ goto out;
++ }
++ sbuf->buffer = area->addr;
++
++out:
++ return ret;
++}
++
++struct op_counter_config counter_config[OP_MAX_COUNTER];
++
++int xenoprof_create_files(struct super_block * sb, struct dentry * root)
++{
++ unsigned int i;
++
++ for (i = 0; i < num_events; ++i) {
++ struct dentry * dir;
++ char buf[2];
++
++ snprintf(buf, 2, "%d", i);
++ dir = oprofilefs_mkdir(sb, root, buf);
++ oprofilefs_create_ulong(sb, dir, "enabled",
++ &counter_config[i].enabled);
++ oprofilefs_create_ulong(sb, dir, "event",
++ &counter_config[i].event);
++ oprofilefs_create_ulong(sb, dir, "count",
++ &counter_config[i].count);
++ oprofilefs_create_ulong(sb, dir, "unit_mask",
++ &counter_config[i].unit_mask);
++ oprofilefs_create_ulong(sb, dir, "kernel",
++ &counter_config[i].kernel);
++ oprofilefs_create_ulong(sb, dir, "user",
++ &counter_config[i].user);
++ }
++
++ return 0;
++}
++
++int __init oprofile_arch_init(struct oprofile_operations * ops)
++{
++ return xenoprofile_init(ops);
++}
++
++void oprofile_arch_exit(void)
++{
++ xenoprofile_exit();
++}
+diff -rpuN linux-2.6.18.8/arch/i386/pci/irq.c linux-2.6.18-xen-3.3.0/arch/i386/pci/irq.c
+--- linux-2.6.18.8/arch/i386/pci/irq.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/pci/irq.c 2008-08-21 11:36:07.000000000 +0200
+@@ -543,6 +543,16 @@ static __init int intel_router_probe(str
+ case PCI_DEVICE_ID_INTEL_ICH8_2:
+ case PCI_DEVICE_ID_INTEL_ICH8_3:
+ case PCI_DEVICE_ID_INTEL_ICH8_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_0:
++ case PCI_DEVICE_ID_INTEL_ICH9_1:
++ case PCI_DEVICE_ID_INTEL_ICH9_2:
++ case PCI_DEVICE_ID_INTEL_ICH9_3:
++ case PCI_DEVICE_ID_INTEL_ICH9_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_5:
++ case PCI_DEVICE_ID_INTEL_ICH10_0:
++ case PCI_DEVICE_ID_INTEL_ICH10_1:
++ case PCI_DEVICE_ID_INTEL_ICH10_2:
++ case PCI_DEVICE_ID_INTEL_ICH10_3:
+ r->name = "PIIX/ICH";
+ r->get = pirq_piix_get;
+ r->set = pirq_piix_set;
+diff -rpuN linux-2.6.18.8/arch/i386/pci/irq-xen.c linux-2.6.18-xen-3.3.0/arch/i386/pci/irq-xen.c
+--- linux-2.6.18.8/arch/i386/pci/irq-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/pci/irq-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1215 @@
++/*
++ * Low-Level PCI Support for PC -- Routing of Interrupts
++ *
++ * (c) 1999--2000 Martin Mares <mj@ucw.cz>
++ */
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/dmi.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/io_apic.h>
++#include <linux/irq.h>
++#include <linux/acpi.h>
++
++#include "pci.h"
++
++#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
++#define PIRQ_VERSION 0x0100
++
++static int broken_hp_bios_irq9;
++static int acer_tm360_irqrouting;
++
++static struct irq_routing_table *pirq_table;
++
++static int pirq_enable_irq(struct pci_dev *dev);
++
++/*
++ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
++ * Avoid using: 13, 14 and 15 (FP error and IDE).
++ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
++ */
++unsigned int pcibios_irq_mask = 0xfff8;
++
++static int pirq_penalty[16] = {
++ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
++ 0, 0, 0, 0, 1000, 100000, 100000, 100000
++};
++
++struct irq_router {
++ char *name;
++ u16 vendor, device;
++ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
++ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
++};
++
++struct irq_router_handler {
++ u16 vendor;
++ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
++};
++
++int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
++void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
++
++/*
++ * Check passed address for the PCI IRQ Routing Table signature
++ * and perform checksum verification.
++ */
++
++static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
++{
++ struct irq_routing_table *rt;
++ int i;
++ u8 sum;
++
++ rt = (struct irq_routing_table *) addr;
++ if (rt->signature != PIRQ_SIGNATURE ||
++ rt->version != PIRQ_VERSION ||
++ rt->size % 16 ||
++ rt->size < sizeof(struct irq_routing_table))
++ return NULL;
++ sum = 0;
++ for (i=0; i < rt->size; i++)
++ sum += addr[i];
++ if (!sum) {
++ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
++ return rt;
++ }
++ return NULL;
++}
++
++
++
++/*
++ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
++ */
++
++static struct irq_routing_table * __init pirq_find_routing_table(void)
++{
++ u8 *addr;
++ struct irq_routing_table *rt;
++
++#ifdef CONFIG_XEN
++ if (!is_initial_xendomain())
++ return NULL;
++#endif
++ if (pirq_table_addr) {
++ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
++ if (rt)
++ return rt;
++ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
++ }
++ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
++ rt = pirq_check_routing_table(addr);
++ if (rt)
++ return rt;
++ }
++ return NULL;
++}
++
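pirq_find_routing_table() scans 0xf0000-0xfffff in 16-byte steps for the table signature and accepts a candidate only if every byte of the table sums to zero mod 256. A standalone model of that scan; the field offsets below are illustrative, not the exact $PIR layout:

```c
/*
 * Hedged, standalone model of the signature-plus-checksum scan in
 * pirq_find_routing_table()/pirq_check_routing_table() above. The
 * size field offset is a simplification of the real $PIR layout.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char *scan(const unsigned char *base, size_t len,
				 const char sig[4])
{
	for (size_t off = 0; off + 16 <= len; off += 16) {
		const unsigned char *p = base + off;
		size_t size;
		unsigned char sum = 0;

		if (memcmp(p, sig, 4))
			continue;
		size = p[4] | (p[5] << 8);	/* modelled size field */
		if (size < 16 || off + size > len)
			continue;
		for (size_t i = 0; i < size; i++)
			sum += p[i];		/* whole table, mod 256 */
		if (sum == 0)			/* checksum byte fixes sum */
			return p;
	}
	return NULL;
}

int main(void)
{
	unsigned char img[64] = { 0 }, sum = 0;

	memcpy(img + 32, "$PIR", 4);		/* table at offset 32 */
	img[36] = 32;				/* size = 32 bytes */
	for (int i = 32; i < 63; i++)
		sum += img[i];
	img[63] = (unsigned char)(0x100 - sum);	/* fix up checksum */
	printf("found: %s\n", scan(img, sizeof img, "$PIR") ? "yes" : "no");
	return 0;
}
```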
++/*
++ * If we have a IRQ routing table, use it to search for peer host
++ * bridges. It's a gross hack, but since there are no other known
++ * ways how to get a list of buses, we have to go this way.
++ */
++
++static void __init pirq_peer_trick(void)
++{
++ struct irq_routing_table *rt = pirq_table;
++ u8 busmap[256];
++ int i;
++ struct irq_info *e;
++
++ memset(busmap, 0, sizeof(busmap));
++ for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
++ e = &rt->slots[i];
++#ifdef DEBUG
++ {
++ int j;
++ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
++ for(j=0; j<4; j++)
++ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
++ DBG("\n");
++ }
++#endif
++ busmap[e->bus] = 1;
++ }
++ for(i = 1; i < 256; i++) {
++ if (!busmap[i] || pci_find_bus(0, i))
++ continue;
++ if (pci_scan_bus(i, &pci_root_ops, NULL))
++ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
++ }
++ pcibios_last_bus = -1;
++}
++
++/*
++ * Code for querying and setting of IRQ routes on various interrupt routers.
++ */
++
++void eisa_set_level_irq(unsigned int irq)
++{
++ unsigned char mask = 1 << (irq & 7);
++ unsigned int port = 0x4d0 + (irq >> 3);
++ unsigned char val;
++ static u16 eisa_irq_mask;
++
++ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
++ return;
++
++ eisa_irq_mask |= (1 << irq);
++ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
++ val = inb(port);
++ if (!(val & mask)) {
++ DBG(KERN_DEBUG " -> edge");
++ outb(val | mask, port);
++ }
++}
++
++/*
++ * Common IRQ routing practice: nybbles in config space,
++ * offset by some magic constant.
++ */
++static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
++{
++ u8 x;
++ unsigned reg = offset + (nr >> 1);
++
++ pci_read_config_byte(router, reg, &x);
++ return (nr & 1) ? (x >> 4) : (x & 0xf);
++}
++
++static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
++{
++ u8 x;
++ unsigned reg = offset + (nr >> 1);
++
++ pci_read_config_byte(router, reg, &x);
++ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
++ pci_write_config_byte(router, reg, x);
++}
++
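The two helpers above pack one 4-bit routing value per link: offset + (nr >> 1) selects the config-space byte, and nr & 1 selects the high or low nibble within it. The same arithmetic as a standalone model over a fake config-space array:

```c
/*
 * Hedged, standalone model of read_config_nybble()/write_config_nybble()
 * above: identical bit arithmetic, with a byte array standing in for
 * PCI config space instead of pci_{read,write}_config_byte().
 */
#include <assert.h>

static unsigned char cfg[256];		/* fake config space */

static unsigned read_nybble(unsigned offset, unsigned nr)
{
	unsigned char x = cfg[offset + (nr >> 1)];
	return (nr & 1) ? (x >> 4) : (x & 0xf);	/* odd nr -> high nibble */
}

static void write_nybble(unsigned offset, unsigned nr, unsigned val)
{
	unsigned char *x = &cfg[offset + (nr >> 1)];
	*x = (nr & 1) ? ((*x & 0x0f) | (val << 4)) : ((*x & 0xf0) | val);
}

int main(void)
{
	write_nybble(0x48, 0, 0x9);	/* low nibble of cfg[0x48]  */
	write_nybble(0x48, 1, 0x5);	/* high nibble of cfg[0x48] */
	assert(cfg[0x48] == 0x59);
	assert(read_nybble(0x48, 0) == 0x9);
	assert(read_nybble(0x48, 1) == 0x5);
	return 0;
}
```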
++/*
++ * ALI pirq entries are damn ugly, and completely undocumented.
++ * This has been figured out from pirq tables, and it's not a pretty
++ * picture.
++ */
++static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
++
++ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
++}
++
++static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
++ unsigned int val = irqmap[irq];
++
++ if (val) {
++ write_config_nybble(router, 0x48, pirq-1, val);
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
++ * just a pointer to the config space.
++ */
++static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 x;
++
++ pci_read_config_byte(router, pirq, &x);
++ return (x < 16) ? x : 0;
++}
++
++static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ pci_write_config_byte(router, pirq, irq);
++ return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, PIRQD is in the upper instead of lower 4 bits.
++ */
++static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
++}
++
++static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
++ return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, for the 82C586, the nibble map is different.
++ */
++static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
++}
++
++static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
++ return 1;
++}
++
++/*
++ * ITE 8330G pirq rules are nibble-based
++ * FIXME: pirqmap may be { 1, 0, 3, 2 },
++ * 2+3 are both mapped to irq 9 on my system
++ */
++static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++ return read_config_nybble(router, 0x43, pirqmap[pirq-1]);
++}
++
++static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
++ return 1;
++}
++
++/*
++ * OPTI: high four bits are nibble pointer..
++ * I wonder what the low bits do?
++ */
++static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0xb8, pirq >> 4);
++}
++
++static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0xb8, pirq >> 4, irq);
++ return 1;
++}
++
++/*
++ * Cyrix: nibble offset 0x5C
++ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
++ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
++ */
++static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0x5C, (pirq-1)^1);
++}
++
++static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
++ return 1;
++}
++
++/*
++ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
++ * We have to deal with the following issues here:
++ * - vendors have different ideas about the meaning of link values
++ * - some onboard devices (integrated in the chipset) have special
++ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
++ * - different revision of the router have a different layout for
++ * the routing registers, particularly for the onchip devices
++ *
++ * For all routing registers the common thing is we have one byte
++ * per routeable link which is defined as:
++ * bit 7 IRQ mapping enabled (0) or disabled (1)
++ * bits [6:4] reserved (sometimes used for onchip devices)
++ * bits [3:0] IRQ to map to
++ * allowed: 3-7, 9-12, 14-15
++ * reserved: 0, 1, 2, 8, 13
++ *
++ * The config-space registers located at 0x41/0x42/0x43/0x44 are
++ * always used to route the normal PCI INT A/B/C/D respectively.
++ * Apparently there are systems implementing PCI routing table using
++ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
++ * We try our best to handle both link mappings.
++ *
++ * Currently (2003-05-21) it appears most SiS chipsets follow the
++ * definition of routing registers from the SiS-5595 southbridge.
++ * According to the SiS 5595 datasheets the revision id's of the
++ * router (ISA-bridge) should be 0x01 or 0xb0.
++ *
++ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
++ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
++ * They seem to work with the current routing code. However there is
++ * some concern because of the two USB-OHCI HCs (original SiS 5595
++ * had only one). YMMV.
++ *
++ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
++ *
++ * 0x61: IDEIRQ:
++ * bits [6:5] must be written 01
++ * bit 4 channel-select primary (0), secondary (1)
++ *
++ * 0x62: USBIRQ:
++ * bit 6 OHCI function disabled (0), enabled (1)
++ *
++ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
++ *
++ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
++ *
++ * We support USBIRQ (in addition to INTA-INTD) and keep the
++ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
++ *
++ * Currently the only reported exception is the new SiS 65x chipset
++ * which includes the SiS 69x southbridge. Here we have the 85C503
++ * router revision 0x04 and there are changes in the register layout
++ * mostly related to the different USB HCs with USB 2.0 support.
++ *
++ * Onchip routing for router rev-id 0x04 (try-and-error observation)
++ *
++ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
++ * bit 6-4 are probably unused, not like 5595
++ */
++
++#define PIRQ_SIS_IRQ_MASK 0x0f
++#define PIRQ_SIS_IRQ_DISABLE 0x80
++#define PIRQ_SIS_USB_ENABLE 0x40
++
++static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 x;
++ int reg;
++
++ reg = pirq;
++ if (reg >= 0x01 && reg <= 0x04)
++ reg += 0x40;
++ pci_read_config_byte(router, reg, &x);
++ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
++}
++
++static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ u8 x;
++ int reg;
++
++ reg = pirq;
++ if (reg >= 0x01 && reg <= 0x04)
++ reg += 0x40;
++ pci_read_config_byte(router, reg, &x);
++ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
++ x |= irq ? irq : PIRQ_SIS_IRQ_DISABLE;
++ pci_write_config_byte(router, reg, x);
++ return 1;
++}
++
++
++/*
++ * VLSI: nibble offset 0x74 - an educated guess based on the routing
++ * table and config space of the VLSI 82C534 PCI-bridge/router (1004:0102).
++ * Tested on an HP OmniBook 800, covering PIRQ 1, 2, 4, 8 for onboard
++ * devices, PIRQ 3 for a non-PCI(!) soundchip, and (untested) PIRQ 6
++ * for the busbridge to the docking station.
++ */
++
++static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ if (pirq > 8) {
++ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++ return 0;
++ }
++ return read_config_nybble(router, 0x74, pirq-1);
++}
++
++static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ if (pirq > 8) {
++ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++ return 0;
++ }
++ write_config_nybble(router, 0x74, pirq-1, irq);
++ return 1;
++}
++
++/*
++ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
++ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
++ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
++ * register is a straight binary coding of desired PIC IRQ (low nibble).
++ *
++ * The 'link' value in the PIRQ table is already in the correct format
++ * for the Index register. There are some special index values:
++ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
++ * and 0x03 for SMBus.
++ */
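++/*
++ * For example, following the scheme above, routing PCIIRQ10 to IRQ 5
++ * means writing the link value 0x1a to the Index register at 0xc00
++ * and then 0x05 to the Redirect register at 0xc01 -- exactly what
++ * pirq_serverworks_set() does below.
++ */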
++static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ outb_p(pirq, 0xc00);
++ return inb(0xc01) & 0xf;
++}
++
++static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ outb_p(pirq, 0xc00);
++ outb_p(irq, 0xc01);
++ return 1;
++}
++
++/* Support for AMD756 PCI IRQ Routing
++ * Jhon H. Caicedo <jhcaiced@osso.org.co>
++ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
++ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
++ * The AMD756 pirq rules are nibble-based:
++ * offset 0x56: bits 0-3 PIRQA, bits 4-7 PIRQB
++ * offset 0x57: bits 0-3 PIRQC, bits 4-7 PIRQD
++ */
++static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 irq;
++ irq = 0;
++ if (pirq <= 4)
++ {
++ irq = read_config_nybble(router, 0x56, pirq - 1);
++ }
++ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
++ dev->vendor, dev->device, pirq, irq);
++ return irq;
++}
++
++static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
++ dev->vendor, dev->device, pirq, irq);
++ if (pirq <= 4)
++ {
++ write_config_nybble(router, 0x56, pirq - 1, irq);
++ }
++ return 1;
++}
++
++#ifdef CONFIG_PCI_BIOS
++
++static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ struct pci_dev *bridge;
++ int pin = pci_get_interrupt_pin(dev, &bridge);
++ return pcibios_set_irq_routing(bridge, pin, irq);
++}
++
++#endif
++
++static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ static struct pci_device_id __initdata pirq_440gx[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
++ { },
++ };
++
++ /* 440GX has a proprietary PIRQ router -- don't use it */
++ if (pci_dev_present(pirq_440gx))
++ return 0;
++
++ switch(device)
++ {
++ case PCI_DEVICE_ID_INTEL_82371FB_0:
++ case PCI_DEVICE_ID_INTEL_82371SB_0:
++ case PCI_DEVICE_ID_INTEL_82371AB_0:
++ case PCI_DEVICE_ID_INTEL_82371MX:
++ case PCI_DEVICE_ID_INTEL_82443MX_0:
++ case PCI_DEVICE_ID_INTEL_82801AA_0:
++ case PCI_DEVICE_ID_INTEL_82801AB_0:
++ case PCI_DEVICE_ID_INTEL_82801BA_0:
++ case PCI_DEVICE_ID_INTEL_82801BA_10:
++ case PCI_DEVICE_ID_INTEL_82801CA_0:
++ case PCI_DEVICE_ID_INTEL_82801CA_12:
++ case PCI_DEVICE_ID_INTEL_82801DB_0:
++ case PCI_DEVICE_ID_INTEL_82801E_0:
++ case PCI_DEVICE_ID_INTEL_82801EB_0:
++ case PCI_DEVICE_ID_INTEL_ESB_1:
++ case PCI_DEVICE_ID_INTEL_ICH6_0:
++ case PCI_DEVICE_ID_INTEL_ICH6_1:
++ case PCI_DEVICE_ID_INTEL_ICH7_0:
++ case PCI_DEVICE_ID_INTEL_ICH7_1:
++ case PCI_DEVICE_ID_INTEL_ICH7_30:
++ case PCI_DEVICE_ID_INTEL_ICH7_31:
++ case PCI_DEVICE_ID_INTEL_ESB2_0:
++ case PCI_DEVICE_ID_INTEL_ICH8_0:
++ case PCI_DEVICE_ID_INTEL_ICH8_1:
++ case PCI_DEVICE_ID_INTEL_ICH8_2:
++ case PCI_DEVICE_ID_INTEL_ICH8_3:
++ case PCI_DEVICE_ID_INTEL_ICH8_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_0:
++ case PCI_DEVICE_ID_INTEL_ICH9_1:
++ case PCI_DEVICE_ID_INTEL_ICH9_2:
++ case PCI_DEVICE_ID_INTEL_ICH9_3:
++ case PCI_DEVICE_ID_INTEL_ICH9_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_5:
++ case PCI_DEVICE_ID_INTEL_ICH10_0:
++ case PCI_DEVICE_ID_INTEL_ICH10_1:
++ case PCI_DEVICE_ID_INTEL_ICH10_2:
++ case PCI_DEVICE_ID_INTEL_ICH10_3:
++ r->name = "PIIX/ICH";
++ r->get = pirq_piix_get;
++ r->set = pirq_piix_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int via_router_probe(struct irq_router *r,
++ struct pci_dev *router, u16 device)
++{
++ /* FIXME: We should move some of the quirk fixup stuff here */
++
++ /*
++ * workarounds for some buggy BIOSes
++ */
++ if (device == PCI_DEVICE_ID_VIA_82C586_0) {
++ switch(router->device) {
++ case PCI_DEVICE_ID_VIA_82C686:
++ /*
++ * Asus k7m bios wrongly reports 82C686A
++ * as 586-compatible
++ */
++ device = PCI_DEVICE_ID_VIA_82C686;
++ break;
++ case PCI_DEVICE_ID_VIA_8235:
++ /*
++ * Asus a7v-x bios wrongly reports 8235
++ * as 586-compatible
++ */
++ device = PCI_DEVICE_ID_VIA_8235;
++ break;
++ }
++ }
++
++ switch(device) {
++ case PCI_DEVICE_ID_VIA_82C586_0:
++ r->name = "VIA";
++ r->get = pirq_via586_get;
++ r->set = pirq_via586_set;
++ return 1;
++ case PCI_DEVICE_ID_VIA_82C596:
++ case PCI_DEVICE_ID_VIA_82C686:
++ case PCI_DEVICE_ID_VIA_8231:
++ case PCI_DEVICE_ID_VIA_8233A:
++ case PCI_DEVICE_ID_VIA_8235:
++ case PCI_DEVICE_ID_VIA_8237:
++ /* FIXME: add new ones for 8233/5 */
++ r->name = "VIA";
++ r->get = pirq_via_get;
++ r->set = pirq_via_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_VLSI_82C534:
++ r->name = "VLSI 82C534";
++ r->get = pirq_vlsi_get;
++ r->set = pirq_vlsi_set;
++ return 1;
++ }
++ return 0;
++}
++
++
++static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
++ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
++ r->name = "ServerWorks";
++ r->get = pirq_serverworks_get;
++ r->set = pirq_serverworks_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ if (device != PCI_DEVICE_ID_SI_503)
++ return 0;
++
++ r->name = "SIS";
++ r->get = pirq_sis_get;
++ r->set = pirq_sis_set;
++ return 1;
++}
++
++static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_CYRIX_5520:
++ r->name = "NatSemi";
++ r->get = pirq_cyrix_get;
++ r->set = pirq_cyrix_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_OPTI_82C700:
++ r->name = "OPTI";
++ r->get = pirq_opti_get;
++ r->set = pirq_opti_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_ITE_IT8330G_0:
++ r->name = "ITE";
++ r->get = pirq_ite_get;
++ r->set = pirq_ite_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_AL_M1533:
++ case PCI_DEVICE_ID_AL_M1563:
++ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
++ r->name = "ALI";
++ r->get = pirq_ali_get;
++ r->set = pirq_ali_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_AMD_VIPER_740B:
++ r->name = "AMD756";
++ break;
++ case PCI_DEVICE_ID_AMD_VIPER_7413:
++ r->name = "AMD766";
++ break;
++ case PCI_DEVICE_ID_AMD_VIPER_7443:
++ r->name = "AMD768";
++ break;
++ default:
++ return 0;
++ }
++ r->get = pirq_amd756_get;
++ r->set = pirq_amd756_set;
++ return 1;
++}
++
++static __initdata struct irq_router_handler pirq_routers[] = {
++ { PCI_VENDOR_ID_INTEL, intel_router_probe },
++ { PCI_VENDOR_ID_AL, ali_router_probe },
++ { PCI_VENDOR_ID_ITE, ite_router_probe },
++ { PCI_VENDOR_ID_VIA, via_router_probe },
++ { PCI_VENDOR_ID_OPTI, opti_router_probe },
++ { PCI_VENDOR_ID_SI, sis_router_probe },
++ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
++ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
++ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
++ { PCI_VENDOR_ID_AMD, amd_router_probe },
++ /* Someone with docs needs to add the ATI Radeon IGP */
++ { 0, NULL }
++};
++static struct irq_router pirq_router;
++static struct pci_dev *pirq_router_dev;
++
++
++/*
++ * FIXME: should we have an option to say "generic for
++ * chipset"?
++ */
++
++static void __init pirq_find_router(struct irq_router *r)
++{
++ struct irq_routing_table *rt = pirq_table;
++ struct irq_router_handler *h;
++
++#ifdef CONFIG_PCI_BIOS
++ if (!rt->signature) {
++ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
++ r->set = pirq_bios_set;
++ r->name = "BIOS";
++ return;
++ }
++#endif
++
++ /* Default unless a driver reloads it */
++ r->name = "default";
++ r->get = NULL;
++ r->set = NULL;
++
++ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
++ rt->rtr_vendor, rt->rtr_device);
++
++ pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
++ if (!pirq_router_dev) {
++ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
++ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
++ return;
++ }
++
++ for (h = pirq_routers; h->vendor; h++) {
++ /* First look for a router match */
++ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
++ break;
++ /* Fall back to a device match */
++ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
++ break;
++ }
++ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
++ pirq_router.name,
++ pirq_router_dev->vendor,
++ pirq_router_dev->device,
++ pci_name(pirq_router_dev));
++}
++
++static struct irq_info *pirq_get_info(struct pci_dev *dev)
++{
++ struct irq_routing_table *rt = pirq_table;
++ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
++ struct irq_info *info;
++
++ for (info = rt->slots; entries--; info++)
++ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
++ return info;
++ return NULL;
++}
++
++static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
++{
++ u8 pin;
++ struct irq_info *info;
++ int i, pirq, newirq;
++ int irq = 0;
++ u32 mask;
++ struct irq_router *r = &pirq_router;
++ struct pci_dev *dev2 = NULL;
++ char *msg = NULL;
++
++ /* Find IRQ pin */
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (!pin) {
++ DBG(KERN_DEBUG " -> no interrupt pin\n");
++ return 0;
++ }
++ pin = pin - 1;
++
++ /* Find IRQ routing entry */
++
++ if (!pirq_table)
++ return 0;
++
++ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
++ info = pirq_get_info(dev);
++ if (!info) {
++ DBG(" -> not found in routing table\n" KERN_DEBUG);
++ return 0;
++ }
++ pirq = info->irq[pin].link;
++ mask = info->irq[pin].bitmap;
++ if (!pirq) {
++ DBG(" -> not routed\n" KERN_DEBUG);
++ return 0;
++ }
++ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
++ mask &= pcibios_irq_mask;
++
++ /* Work around broken HP Pavilion Notebooks which assign USB to
++ IRQ 9 even though it is actually wired to IRQ 11 */
++
++ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
++ dev->irq = 11;
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
++ r->set(pirq_router_dev, dev, pirq, 11);
++ }
++
++ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
++ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
++ pirq = 0x68;
++ mask = 0x400;
++ dev->irq = r->get(pirq_router_dev, dev, pirq);
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
++ }
++
++ /*
++ * Find the best IRQ to assign: use the one
++ * reported by the device if possible.
++ */
++ newirq = dev->irq;
++ if (newirq && !((1 << newirq) & mask)) {
++ if (pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
++ else printk("\n" KERN_WARNING
++ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
++ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
++ pci_name(dev));
++ }
++ if (!newirq && assign) {
++ for (i = 0; i < 16; i++) {
++ if (!(mask & (1 << i)))
++ continue;
++ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED))
++ newirq = i;
++ }
++ }
++ DBG(" -> newirq=%d", newirq);
++
++ /* Check if it is hardcoded */
++ if ((pirq & 0xf0) == 0xf0) {
++ irq = pirq & 0xf;
++ DBG(" -> hardcoded IRQ %d\n", irq);
++ msg = "Hardcoded";
++ } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
++ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
++ DBG(" -> got IRQ %d\n", irq);
++ msg = "Found";
++ eisa_set_level_irq(irq);
++ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
++ DBG(" -> assigning IRQ %d", newirq);
++ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
++ eisa_set_level_irq(newirq);
++ DBG(" ... OK\n");
++ msg = "Assigned";
++ irq = newirq;
++ }
++ }
++
++ if (!irq) {
++ DBG(" ... failed\n");
++ if (newirq && mask == (1 << newirq)) {
++ msg = "Guessed";
++ irq = newirq;
++ } else
++ return 0;
++ }
++ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
++
++ /* Update IRQ for all devices with the same pirq value */
++ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
++ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
++ if (!pin)
++ continue;
++ pin--;
++ info = pirq_get_info(dev2);
++ if (!info)
++ continue;
++ if (info->irq[pin].link == pirq) {
++ /* We refuse to override the dev->irq information. Give a warning! */
++ if ( dev2->irq && dev2->irq != irq && \
++ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
++ ((1 << dev2->irq) & mask)) ) {
++#ifndef CONFIG_PCI_MSI
++ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
++ pci_name(dev2), dev2->irq, irq);
++#endif
++ continue;
++ }
++ dev2->irq = irq;
++ pirq_penalty[irq]++;
++ if (dev != dev2)
++ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
++ }
++ }
++ return 1;
++}
++
++static void __init pcibios_fixup_irqs(void)
++{
++ struct pci_dev *dev = NULL;
++ u8 pin;
++
++ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
++ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++ /*
++ * If the BIOS has set an out-of-range IRQ number, just ignore it.
++ * Also keep track of which IRQs are already in use.
++ */
++ if (dev->irq >= 16) {
++ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
++ dev->irq = 0;
++ }
++ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
++ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
++ pirq_penalty[dev->irq] = 0;
++ pirq_penalty[dev->irq]++;
++ }
++
++ dev = NULL;
++ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++#ifdef CONFIG_X86_IO_APIC
++ /*
++ * Recalculate IRQ numbers if we use the I/O APIC.
++ */
++ if (io_apic_assign_pci_irqs)
++ {
++ int irq;
++
++ if (pin) {
++ pin--; /* interrupt pins are numbered starting from 1 */
++ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++ /*
++ * Busses behind bridges are typically not listed in the MP-table.
++ * In this case we have to look up the IRQ based on the parent bus,
++ * parent slot, and pin number. The SMP code detects such bridged
++ * busses itself so we should get into this branch reliably.
++ */
++ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++ struct pci_dev * bridge = dev->bus->self;
++
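++ /* standard PCI-to-PCI bridge swizzle: the pin seen at the
++ bridge is the device's pin rotated by its slot number */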
++ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
++ PCI_SLOT(bridge->devfn), pin);
++ if (irq >= 0)
++ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++ pci_name(bridge), 'A' + pin, irq);
++ }
++ if (irq >= 0) {
++ if (use_pci_vector() &&
++ !platform_legacy_irq(irq))
++ irq = IO_APIC_VECTOR(irq);
++
++ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++ pci_name(dev), 'A' + pin, irq);
++ dev->irq = irq;
++ }
++ }
++ }
++#endif
++ /*
++ * Still no IRQ? Try to lookup one...
++ */
++ if (pin && !dev->irq)
++ pcibios_lookup_irq(dev, 0);
++ }
++}
++
++/*
++ * Work around broken HP Pavilion Notebooks which assign USB to
++ * IRQ 9 even though it is actually wired to IRQ 11
++ */
++static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
++{
++ if (!broken_hp_bios_irq9) {
++ broken_hp_bios_irq9 = 1;
++ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++ }
++ return 0;
++}
++
++/*
++ * Work around broken Acer TravelMate 360 Notebooks which assign
++ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
++ */
++static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
++{
++ if (!acer_tm360_irqrouting) {
++ acer_tm360_irqrouting = 1;
++ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++ }
++ return 0;
++}
++
++static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++ {
++ .callback = fix_broken_hp_bios_irq9,
++ .ident = "HP Pavilion N5400 Series Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
++ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
++ },
++ },
++ {
++ .callback = fix_acer_tm360_irqrouting,
++ .ident = "Acer TravelMate 36x Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++ },
++ },
++ { }
++};
++
++static int __init pcibios_irq_init(void)
++{
++ DBG(KERN_DEBUG "PCI: IRQ init\n");
++
++ if (pcibios_enable_irq || raw_pci_ops == NULL)
++ return 0;
++
++ dmi_check_system(pciirq_dmi_table);
++
++ pirq_table = pirq_find_routing_table();
++
++#ifdef CONFIG_PCI_BIOS
++ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
++ pirq_table = pcibios_get_irq_routing_table();
++#endif
++ if (pirq_table) {
++ pirq_peer_trick();
++ pirq_find_router(&pirq_router);
++ if (pirq_table->exclusive_irqs) {
++ int i;
++ for (i=0; i<16; i++)
++ if (!(pirq_table->exclusive_irqs & (1 << i)))
++ pirq_penalty[i] += 100;
++ }
++ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
++ if (io_apic_assign_pci_irqs)
++ pirq_table = NULL;
++ }
++
++ pcibios_enable_irq = pirq_enable_irq;
++
++ pcibios_fixup_irqs();
++ return 0;
++}
++
++subsys_initcall(pcibios_irq_init);
++
++
++static void pirq_penalize_isa_irq(int irq, int active)
++{
++ /*
++ * If any ISAPnP device reports an IRQ in its list of possible
++ * IRQs, we try to avoid assigning it to PCI devices.
++ */
++ if (irq < 16) {
++ if (active)
++ pirq_penalty[irq] += 1000;
++ else
++ pirq_penalty[irq] += 100;
++ }
++}
++
++void pcibios_penalize_isa_irq(int irq, int active)
++{
++#ifdef CONFIG_ACPI
++ if (!acpi_noirq)
++ acpi_penalize_isa_irq(irq, active);
++ else
++#endif
++ pirq_penalize_isa_irq(irq, active);
++}
++
++static int pirq_enable_irq(struct pci_dev *dev)
++{
++ u8 pin;
++ struct pci_dev *temp_dev;
++
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
++ char *msg = "";
++
++ pin--; /* interrupt pins are numbered starting from 1 */
++
++ if (io_apic_assign_pci_irqs) {
++ int irq;
++
++ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++ /*
++ * Busses behind bridges are typically not listed in the MP-table.
++ * In this case we have to look up the IRQ based on the parent bus,
++ * parent slot, and pin number. The SMP code detects such bridged
++ * busses itself so we should get into this branch reliably.
++ */
++ temp_dev = dev;
++ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++ struct pci_dev * bridge = dev->bus->self;
++
++ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
++ PCI_SLOT(bridge->devfn), pin);
++ if (irq >= 0)
++ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++ pci_name(bridge), 'A' + pin, irq);
++ dev = bridge;
++ }
++ dev = temp_dev;
++ if (irq >= 0) {
++#ifdef CONFIG_PCI_MSI
++ if (!platform_legacy_irq(irq))
++ irq = IO_APIC_VECTOR(irq);
++#endif
++ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++ pci_name(dev), 'A' + pin, irq);
++ dev->irq = irq;
++ return 0;
++ } else
++ msg = " Probably buggy MP table.";
++ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
++ msg = "";
++ else
++ msg = " Please try using pci=biosirq.";
++
++ /* With IDE legacy devices the IRQ lookup failure is not a problem. */
++ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
++ return 0;
++
++ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
++ 'A' + pin, pci_name(dev), msg);
++ }
++ return 0;
++}
++
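++/*
++ * This appears to estimate how many interrupt vectors are still
++ * available: starting from 'last' it steps upward in strides of 8
++ * (one vector per priority class) until FIRST_SYSTEM_VECTOR, skips
++ * the syscall vector, and restarts from FIRST_DEVICE_VECTOR at the
++ * next offset when it runs off the top; 'nr_released' seeds the count.
++ */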
++int pci_vector_resources(int last, int nr_released)
++{
++ int count = nr_released;
++
++ int next = last;
++ int offset = (last % 8);
++
++ while (next < FIRST_SYSTEM_VECTOR) {
++ next += 8;
++#ifdef CONFIG_X86_64
++ if (next == IA32_SYSCALL_VECTOR)
++ continue;
++#else
++ if (next == SYSCALL_VECTOR)
++ continue;
++#endif
++ count++;
++ if (next >= FIRST_SYSTEM_VECTOR) {
++ if (offset%8) {
++ next = FIRST_DEVICE_VECTOR + offset;
++ offset++;
++ continue;
++ }
++ count--;
++ }
++ }
++
++ return count;
++}
+diff -rpuN linux-2.6.18.8/arch/i386/pci/Makefile linux-2.6.18-xen-3.3.0/arch/i386/pci/Makefile
+--- linux-2.6.18.8/arch/i386/pci/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/pci/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -4,6 +4,10 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
+ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o
+ obj-$(CONFIG_PCI_DIRECT) += direct.o
+
++# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
++
+ pci-y := fixup.o
+ pci-$(CONFIG_ACPI) += acpi.o
+ pci-y += legacy.o irq.o
+diff -rpuN linux-2.6.18.8/arch/i386/pci/pcifront.c linux-2.6.18-xen-3.3.0/arch/i386/pci/pcifront.c
+--- linux-2.6.18.8/arch/i386/pci/pcifront.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/pci/pcifront.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,55 @@
++/*
++ * PCI Frontend Stub - puts some "dummy" functions into the Linux x86 PCI core
++ * to support the Xen PCI Frontend's operation
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <asm/acpi.h>
++#include "pci.h"
++
++static int pcifront_enable_irq(struct pci_dev *dev)
++{
++ u8 irq;
++ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
++ dev->irq = irq;
++
++ return 0;
++}
++
++extern u8 pci_cache_line_size;
++
++static int __init pcifront_x86_stub_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ /* Only install our method if we haven't found real hardware already */
++ if (raw_pci_ops)
++ return 0;
++
++ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
++
++ /* Copied from arch/i386/pci/common.c */
++ pci_cache_line_size = 32 >> 2;
++ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
++ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
++ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
++ pci_cache_line_size = 128 >> 2; /* P4 */
++
++ /* On x86, we need to disable the normal IRQ routing table and
++ * just ask the backend
++ */
++ pcibios_enable_irq = pcifront_enable_irq;
++ pcibios_disable_irq = NULL;
++
++#ifdef CONFIG_ACPI
++ /* Keep ACPI out of the picture */
++ acpi_noirq = 1;
++#endif
++
++ return 0;
++}
++
++arch_initcall(pcifront_x86_stub_init);
+diff -rpuN linux-2.6.18.8/arch/i386/power/cpu.c linux-2.6.18-xen-3.3.0/arch/i386/power/cpu.c
+--- linux-2.6.18.8/arch/i386/power/cpu.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/power/cpu.c 2008-08-21 11:36:07.000000000 +0200
+@@ -62,11 +62,12 @@ static void do_fpu_end(void)
+
+ static void fix_processor_context(void)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ int cpu = smp_processor_id();
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
+
+ set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+-
++#endif
+ load_TR_desc(); /* This does ltr */
+ load_LDT(&current->active_mm->context); /* This does lldt */
+
+diff -rpuN linux-2.6.18.8/arch/i386/power/Makefile linux-2.6.18-xen-3.3.0/arch/i386/power/Makefile
+--- linux-2.6.18.8/arch/i386/power/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/i386/power/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -1,2 +1,4 @@
+-obj-$(CONFIG_PM) += cpu.o
++obj-$(subst m,y,$(CONFIG_APM)) += cpu.o
++obj-$(CONFIG_SOFTWARE_SUSPEND) += cpu.o
++obj-$(CONFIG_ACPI_SLEEP) += cpu.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
+diff -rpuN linux-2.6.18.8/arch/ia64/hp/common/sba_iommu.c linux-2.6.18-xen-3.3.0/arch/ia64/hp/common/sba_iommu.c
+--- linux-2.6.18.8/arch/ia64/hp/common/sba_iommu.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/hp/common/sba_iommu.c 2008-08-21 11:36:07.000000000 +0200
+@@ -42,6 +42,11 @@
+ #include <asm/system.h> /* wmb() */
+
+ #include <asm/acpi-ext.h>
++#include <asm/maddr.h> /* range_straddles_page_boundary() */
++#ifdef CONFIG_XEN
++#include <xen/gnttab.h>
++#include <asm/gnttab_dma.h>
++#endif
+
+ #define PFX "IOC: "
+
+@@ -198,6 +203,9 @@ struct ioc {
+ void __iomem *ioc_hpa; /* I/O MMU base address */
+ char *res_map; /* resource map, bit == pdir entry */
+ u64 *pdir_base; /* physical base address */
++#ifdef CONFIG_XEN
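++ /* kernel vaddr per pdir entry, cached because under Xen the pdir
++ apparently holds machine addresses that bus_to_virt() cannot
++ invert; used by sba_mark_clean() and the grant-table unmap */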
++ u64 *xen_virt_cache;
++#endif
+ unsigned long ibase; /* pdir IOV Space base */
+ unsigned long imask; /* pdir IOV Space mask */
+
+@@ -762,14 +770,21 @@ sba_free_range(struct ioc *ioc, dma_addr
+ * on the vba.
+ */
+
+-#if 1
+-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
+- | 0x8000000000000000ULL)
++#ifndef CONFIG_XEN
++#define sba_io_pdir_entry(ioc, pdir_ptr, vba) *pdir_ptr = \
++ ((virt_to_bus((void *)vba) & ~0xFFFULL) | 0x8000000000000000ULL)
+ #else
+ void SBA_INLINE
+-sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
++sba_io_pdir_entry(struct ioc *ioc, u64 *pdir_ptr, unsigned long vba)
+ {
+- *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
++ *pdir_ptr = ((virt_to_bus((void *)vba) & ~0xFFFULL) |
++ 0x80000000000000FFULL);
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ int pide = ((u64)pdir_ptr - (u64)ioc->pdir_base) >> 3;
++ ioc->xen_virt_cache[pide] = vba;
++ }
++#endif
+ }
+ #endif
+
+@@ -784,6 +799,12 @@ mark_clean (void *addr, size_t size)
+ {
+ unsigned long pg_addr, end;
+
++#ifdef CONFIG_XEN
++ /* XXX: Bad things happen starting domUs when this is enabled. */
++ if (is_running_on_xen())
++ return;
++#endif
++
+ pg_addr = PAGE_ALIGN((unsigned long) addr);
+ end = (unsigned long) addr + size;
+ while (pg_addr + PAGE_SIZE <= end) {
+@@ -850,6 +871,10 @@ sba_mark_invalid(struct ioc *ioc, dma_ad
+ */
+ ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+ #endif
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ ioc->xen_virt_cache[off] = 0UL;
++#endif
+ } else {
+ u32 t = get_iovp_order(byte_cnt) + iovp_shift;
+
+@@ -865,6 +890,10 @@ sba_mark_invalid(struct ioc *ioc, dma_ad
+ #else
+ ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+ #endif
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ ioc->xen_virt_cache[off] = 0UL;
++#endif
+ off++;
+ byte_cnt -= iovp_size;
+ } while (byte_cnt > 0);
+@@ -894,15 +923,29 @@ sba_map_single(struct device *dev, void
+ unsigned long flags;
+ #endif
+ #ifdef ALLOW_IOV_BYPASS
+- unsigned long pci_addr = virt_to_phys(addr);
++ unsigned long pci_addr;
++#endif
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ void* tmp_addr = addr;
++ size_t tmp_size = size;
++ do {
++ gnttab_dma_use_page(virt_to_page(tmp_addr));
++ tmp_addr += PAGE_SIZE;
++ tmp_size -= min(tmp_size, PAGE_SIZE);
++ } while (tmp_size);
++ }
+ #endif
+
+ #ifdef ALLOW_IOV_BYPASS
++ pci_addr = virt_to_bus(addr);
+ ASSERT(to_pci_dev(dev)->dma_mask);
+ /*
+ ** Check if the PCI device can DMA to ptr... if so, just return ptr
+ */
+- if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
++ if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0 &&
++ !range_straddles_page_boundary(__pa(addr), size))) {
+ /*
+ ** Device is bit capable of DMA'ing to the buffer...
+ ** just return the PCI address of ptr
+@@ -944,7 +987,7 @@ sba_map_single(struct device *dev, void
+
+ while (size > 0) {
+ ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
+- sba_io_pdir_entry(pdir_start, (unsigned long) addr);
++ sba_io_pdir_entry(ioc, pdir_start, (unsigned long) addr);
+
+ DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
+
+@@ -973,13 +1016,29 @@ sba_mark_clean(struct ioc *ioc, dma_addr
+ void *addr;
+
+ if (size <= iovp_size) {
+- addr = phys_to_virt(ioc->pdir_base[off] &
+- ~0xE000000000000FFFULL);
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ addr = (void *)ioc->xen_virt_cache[off];
++ else
++ addr = bus_to_virt(ioc->pdir_base[off] &
++ ~0xE000000000000FFFULL);
++#else
++ addr = bus_to_virt(ioc->pdir_base[off] &
++ ~0xE000000000000FFFULL);
++#endif
+ mark_clean(addr, size);
+ } else {
+ do {
+- addr = phys_to_virt(ioc->pdir_base[off] &
+- ~0xE000000000000FFFULL);
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ addr = (void *)ioc->xen_virt_cache[off];
++ else
++ addr = bus_to_virt(ioc->pdir_base[off] &
++ ~0xE000000000000FFFULL);
++#else
++ addr = bus_to_virt(ioc->pdir_base[off] &
++ ~0xE000000000000FFFULL);
++#endif
+ mark_clean(addr, min(size, iovp_size));
+ off++;
+ size -= iovp_size;
+@@ -988,6 +1047,34 @@ sba_mark_clean(struct ioc *ioc, dma_addr
+ }
+ #endif
+
++#ifdef CONFIG_XEN
++static void
++sba_gnttab_dma_unmap_page(struct ioc *ioc, dma_addr_t iova, size_t size)
++{
++ u32 iovp = (u32) SBA_IOVP(ioc,iova);
++ int off = PDIR_INDEX(iovp);
++ struct page *page;
++
++ if (size <= iovp_size) {
++ BUG_ON(!ioc->xen_virt_cache[off]);
++ page = virt_to_page(ioc->xen_virt_cache[off]);
++ __gnttab_dma_unmap_page(page);
++ } else {
++ struct page *last_page = (struct page *)~0UL;
++ do {
++ BUG_ON(!ioc->xen_virt_cache[off]);
++ page = virt_to_page(ioc->xen_virt_cache[off]);
++ if (page != last_page) {
++ __gnttab_dma_unmap_page(page);
++ last_page = page;
++ }
++ off++;
++ size -= iovp_size;
++ } while (size > 0);
++ }
++}
++#endif
++
+ /**
+ * sba_unmap_single - unmap one IOVA and free resources
+ * @dev: instance of PCI owned by the driver that's asking.
+@@ -1018,7 +1105,16 @@ void sba_unmap_single(struct device *dev
+
+ #ifdef ENABLE_MARK_CLEAN
+ if (dir == DMA_FROM_DEVICE) {
+- mark_clean(phys_to_virt(iova), size);
++ mark_clean(bus_to_virt(iova), size);
++ }
++#endif
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ do {
++ gnttab_dma_unmap_page(iova);
++ iova += PAGE_SIZE;
++ size -= min(size,PAGE_SIZE);
++ } while (size);
+ }
+ #endif
+ return;
+@@ -1037,6 +1133,10 @@ void sba_unmap_single(struct device *dev
+ if (dir == DMA_FROM_DEVICE)
+ sba_mark_clean(ioc, iova, size);
+ #endif
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ sba_gnttab_dma_unmap_page(ioc, iova, size);
++#endif
+
+ #if DELAYED_RESOURCE_CNT > 0
+ spin_lock_irqsave(&ioc->saved_lock, flags);
+@@ -1102,9 +1202,14 @@ sba_alloc_coherent (struct device *dev,
+ return NULL;
+
+ memset(addr, 0, size);
+- *dma_handle = virt_to_phys(addr);
+
+ #ifdef ALLOW_IOV_BYPASS
++#ifdef CONFIG_XEN
++ if (xen_create_contiguous_region((unsigned long)addr, get_order(size),
++ fls64(dev->coherent_dma_mask)))
++ goto iommu_map;
++#endif
++ *dma_handle = virt_to_bus(addr);
+ ASSERT(dev->coherent_dma_mask);
+ /*
+ ** Check if the PCI device can DMA to ptr... if so, just return ptr
+@@ -1115,6 +1220,9 @@ sba_alloc_coherent (struct device *dev,
+
+ return addr;
+ }
++#ifdef CONFIG_XEN
++iommu_map:
++#endif
+ #endif
+
+ /*
+@@ -1138,6 +1246,13 @@ sba_alloc_coherent (struct device *dev,
+ */
+ void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+ {
++#if defined(ALLOW_IOV_BYPASS) && defined(CONFIG_XEN)
++ struct ioc *ioc = GET_IOC(dev);
++
++ if (likely((dma_handle & ioc->imask) != ioc->ibase))
++ xen_destroy_contiguous_region((unsigned long)vaddr,
++ get_order(size));
++#endif
+ sba_unmap_single(dev, dma_handle, size, 0);
+ free_pages((unsigned long) vaddr, get_order(size));
+ }
+@@ -1219,7 +1334,7 @@ sba_fill_pdir(
+ dma_offset=0; /* only want offset on first chunk */
+ cnt = ROUNDUP(cnt, iovp_size);
+ do {
+- sba_io_pdir_entry(pdirp, vaddr);
++ sba_io_pdir_entry(ioc, pdirp, vaddr);
+ vaddr += iovp_size;
+ cnt -= iovp_size;
+ pdirp++;
+@@ -1406,7 +1521,11 @@ int sba_map_sg(struct device *dev, struc
+ if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
+ for (sg = sglist ; filled < nents ; filled++, sg++){
+ sg->dma_length = sg->length;
+- sg->dma_address = virt_to_phys(sba_sg_address(sg));
++#ifdef CONFIG_XEN
++ sg->dma_address = gnttab_dma_map_page(sg->page) + sg->offset;
++#else
++ sg->dma_address = virt_to_bus(sba_sg_address(sg));
++#endif
+ }
+ return filled;
+ }
+@@ -1430,6 +1549,15 @@ int sba_map_sg(struct device *dev, struc
+
+ prefetch(ioc->res_hint);
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ int i;
++
++ for (i = 0; i < nents; i++)
++ gnttab_dma_use_page(sglist[i].page);
++ }
++#endif
++
+ /*
+ ** First coalesce the chunks and allocate I/O pdir space
+ **
+@@ -1562,11 +1690,25 @@ ioc_iova_init(struct ioc *ioc)
+
+ memset(ioc->pdir_base, 0, ioc->pdir_size);
+
++#ifdef CONFIG_XEN
++ /* The page table needs to be pinned in Xen memory */
++ if (xen_create_contiguous_region((unsigned long)ioc->pdir_base,
++ get_order(ioc->pdir_size), 0))
++ panic(PFX "Couldn't contiguously map I/O Page Table\n");
++
++ ioc->xen_virt_cache = (void *) __get_free_pages(
++ GFP_KERNEL, get_order(ioc->pdir_size));
++ if (!ioc->xen_virt_cache)
++ panic(PFX "Couldn't allocate Xen virtual address cache\n");
++
++ memset(ioc->xen_virt_cache, 0, ioc->pdir_size);
++#endif
++
+ DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
+ iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
+
+ ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
+- WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
++ WRITE_REG(virt_to_bus(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
+
+ /*
+ ** If an AGP device is present, only use half of the IOV space
+@@ -1603,7 +1745,7 @@ ioc_iova_init(struct ioc *ioc)
+ for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
+ memcpy(poison_addr, spill_poison, poison_size);
+
+- prefetch_spill_page = virt_to_phys(addr);
++ prefetch_spill_page = virt_to_bus(addr);
+
+ DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+ }
+diff -rpuN linux-2.6.18.8/arch/ia64/Kconfig linux-2.6.18-xen-3.3.0/arch/ia64/Kconfig
+--- linux-2.6.18.8/arch/ia64/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -58,6 +58,28 @@ config GENERIC_IOMAP
+ bool
+ default y
+
++config XEN
++ bool "Xen hypervisor support"
++ default y
++ select XEN_XENCOMM
++ help
++ Enable Xen hypervisor support. Resulting kernel runs
++ both as a guest OS on Xen and natively on hardware.
++
++config XEN_IA64_EXPOSE_P2M
++ bool "Xen/IA64 exposure p2m table"
++ depends on XEN
++ default y
++ help
++ expose the p2m table from Xen to the kernel
++
++config XEN_IA64_EXPOSE_P2M_USE_DTR
++ bool "Xen/IA64 map p2m table with dtr"
++ depends on XEN_IA64_EXPOSE_P2M
++ default y
++ help
++ use a data translation register (dtr) to map the exposed p2m table
++
+ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ bool
+ default y
+@@ -133,6 +155,10 @@ config IA64_SGI_SN2
+ config IA64_HP_SIM
+ bool "Ski-simulator"
+
++config IA64_XEN
++ bool "Xen guest"
++ depends on XEN
++
+ endchoice
+
+ choice
+@@ -431,6 +457,29 @@ config SGI_SN
+
+ source "drivers/sn/Kconfig"
+
++config KEXEC
++ bool "kexec system call (EXPERIMENTAL)"
++ depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) && !XEN_UNPRIVILEGED_GUEST
++ help
++ kexec is a system call that implements the ability to shut down
++ your current kernel and to start another kernel. It is like a
++ reboot, but it is independent of the system firmware. And like
++ a reboot you can start any kernel with it, not just Linux.
++
++ The name comes from the similarity to the exec system call.
++
++ It is an ongoing process to be certain the hardware in a machine
++ is properly shut down, so do not be surprised if this code does not
++ initially work for you. It may help to enable device hotplugging
++ support. As of this writing the exact hardware interface is
++ strongly in flux, so no good recommendation can be made.
++
++config CRASH_DUMP
++ bool "kernel crash dumps (EXPERIMENTAL)"
++ depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
++ help
++ Generate crash dump after being started by kexec.
++
+ source "drivers/firmware/Kconfig"
+
+ source "fs/Kconfig.binfmt"
+@@ -465,6 +514,21 @@ config PCI_DOMAINS
+ bool
+ default PCI
+
++config XEN_PCIDEV_FRONTEND
++ bool "Xen PCI Frontend"
++ depends on PCI && XEN
++ default y
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -528,3 +592,16 @@ source "arch/ia64/Kconfig.debug"
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
++
++#
++# override default values of drivers/xen/Kconfig
++#
++if XEN
++config XEN_SMPBOOT
++ default n
++
++config XEN_DEVMEM
++ default n
++endif
++
++source "drivers/xen/Kconfig"
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/acpi.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/acpi.c
+--- linux-2.6.18.8/arch/ia64/kernel/acpi.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/acpi.c 2008-08-21 11:36:07.000000000 +0200
+@@ -109,6 +109,10 @@ const char *acpi_get_sysname(void)
+ return "hpzx1";
+ } else if (!strcmp(hdr->oem_id, "SGI")) {
+ return "sn2";
++#ifdef CONFIG_XEN
++ } else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) {
++ return "xen";
++#endif
+ }
+
+ return "dig";
+@@ -123,6 +127,8 @@ const char *acpi_get_sysname(void)
+ return "sn2";
+ # elif defined (CONFIG_IA64_DIG)
+ return "dig";
++# elif defined (CONFIG_IA64_XEN)
++ return "xen";
+ # else
+ # error Unknown platform. Fix acpi.c.
+ # endif
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/asm-offsets.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/asm-offsets.c
+--- linux-2.6.18.8/arch/ia64/kernel/asm-offsets.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/asm-offsets.c 2008-08-21 11:36:07.000000000 +0200
+@@ -268,4 +268,29 @@ void foo(void)
+ DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
+ DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
+ DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
++
++#ifdef CONFIG_XEN
++ BLANK();
++
++#define DEFINE_MAPPED_REG_OFS(sym, field) \
++ DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
++
++ DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
++ DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
++ DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
++ DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
++ DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
++ DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
++ DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
++ DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
++ DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
++ DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
++ DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
++ DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
++ DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
++ DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
++ DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
++ DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
++ DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
++#endif /* CONFIG_XEN */
+ }
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/crash.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/crash.c
+--- linux-2.6.18.8/arch/ia64/kernel/crash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/crash.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,222 @@
++/*
++ * arch/ia64/kernel/crash.c
++ *
++ * Architecture specific (ia64) functions for kexec based crash dumps.
++ *
++ * Created by: Khalid Aziz <khalid.aziz@hp.com>
++ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
++ *
++ */
++#include <linux/smp.h>
++#include <linux/delay.h>
++#include <linux/crash_dump.h>
++#include <linux/bootmem.h>
++#include <linux/kexec.h>
++#include <linux/elfcore.h>
++#include <linux/sysctl.h>
++#include <linux/init.h>
++
++#include <asm/kdebug.h>
++#include <asm/mca.h>
++
++int kdump_status[NR_CPUS];
++atomic_t kdump_cpu_freezed;
++atomic_t kdump_in_progress;
++int kdump_on_init = 1;
++
++static inline Elf64_Word
++*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
++ size_t data_len)
++{
++ struct elf_note *note = (struct elf_note *)buf;
++ note->n_namesz = strlen(name) + 1;
++ note->n_descsz = data_len;
++ note->n_type = type;
++ buf += (sizeof(*note) + 3)/4;
++ memcpy(buf, name, note->n_namesz);
++ buf += (note->n_namesz + 3)/4;
++ memcpy(buf, data, data_len);
++ buf += (data_len + 3)/4;
++ return buf;
++}
++
++static void
++final_note(void *buf)
++{
++ memset(buf, 0, sizeof(struct elf_note));
++}
++
++extern void ia64_dump_cpu_regs(void *);
++
++static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
++
++void
++crash_save_this_cpu()
++{
++ void *buf;
++ unsigned long cfm, sof, sol;
++
++ int cpu = smp_processor_id();
++ struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
++
++ elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
++ memset(prstatus, 0, sizeof(*prstatus));
++ prstatus->pr_pid = current->pid;
++
++ ia64_dump_cpu_regs(dst);
++ cfm = dst[43];
++ sol = (cfm >> 7) & 0x7f;
++ sof = cfm & 0x7f;
++ dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
++ sof - sol);
++
++ buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
++ if (!buf)
++ return;
++ buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
++ sizeof(*prstatus));
++ final_note(buf);
++}
++
++static int
++kdump_wait_cpu_freeze(void)
++{
++ int cpu_num = num_online_cpus() - 1;
++ int timeout = 1000;
++ while(timeout-- > 0) {
++ if (atomic_read(&kdump_cpu_freezed) == cpu_num)
++ return 0;
++ udelay(1000);
++ }
++ return 1;
++}
++
++void
++machine_crash_shutdown(struct pt_regs *pt)
++{
++ /* This function is only called after the system
++ * has panicked or is otherwise in a critical state.
++ * The minimum amount of code to allow a kexec'd kernel
++ * to run successfully needs to happen here.
++ *
++ * In practice this means shooting down the other cpus in
++ * an SMP system.
++ */
++ kexec_disable_iosapic();
++#ifdef CONFIG_SMP
++ kdump_smp_send_stop();
++ if (kdump_wait_cpu_freeze() && kdump_on_init) {
++ /* not all CPUs responded to the IPI; send INIT to freeze them */
++ kdump_smp_send_init();
++ }
++#endif
++}
++
++static void
++machine_kdump_on_init(void)
++{
++ local_irq_disable();
++ kexec_disable_iosapic();
++ machine_kexec(ia64_kimage);
++}
++
++void
++kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
++{
++ int cpuid;
++ local_irq_disable();
++ cpuid = smp_processor_id();
++ crash_save_this_cpu();
++ current->thread.ksp = (__u64)info->sw - 16;
++ atomic_inc(&kdump_cpu_freezed);
++ kdump_status[cpuid] = 1;
++ mb();
++ if (cpuid == 0) {
++ for (;;)
++ cpu_relax();
++ } else
++ ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
++}
++
++static int
++kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct die_args *args = data;
++
++ if (!kdump_on_init)
++ return NOTIFY_DONE;
++
++ if (val != DIE_INIT_MONARCH_ENTER &&
++ val != DIE_INIT_SLAVE_ENTER &&
++ val != DIE_MCA_RENDZVOUS_LEAVE &&
++ val != DIE_MCA_MONARCH_LEAVE)
++ return NOTIFY_DONE;
++
++ /* There really ought to be a check here to see if this
++ * is a machine check rendezvous. The kexec code that
++ * was merged around 2.6.20-rc1 includes such a check.
++ * But the check relies on infrastructure that is not
++ * available in 2.6.16. */
++
++ switch (val) {
++ case DIE_INIT_MONARCH_ENTER:
++ machine_kdump_on_init();
++ break;
++ case DIE_INIT_SLAVE_ENTER:
++ unw_init_running(kdump_cpu_freeze, NULL);
++ break;
++ case DIE_MCA_RENDZVOUS_LEAVE:
++ if (atomic_read(&kdump_in_progress))
++ unw_init_running(kdump_cpu_freeze, NULL);
++ break;
++ case DIE_MCA_MONARCH_LEAVE:
++ /* die_register->signr indicate if MCA is recoverable */
++ if (!args->signr)
++ machine_kdump_on_init();
++ break;
++ }
++ return NOTIFY_DONE;
++}
++
++#ifdef CONFIG_SYSCTL
++static ctl_table kdump_on_init_table[] = {
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "kdump_on_init",
++ .data = &kdump_on_init,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
++ { .ctl_name = 0 }
++};
++
++static ctl_table sys_table[] = {
++ {
++ .ctl_name = CTL_KERN,
++ .procname = "kernel",
++ .mode = 0555,
++ .child = kdump_on_init_table,
++ },
++ { .ctl_name = 0 }
++};
++#endif
++
++static int
++machine_crash_setup(void)
++{
++ static struct notifier_block kdump_init_notifier_nb = {
++ .notifier_call = kdump_init_notifier,
++ };
++ int ret;
++ if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
++ return ret;
++#ifdef CONFIG_SYSCTL
++ register_sysctl_table(sys_table, 0);
++#endif
++ return 0;
++}
++
++__initcall(machine_crash_setup);
++
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/crash_dump.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/crash_dump.c
+--- linux-2.6.18.8/arch/ia64/kernel/crash_dump.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/crash_dump.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,48 @@
++/*
++ * kernel/crash_dump.c - Memory preserving reboot related code.
++ *
++ * Created by: Simon Horman <horms@verge.net.au>
++ * Original code moved from kernel/crash.c
++ * Original code comment copied from the i386 version of this file
++ */
++
++#include <linux/errno.h>
++#include <linux/types.h>
++
++#include <linux/uaccess.h>
++
++/**
++ * copy_oldmem_page - copy one page from "oldmem"
++ * @pfn: page frame number to be copied
++ * @buf: target memory address for the copy; this can be in kernel address
++ * space or user address space (see @userbuf)
++ * @csize: number of bytes to copy
++ * @offset: offset in bytes into the page (based on pfn) to begin the copy
++ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
++ * otherwise @buf is in kernel address space, use memcpy().
++ *
++ * Copy a page from "oldmem". For this page, there is no pte mapped
++ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
++ *
++ * Calling copy_to_user() in atomic context is not desirable. Hence first
++ * copying the data to a pre-allocated kernel page and then copying to user
++ * space in non-atomic context.
++ */
++ssize_t
++copy_oldmem_page(unsigned long pfn, char *buf,
++ size_t csize, unsigned long offset, int userbuf)
++{
++ void *vaddr;
++
++ if (!csize)
++ return 0;
++ vaddr = __va(pfn<<PAGE_SHIFT);
++ if (userbuf) {
++ if (copy_to_user(buf, (vaddr + offset), csize)) {
++ return -EFAULT;
++ }
++ } else
++ memcpy(buf, (vaddr + offset), csize);
++ return csize;
++}
++
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/efi.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/efi.c
+--- linux-2.6.18.8/arch/ia64/kernel/efi.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/efi.c 2008-08-21 11:36:07.000000000 +0200
+@@ -21,11 +21,13 @@
+ * Skip non-WB memory and ignore empty memory ranges.
+ */
+ #include <linux/module.h>
++#include <linux/bootmem.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/time.h>
+ #include <linux/efi.h>
++#include <linux/kexec.h>
+
+ #include <asm/io.h>
+ #include <asm/kregs.h>
+@@ -34,6 +36,11 @@
+ #include <asm/processor.h>
+ #include <asm/mca.h>
+
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++#include <xen/interface/memory.h>
++#include <asm/hypercall.h>
++#endif
++
+ #define EFI_DEBUG 0
+
+ extern efi_status_t efi_call_phys (void *, ...);
+@@ -41,7 +48,7 @@ extern efi_status_t efi_call_phys (void
+ struct efi efi;
+ EXPORT_SYMBOL(efi);
+ static efi_runtime_services_t *runtime;
+-static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
++static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
+
+ #define efi_call_virt(f, args...) (*(f))(args)
+
+@@ -421,6 +428,8 @@ efi_init (void)
+ mem_limit = memparse(cp + 4, &cp);
+ } else if (memcmp(cp, "max_addr=", 9) == 0) {
+ max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
++ } else if (memcmp(cp, "min_addr=", 9) == 0) {
++ min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
+ } else {
+ while (*cp != ' ' && *cp)
+ ++cp;
+@@ -428,6 +437,8 @@ efi_init (void)
+ ++cp;
+ }
+ }
++ if (min_addr != 0UL)
++ printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
+ if (max_addr != ~0UL)
+ printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
+
+@@ -894,7 +905,8 @@ find_memmap_space (void)
+ as = max(contig_low, md->phys_addr);
+ ae = min(contig_high, efi_md_end(md));
+
+- /* keep within max_addr= command line arg */
++ /* keep within max_addr= and min_addr= command line arg */
++ as = max(as, min_addr);
+ ae = min(ae, max_addr);
+ if (ae <= as)
+ continue;
+@@ -965,6 +977,11 @@ efi_memmap_init(unsigned long *s, unsign
+ if (!is_available_memory(md))
+ continue;
+
++#ifdef CONFIG_CRASH_DUMP
++ /* saved_max_pfn should ignore max_addr= command line arg */
++ if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
++ saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
++#endif
+ /*
+ * Round ends inward to granule boundaries
+ * Give trimmings to uncached allocator
+@@ -1004,7 +1021,8 @@ efi_memmap_init(unsigned long *s, unsign
+ } else
+ ae = efi_md_end(md);
+
+- /* keep within max_addr= command line arg */
++ /* keep within max_addr= and min_addr= command line arg */
++ as = max(as, min_addr);
+ ae = min(ae, max_addr);
+ if (ae <= as)
+ continue;
+@@ -1033,21 +1051,22 @@ efi_memmap_init(unsigned long *s, unsign
+ *e = (u64)++k;
+ }
+
+-void
+-efi_initialize_iomem_resources(struct resource *code_resource,
+- struct resource *data_resource)
++#define EFI_INITIALISE_PHYS 0x1
++#define EFI_INITIALISE_MACH 0x2
++#define EFI_INITIALISE_ALL (EFI_INITIALISE_PHYS|EFI_INITIALISE_MACH)
++
++static void
++efi_initialize_resources(void *efi_map_start, void *efi_map_end,
++ u64 efi_desc_size, struct resource *root_resource,
++ struct resource *code_resource,
++ struct resource *data_resource, unsigned flag)
+ {
+ struct resource *res;
+- void *efi_map_start, *efi_map_end, *p;
++ void *p;
+ efi_memory_desc_t *md;
+- u64 efi_desc_size;
+ char *name;
+ unsigned long flags;
+
+- efi_map_start = __va(ia64_boot_param->efi_memmap);
+- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+- efi_desc_size = ia64_boot_param->efi_memdesc_size;
+-
+ res = NULL;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+@@ -1106,7 +1125,7 @@ efi_initialize_iomem_resources(struct re
+ res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+ res->flags = flags;
+
+- if (insert_resource(&iomem_resource, res) < 0)
++ if (insert_resource(root_resource, res) < 0)
+ kfree(res);
+ else {
+ /*
+@@ -1114,8 +1133,135 @@ efi_initialize_iomem_resources(struct re
+ * kernel data so we try it repeatedly and
+ * let the resource manager test it.
+ */
+- insert_resource(res, code_resource);
+- insert_resource(res, data_resource);
++ if (flag & EFI_INITIALISE_PHYS) {
++ insert_resource(res, code_resource);
++ insert_resource(res, data_resource);
++ }
++#ifdef CONFIG_KEXEC
++ if (flag & EFI_INITIALISE_MACH) {
++ insert_resource(res, &efi_memmap_res);
++ insert_resource(res, &boot_param_res);
++ if (crashk_res.end > crashk_res.start)
++ insert_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ xen_machine_kexec_register_resources(
++ res);
++#endif
++ }
++#endif
+ }
+ }
+ }
++
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++static void
++efi_initialize_iomem_machine_resources(void)
++{
++ unsigned long size;
++ xen_memory_map_t memmap;
++ xen_ia64_memmap_info_t *memmap_info = NULL;
++ void *efi_map_start, *efi_map_end;
++ u64 efi_desc_size;
++ int ret;
++
++ /* It would be nice if it wasn't necessary to loop like this */
++ for (size = 1024; 1; size += 1024) {
++ memmap_info = kmalloc(size, GFP_KERNEL);
++ if (memmap_info == NULL)
++ return; /* -ENOMEM, but no way to return error */
++
++ memmap.nr_entries = size;
++ set_xen_guest_handle(memmap.buffer, memmap_info);
++ ret = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
++ if (!ret)
++ break;
++
++ kfree(memmap_info);
++ }
++
++ efi_map_start = &memmap_info->memdesc;
++ efi_map_end = efi_map_start + memmap_info->efi_memmap_size;
++ efi_desc_size = memmap_info->efi_memdesc_size;
++ efi_initialize_resources(efi_map_start, efi_map_end, efi_desc_size,
++ &iomem_machine_resource, NULL, NULL,
++ EFI_INITIALISE_MACH);
++
++ kfree(memmap_info);
++}
++#endif
++
++void
++efi_initialize_iomem_resources(struct resource *code_resource,
++ struct resource *data_resource)
++{
++ void *efi_map_start, *efi_map_end;
++ u64 efi_desc_size;
++
++ efi_map_start = __va(ia64_boot_param->efi_memmap);
++ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
++ efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++ if (is_initial_xendomain()) {
++ efi_initialize_resources(efi_map_start, efi_map_end,
++ efi_desc_size, &iomem_resource,
++ code_resource, data_resource,
++ EFI_INITIALISE_PHYS);
++ efi_initialize_iomem_machine_resources();
++ }
++ else
++#endif
++ efi_initialize_resources(efi_map_start, efi_map_end,
++ efi_desc_size, &iomem_resource,
++ code_resource, data_resource,
++ EFI_INITIALISE_ALL);
++}
++
++
++
++#ifdef CONFIG_KEXEC
++/*
++ * Find a block of memory aligned to 64M, excluding the reserved
++ * regions; rsvd_regions must be sorted.
++ */
++unsigned long
++kdump_find_rsvd_region (unsigned long size,
++ struct rsvd_region *r, int n)
++{
++ int i;
++ u64 start, end;
++ u64 alignment = 1UL << _PAGE_SIZE_64M;
++ void *efi_map_start, *efi_map_end, *p;
++ efi_memory_desc_t *md;
++ u64 efi_desc_size;
++
++ efi_map_start = __va(ia64_boot_param->efi_memmap);
++ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
++ efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
++ md = p;
++ if (!efi_wb(md))
++ continue;
++ start = ALIGN(md->phys_addr, alignment);
++ end = efi_md_end(md);
++ for (i = 0; i < n; i++) {
++ if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
++ if (__pa(r[i].start) > start + size)
++ return start;
++ start = ALIGN(__pa(r[i].end), alignment);
++ if (i < n-1 && __pa(r[i+1].start) < start + size)
++ continue;
++ else
++ break;
++ }
++ }
++ if (end > start + size)
++ return start;
++ }
++
++ printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
++ size);
++ return ~0UL;
++}
++#endif
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/entry.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/entry.S
+--- linux-2.6.18.8/arch/ia64/kernel/entry.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/entry.S 2008-08-21 11:36:07.000000000 +0200
+@@ -180,7 +180,7 @@ END(sys_clone)
+ * called. The code starting at .map relies on this. The rest of the code
+ * doesn't care about the interrupt masking status.
+ */
+-GLOBAL_ENTRY(ia64_switch_to)
++GLOBAL_ENTRY(__ia64_switch_to)
+ .prologue
+ alloc r16=ar.pfs,1,0,0,0
+ DO_SAVE_SWITCH_STACK
+@@ -234,7 +234,7 @@ GLOBAL_ENTRY(ia64_switch_to)
+ ;;
+ srlz.d
+ br.cond.sptk .done
+-END(ia64_switch_to)
++END(__ia64_switch_to)
+
+ /*
+ * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
+@@ -375,7 +375,7 @@ END(save_switch_stack)
+ * - b7 holds address to return to
+ * - must not touch r8-r11
+ */
+-ENTRY(load_switch_stack)
++GLOBAL_ENTRY(load_switch_stack)
+ .prologue
+ .altrp b7
+
+@@ -510,7 +510,7 @@ END(clone)
+ * because some system calls (such as ia64_execve) directly
+ * manipulate ar.pfs.
+ */
+-GLOBAL_ENTRY(ia64_trace_syscall)
++GLOBAL_ENTRY(__ia64_trace_syscall)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * We need to preserve the scratch registers f6-f11 in case the system
+@@ -582,7 +582,7 @@ strace_error:
+ (p6) mov r10=-1
+ (p6) mov r8=r9
+ br.cond.sptk .strace_save_retval
+-END(ia64_trace_syscall)
++END(__ia64_trace_syscall)
+
+ /*
+ * When traced and returning from sigreturn, we invoke syscall_trace but then
+@@ -601,7 +601,7 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
+ .ret4: br.cond.sptk ia64_leave_kernel
+ END(ia64_strace_leave_kernel)
+
+-GLOBAL_ENTRY(ia64_ret_from_clone)
++GLOBAL_ENTRY(__ia64_ret_from_clone)
+ PT_REGS_UNWIND_INFO(0)
+ { /*
+ * Some versions of gas generate bad unwind info if the first instruction of a
+@@ -627,7 +627,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
+ cmp.ne p6,p0=r2,r0
+ (p6) br.cond.spnt .strace_check_retval
+ ;; // added stop bits to prevent r8 dependency
+-END(ia64_ret_from_clone)
++END(__ia64_ret_from_clone)
+ // fall through
+ GLOBAL_ENTRY(ia64_ret_from_syscall)
+ PT_REGS_UNWIND_INFO(0)
+@@ -635,8 +635,11 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
+ adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
+ mov r10=r0 // clear error indication in r10
+ (p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
++ ;;
++ // don't fall through, ia64_leave_syscall may be #define'd
++ br.cond.sptk.few ia64_leave_syscall
++ ;;
+ END(ia64_ret_from_syscall)
+- // fall through
+ /*
+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+ * need to switch to bank 0 and doesn't restore the scratch registers.
+@@ -681,7 +684,7 @@ END(ia64_ret_from_syscall)
+ * ar.csd: cleared
+ * ar.ssd: cleared
+ */
+-ENTRY(ia64_leave_syscall)
++GLOBAL_ENTRY(__ia64_leave_syscall)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -789,7 +792,7 @@ ENTRY(ia64_leave_syscall)
+ mov.m ar.ssd=r0 // M2 clear ar.ssd
+ mov f11=f0 // F clear f11
+ br.cond.sptk.many rbs_switch // B
+-END(ia64_leave_syscall)
++END(__ia64_leave_syscall)
+
+ #ifdef CONFIG_IA32_SUPPORT
+ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+@@ -801,10 +804,13 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+ st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
+ .mem.offset 8,0
+ st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
++ ;;
++ // don't fall through, ia64_leave_kernel may be #define'd
++ br.cond.sptk.few ia64_leave_kernel
++ ;;
+ END(ia64_ret_from_ia32_execve)
+- // fall through
+ #endif /* CONFIG_IA32_SUPPORT */
+-GLOBAL_ENTRY(ia64_leave_kernel)
++GLOBAL_ENTRY(__ia64_leave_kernel)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -1135,7 +1141,7 @@ skip_rbs_switch:
+ ld8 r10=[r3]
+ br.cond.sptk.many .work_processed_syscall // re-check
+
+-END(ia64_leave_kernel)
++END(__ia64_leave_kernel)
+
+ ENTRY(handle_syscall_error)
+ /*
+@@ -1175,7 +1181,7 @@ END(ia64_invoke_schedule_tail)
+ * be set up by the caller. We declare 8 input registers so the system call
+ * args get preserved, in case we need to restart a system call.
+ */
+-ENTRY(notify_resume_user)
++GLOBAL_ENTRY(notify_resume_user)
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+ alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+ mov r9=ar.unat
+@@ -1263,7 +1269,7 @@ ENTRY(sys_rt_sigreturn)
+ adds sp=16,sp
+ ;;
+ ld8 r9=[sp] // load new ar.unat
+- mov.sptk b7=r8,ia64_leave_kernel
++ mov.sptk b7=r8,__ia64_leave_kernel
+ ;;
+ mov ar.unat=r9
+ br.many b7
+@@ -1575,7 +1581,7 @@ sys_call_table:
+ data8 sys_mq_timedreceive // 1265
+ data8 sys_mq_notify
+ data8 sys_mq_getsetattr
+- data8 sys_ni_syscall // reserved for kexec_load
++ data8 sys_kexec_load
+ data8 sys_ni_syscall // reserved for vserver
+ data8 sys_waitid // 1270
+ data8 sys_add_key
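Aside: for reference, the handler wired into the previously reserved slot above carries the standard prototype declared in <linux/syscalls.h> of this kernel generation:

	asmlinkage long sys_kexec_load(unsigned long entry,
				       unsigned long nr_segments,
				       struct kexec_segment __user *segments,
				       unsigned long flags);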
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/fsys.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/fsys.S
+--- linux-2.6.18.8/arch/ia64/kernel/fsys.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/fsys.S 2008-08-21 11:36:07.000000000 +0200
+@@ -516,11 +516,34 @@ ENTRY(fsys_fallback_syscall)
+ adds r17=-1024,r15
+ movl r14=sys_call_table
+ ;;
++#ifdef CONFIG_XEN
++ movl r18=running_on_xen;;
++ ld4 r18=[r18];;
++ // p14 = running_on_xen
++ // p15 = !running_on_xen
++ cmp.ne p14,p15=r0,r18
++ ;;
++(p14) movl r18=XSI_PSR_I_ADDR;;
++(p14) ld8 r18=[r18]
++(p14) mov r29=1;;
++(p14) st1 [r18]=r29
++(p15) rsm psr.i
++#else
+ rsm psr.i
++#endif
+ shladd r18=r17,3,r14
+ ;;
+ ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
++#ifdef CONFIG_XEN
++(p14) mov r27=r8
++(p14) XEN_HYPER_GET_PSR
++ ;;
++(p14) mov r29=r8
++(p14) mov r8=r27
++(p15) mov r29=psr // read psr (12 cyc load latency)
++#else
+ mov r29=psr // read psr (12 cyc load latency)
++#endif
+ mov r27=ar.rsc
+ mov r21=ar.fpsr
+ mov r26=ar.pfs
+@@ -632,7 +655,25 @@ GLOBAL_ENTRY(fsys_bubble_down)
+ mov rp=r14 // I0 set the real return addr
+ and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
+ ;;
++#ifdef CONFIG_XEN
++ movl r14=running_on_xen;;
++ ld4 r14=[r14];;
++ // p14 = running_on_xen
++ // p15 = !running_on_xen
++ cmp.ne p14,p15=r0,r14
++ ;;
++(p14) movl r28=XSI_PSR_I_ADDR;;
++(p14) ld8 r28=[r28];;
++(p14) adds r28=-1,r28;; // event_pending
++(p14) ld1 r14=[r28];;
++(p14) cmp.ne.unc p13,p14=r14,r0;;
++(p13) XEN_HYPER_SSM_I
++(p14) adds r28=1,r28;; // event_mask
++(p14) st1 [r28]=r0;;
++(p15) ssm psr.i
++#else
+ ssm psr.i // M2 we're on kernel stacks now, reenable irqs
++#endif
+ cmp.eq p8,p0=r3,r0 // A
+ (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
+
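Aside: both CONFIG_XEN paths in the hunks above implement Xen's paravirtual interrupt flag. XSI_PSR_I_ADDR points at the vcpu's evtchn_upcall_mask byte, and the event-pending byte sits directly below it (hence the adds r28=-1,r28). In C terms the mask/unmask sequences amount to roughly the following sketch (illustrative names, not part of the patch):

	#include <stdint.h>

	extern void xen_hyper_ssm_i(void);	/* stands in for XEN_HYPER_SSM_I */

	/* rsm psr.i equivalent: mask event (interrupt) delivery */
	static inline void xen_irq_mask(volatile uint8_t *upcall_mask)
	{
		*upcall_mask = 1;
	}

	/* ssm psr.i equivalent: unmask, then replay any event that arrived
	 * while masked; the pending byte lives just below the mask byte */
	static inline void xen_irq_unmask(volatile uint8_t *upcall_mask)
	{
		volatile uint8_t *upcall_pending = upcall_mask - 1;

		*upcall_mask = 0;
		if (*upcall_pending)
			xen_hyper_ssm_i();	/* force the missed upcall */
	}

The XEN_SET_PSR_I macro added to gate.S later in this patch is the same unmask-and-replay sequence, expressed with predicate registers.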
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/gate.lds.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/gate.lds.S
+--- linux-2.6.18.8/arch/ia64/kernel/gate.lds.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/gate.lds.S 2008-08-21 11:36:07.000000000 +0200
+@@ -28,6 +28,24 @@ SECTIONS
+ . = GATE_ADDR + 0x500;
+
+ .data.patch : {
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++#define __start_gate_mckinley_e9_patchlist \
++ __start_gate_mckinley_e9_patchlist_xen
++#define __end_gate_mckinley_e9_patchlist \
++ __end_gate_mckinley_e9_patchlist_xen
++#define __start_gate_vtop_patchlist \
++ __start_gate_vtop_patchlist_xen
++#define __end_gate_vtop_patchlist \
++ __end_gate_vtop_patchlist_xen
++#define __start_gate_fsyscall_patchlist \
++ __start_gate_fsyscall_patchlist_xen
++#define __end_gate_fsyscall_patchlist \
++ __end_gate_fsyscall_patchlist_xen
++#define __start_gate_brl_fsys_bubble_down_patchlist \
++ __start_gate_brl_fsys_bubble_down_patchlist_xen
++#define __end_gate_brl_fsys_bubble_down_patchlist \
++ __end_gate_brl_fsys_bubble_down_patchlist_xen
++#endif
+ __start_gate_mckinley_e9_patchlist = .;
+ *(.data.patch.mckinley_e9)
+ __end_gate_mckinley_e9_patchlist = .;
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/gate.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/gate.S
+--- linux-2.6.18.8/arch/ia64/kernel/gate.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/gate.S 2008-08-21 11:36:07.000000000 +0200
+@@ -32,102 +32,6 @@
+ [1:](pr)brl.cond.sptk 0; \
+ .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+
+-GLOBAL_ENTRY(__kernel_syscall_via_break)
+- .prologue
+- .altrp b6
+- .body
+- /*
+- * Note: for (fast) syscall restart to work, the break instruction must be
+- * the first one in the bundle addressed by syscall_via_break.
+- */
+-{ .mib
+- break 0x100000
+- nop.i 0
+- br.ret.sptk.many b6
+-}
+-END(__kernel_syscall_via_break)
+-
+-/*
+- * On entry:
+- * r11 = saved ar.pfs
+- * r15 = system call #
+- * b0 = saved return address
+- * b6 = return address
+- * On exit:
+- * r11 = saved ar.pfs
+- * r15 = system call #
+- * b0 = saved return address
+- * all other "scratch" registers: undefined
+- * all "preserved" registers: same as on entry
+- */
+-
+-GLOBAL_ENTRY(__kernel_syscall_via_epc)
+- .prologue
+- .altrp b6
+- .body
+-{
+- /*
+- * Note: the kernel cannot assume that the first two instructions in this
+- * bundle get executed. The remaining code must be safe even if
+- * they do not get executed.
+- */
+- adds r17=-1024,r15 // A
+- mov r10=0 // A default to successful syscall execution
+- epc // B causes split-issue
+-}
+- ;;
+- rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
+- LOAD_FSYSCALL_TABLE(r14) // X
+- ;;
+- mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
+- shladd r18=r17,3,r14 // A
+- mov r19=NR_syscalls-1 // A
+- ;;
+- lfetch [r18] // M0|1
+- mov r29=psr // M2 (12 cyc)
+- // If r17 is a NaT, p6 will be zero
+- cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
+- ;;
+- mov r21=ar.fpsr // M2 (12 cyc)
+- tnat.nz p10,p9=r15 // I0
+- mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
+- ;;
+- srlz.d // M0 (forces split-issue) ensure PSR.BE==0
+-(p6) ld8 r18=[r18] // M0|1
+- nop.i 0
+- ;;
+- nop.m 0
+-(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
+- nop.i 0
+- ;;
+-(p8) ssm psr.i
+-(p6) mov b7=r18 // I0
+-(p8) br.dptk.many b7 // B
+-
+- mov r27=ar.rsc // M2 (12 cyc)
+-/*
+- * brl.cond doesn't work as intended because the linker would convert this branch
+- * into a branch to a PLT. Perhaps there will be a way to avoid this with some
+- * future version of the linker. In the meantime, we just use an indirect branch
+- * instead.
+- */
+-#ifdef CONFIG_ITANIUM
+-(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
+- ;;
+-(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
+- ;;
+-(p6) mov b7=r14
+-(p6) br.sptk.many b7
+-#else
+- BRL_COND_FSYS_BUBBLE_DOWN(p6)
+-#endif
+- ssm psr.i
+- mov r10=-1
+-(p10) mov r8=EINVAL
+-(p9) mov r8=ENOSYS
+- FSYS_RETURN
+-END(__kernel_syscall_via_epc)
+-
+ # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
+ # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
+ # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET)
+@@ -373,3 +277,154 @@ restore_rbs:
+ // invala not necessary as that will happen when returning to user-mode
+ br.cond.sptk back_from_restore_rbs
+ END(__kernel_sigtramp)
++
++GLOBAL_ENTRY(__kernel_syscall_via_break)
++ .prologue
++ .altrp b6
++ .body
++ /*
++ * Note: for (fast) syscall restart to work, the break instruction must be
++ * the first one in the bundle addressed by syscall_via_break.
++ */
++{ .mib
++ break 0x100000
++ nop.i 0
++ br.ret.sptk.many b6
++}
++END(__kernel_syscall_via_break)
++
++/*
++ * On entry:
++ * r11 = saved ar.pfs
++ * r15 = system call #
++ * b0 = saved return address
++ * b6 = return address
++ * On exit:
++ * r11 = saved ar.pfs
++ * r15 = system call #
++ * b0 = saved return address
++ * all other "scratch" registers: undefined
++ * all "preserved" registers: same as on entry
++ */
++
++GLOBAL_ENTRY(__kernel_syscall_via_epc)
++ .prologue
++ .altrp b6
++ .body
++{
++ /*
++ * Note: the kernel cannot assume that the first two instructions in this
++ * bundle get executed. The remaining code must be safe even if
++ * they do not get executed.
++ */
++ adds r17=-1024,r15 // A
++ mov r10=0 // A default to successful syscall execution
++ epc // B causes split-issue
++}
++ ;;
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++ // r20 = 1
++ // r22 = &vcpu->vcpu_info->evtchn_upcall_mask
++ // r24 = &vcpu->vcpu_info->evtchn_upcall_pending
++ // r25 = tmp
++ // r31 = tmp
++ // p11 = tmp
++ // p14 = tmp
++ mov r20=1
++ movl r22=XSI_PSR_I_ADDR
++ ;;
++ ld8 r22=[r22]
++ ;;
++ st1 [r22]=r20
++ rum psr.be
++ adds r24=-1,r22
++#else
++ rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
++#endif
++ LOAD_FSYSCALL_TABLE(r14) // X
++ ;;
++ mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
++ shladd r18=r17,3,r14 // A
++ mov r19=NR_syscalls-1 // A
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++ XEN_HYPER_GET_PSR
++ ;;
++ lfetch [r18] // M0|1
++ mov r29=r8
++#else
++ ;;
++ lfetch [r18] // M0|1
++ mov r29=psr // M2 (12 cyc)
++#endif
++ // If r17 is a NaT, p6 will be zero
++ cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
++ ;;
++ mov r21=ar.fpsr // M2 (12 cyc)
++ tnat.nz p10,p9=r15 // I0
++ mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
++ ;;
++ srlz.d // M0 (forces split-issue) ensure PSR.BE==0
++(p6) ld8 r18=[r18] // M0|1
++ nop.i 0
++ ;;
++ nop.m 0
++(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++
++#define XEN_SET_PSR_I(pred) \
++(pred) ld1 r31=[r22]; \
++ ;; ; \
++(pred) st1 [r22]=r0; \
++(pred) cmp.ne.unc p14,p0=r0,r31; \
++ ;; ; \
++(p14) ld1 r25=[r24]; \
++ ;; ; \
++(p14) cmp.ne.unc p11,p0=r0,r25; \
++ ;; ; \
++(p11) XEN_HYPER_SSM_I;
++
++ ;;
++ XEN_SET_PSR_I(p8)
++#else
++ nop.i 0
++ ;;
++(p8) ssm psr.i
++#endif
++(p6) mov b7=r18 // I0
++(p8) br.dptk.many b7 // B
++
++ mov r27=ar.rsc // M2 (12 cyc)
++/*
++ * brl.cond doesn't work as intended because the linker would convert this branch
++ * into a branch to a PLT. Perhaps there will be a way to avoid this with some
++ * future version of the linker. In the meantime, we just use an indirect branch
++ * instead.
++ */
++#ifdef CONFIG_ITANIUM
++(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
++ ;;
++(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
++ ;;
++(p6) mov b7=r14
++(p6) br.sptk.many b7
++#else
++ BRL_COND_FSYS_BUBBLE_DOWN(p6)
++#endif
++#ifdef __XEN_IA64_VDSO_PARAVIRT
++ XEN_SET_PSR_I(p0)
++#else
++ ssm psr.i
++#endif
++ mov r10=-1
++(p10) mov r8=EINVAL
++(p9) mov r8=ENOSYS
++ FSYS_RETURN
++#ifdef __KERNEL_SYSCALL_VIA_EPC_PADDING
++ /*
++ * All values/sizes of the __kernel_xxx symbols in gate.so and xengate.so
++ * must be identical.
++ * Adjust the symbol size in gate.so to match the one in xengate.so.
++ */
++.include "arch/ia64/kernel/gate-skip.s"
++#endif
++END(__kernel_syscall_via_epc)
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/head.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/head.S
+--- linux-2.6.18.8/arch/ia64/kernel/head.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/head.S 2008-08-21 11:36:07.000000000 +0200
+@@ -367,6 +367,12 @@ start_ap:
+ ;;
+ (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
+
++#ifdef CONFIG_XEN
++ // Note: isBP is used by the subprogram.
++ br.call.sptk.many rp=early_xen_setup
++ ;;
++#endif
++
+ #ifdef CONFIG_SMP
+ (isAP) br.call.sptk.many rp=start_secondary
+ .ret0:
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/iosapic.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/iosapic.c
+--- linux-2.6.18.8/arch/ia64/kernel/iosapic.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/iosapic.c 2008-08-21 11:36:07.000000000 +0200
+@@ -159,6 +159,75 @@ static unsigned char pcat_compat __devin
+ static int iosapic_kmalloc_ok;
+ static LIST_HEAD(free_rte_list);
+
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++#include <asm/hypervisor.h>
++static inline unsigned int xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = (unsigned long)iosapic -
++ __IA64_UNCACHED_OFFSET;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = (unsigned long)iosapic -
++ __IA64_UNCACHED_OFFSET;
++ apic_op.reg = reg;
++ apic_op.value = val;
++ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++}
++
++static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
++{
++ if (!is_running_on_xen()) {
++ writel(reg, iosapic + IOSAPIC_REG_SELECT);
++ return readl(iosapic + IOSAPIC_WINDOW);
++ } else
++ return xen_iosapic_read(iosapic, reg);
++}
++
++static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
++{
++ if (!is_running_on_xen()) {
++ writel(reg, iosapic + IOSAPIC_REG_SELECT);
++ writel(val, iosapic + IOSAPIC_WINDOW);
++ } else
++ xen_iosapic_write(iosapic, reg, val);
++}
++
++int xen_assign_irq_vector(int irq)
++{
++ struct physdev_irq irq_op;
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
++ return -ENOSPC;
++
++ return irq_op.vector;
++}
++
++void xen_free_irq_vector(int vector)
++{
++ struct physdev_irq irq_op;
++
++ irq_op.vector = vector;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
++		printk(KERN_WARNING "%s: xen_free_irq_vector failed, vector=%d\n",
++ __FUNCTION__, vector);
++}
++#endif /* XEN */
++
+ /*
+ * Find an IOSAPIC associated with a GSI
+ */
+@@ -288,6 +357,27 @@ nop (unsigned int irq)
+ /* do nothing... */
+ }
+
++
++#ifdef CONFIG_KEXEC
++void
++kexec_disable_iosapic(void)
++{
++ struct iosapic_intr_info *info;
++ struct iosapic_rte_info *rte;
++ u8 vec = 0;
++ for (info = iosapic_intr_info; info <
++ iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
++ list_for_each_entry(rte, &info->rtes,
++ rte_list) {
++ iosapic_write(rte->addr,
++ IOSAPIC_RTE_LOW(rte->rte_index),
++ IOSAPIC_MASK|vec);
++ iosapic_eoi(rte->addr, vec);
++ }
++ }
++}
++#endif
++
+ static void
+ mask_irq (unsigned int irq)
+ {
+@@ -653,6 +743,9 @@ register_intr (unsigned int gsi, int vec
+ iosapic_intr_info[vector].dmode = delivery;
+ iosapic_intr_info[vector].trigger = trigger;
+
++ if (is_running_on_xen())
++ return 0;
++
+ if (trigger == IOSAPIC_EDGE)
+ irq_type = &irq_type_iosapic_edge;
+ else
+@@ -1015,6 +1108,9 @@ iosapic_system_init (int system_pcat_com
+ }
+
+ pcat_compat = system_pcat_compat;
++ if (is_running_on_xen())
++ return;
++
+ if (pcat_compat) {
+ /*
+ * Disable the compatibility mode interrupts (8259 style),
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/irq_ia64.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/irq_ia64.c
+--- linux-2.6.18.8/arch/ia64/kernel/irq_ia64.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/irq_ia64.c 2008-08-21 11:36:07.000000000 +0200
+@@ -30,6 +30,9 @@
+ #include <linux/smp_lock.h>
+ #include <linux/threads.h>
+ #include <linux/bitops.h>
++#ifdef CONFIG_XEN
++#include <linux/cpu.h>
++#endif
+
+ #include <asm/delay.h>
+ #include <asm/intrinsics.h>
+@@ -69,6 +72,13 @@ int
+ assign_irq_vector (int irq)
+ {
+ int pos, vector;
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ extern int xen_assign_irq_vector(int);
++ return xen_assign_irq_vector(irq);
++ }
++#endif
+ again:
+ pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
+ vector = IA64_FIRST_DEVICE_VECTOR + pos;
+@@ -87,6 +97,13 @@ free_irq_vector (int vector)
+ if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+ return;
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ extern void xen_free_irq_vector(int);
++ xen_free_irq_vector(vector);
++ return;
++ }
++#endif
+ pos = vector - IA64_FIRST_DEVICE_VECTOR;
+ if (!test_and_clear_bit(pos, ia64_vector_mask))
+ printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+@@ -240,12 +257,342 @@ static struct irqaction ipi_irqaction =
+ };
+ #endif
+
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
++#include <xen/interface/callback.h>
++
++static DEFINE_PER_CPU(int, timer_irq) = -1;
++static DEFINE_PER_CPU(int, ipi_irq) = -1;
++static DEFINE_PER_CPU(int, resched_irq) = -1;
++static DEFINE_PER_CPU(int, cmc_irq) = -1;
++static DEFINE_PER_CPU(int, cmcp_irq) = -1;
++static DEFINE_PER_CPU(int, cpep_irq) = -1;
++static char timer_name[NR_CPUS][15];
++static char ipi_name[NR_CPUS][15];
++static char resched_name[NR_CPUS][15];
++static char cmc_name[NR_CPUS][15];
++static char cmcp_name[NR_CPUS][15];
++static char cpep_name[NR_CPUS][15];
++
++struct saved_irq {
++ unsigned int irq;
++ struct irqaction *action;
++};
++/* 16 should be a comfortably optimistic value, since only a few
++ * percpu irqs are registered early.
++ */
++#define MAX_LATE_IRQ 16
++static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
++static unsigned short late_irq_cnt = 0;
++static unsigned short saved_irq_cnt = 0;
++static int xen_slab_ready = 0;
++
++#ifdef CONFIG_SMP
++/* Dummy stub. Though we could check RESCHEDULE_VECTOR before __do_IRQ,
++ * that would end up issuing several memory accesses on percpu data and
++ * thus add unnecessary traffic to other paths.
++ */
++static irqreturn_t
++handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
++{
++
++ return IRQ_HANDLED;
++}
++
++static struct irqaction resched_irqaction = {
++ .handler = handle_reschedule,
++ .flags = SA_INTERRUPT,
++ .name = "RESCHED"
++};
++#endif
++
++/*
++ * This is the xen version of percpu irq registration, which needs to
++ * bind to the xen-specific evtchn sub-system. One trick here is that
++ * the xen evtchn binding interface depends on kmalloc, because the
++ * related port needs to be freed at device/cpu down. So we cache
++ * registrations made on the BSP before the slab allocator is ready
++ * and deal with them later. Any registration that happens after slab
++ * is ready is hooked to the xen evtchn immediately.
++ *
++ * FIXME: MCA is not supported so far, and thus the "nomca" boot param
++ * is required.
++ */
++static void
++xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
++ struct irqaction *action, int save)
++{
++ irq_desc_t *desc;
++ int irq = 0;
++
++ if (xen_slab_ready) {
++ switch (vec) {
++ case IA64_TIMER_VECTOR:
++ sprintf(timer_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
++ action->handler, action->flags,
++ timer_name[cpu], action->dev_id);
++ per_cpu(timer_irq,cpu) = irq;
++ break;
++ case IA64_IPI_RESCHEDULE:
++ sprintf(resched_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
++ action->handler, action->flags,
++ resched_name[cpu], action->dev_id);
++ per_cpu(resched_irq,cpu) = irq;
++ break;
++ case IA64_IPI_VECTOR:
++ sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
++ action->handler, action->flags,
++ ipi_name[cpu], action->dev_id);
++ per_cpu(ipi_irq,cpu) = irq;
++ break;
++ case IA64_CMC_VECTOR:
++ sprintf(cmc_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
++ action->handler,
++ action->flags,
++ cmc_name[cpu],
++ action->dev_id);
++ per_cpu(cmc_irq,cpu) = irq;
++ break;
++ case IA64_CMCP_VECTOR:
++ sprintf(cmcp_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
++ action->handler,
++ action->flags,
++ cmcp_name[cpu],
++ action->dev_id);
++ per_cpu(cmcp_irq,cpu) = irq;
++ break;
++ case IA64_CPEP_VECTOR:
++ sprintf(cpep_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
++ action->handler,
++ action->flags,
++ cpep_name[cpu],
++ action->dev_id);
++ per_cpu(cpep_irq,cpu) = irq;
++ break;
++ case IA64_CPE_VECTOR:
++ case IA64_MCA_RENDEZ_VECTOR:
++ case IA64_PERFMON_VECTOR:
++ case IA64_MCA_WAKEUP_VECTOR:
++ case IA64_SPURIOUS_INT_VECTOR:
++ /* No need to complain, these aren't supported. */
++ break;
++ default:
++ printk(KERN_WARNING "Percpu irq %d is unsupported "
++ "by xen!\n", vec);
++ break;
++ }
++ BUG_ON(irq < 0);
++
++ if (irq > 0) {
++ /*
++ * Mark percpu. Without this, migrate_irqs() will
++			 * mark the interrupt for migration and trigger it
++ * on cpu hotplug.
++ */
++ desc = irq_desc + irq;
++ desc->status |= IRQ_PER_CPU;
++ }
++ }
++
++	/* On the BSP we cache registered percpu irqs, and then re-walk
++	 * them when initializing the APs.
++ */
++ if (!cpu && save) {
++ BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
++ saved_percpu_irqs[saved_irq_cnt].irq = vec;
++ saved_percpu_irqs[saved_irq_cnt].action = action;
++ saved_irq_cnt++;
++ if (!xen_slab_ready)
++ late_irq_cnt++;
++ }
++}
++
++static void
++xen_bind_early_percpu_irq (void)
++{
++ int i;
++
++ xen_slab_ready = 1;
++	/* There's no race when accessing this cached array, since only
++	 * the BSP takes this path, and only once, shortly after boot.
++ */
++ for (i = 0; i < late_irq_cnt; i++)
++ xen_register_percpu_irq(smp_processor_id(),
++ saved_percpu_irqs[i].irq,
++ saved_percpu_irqs[i].action, 0);
++}
++
++/* FIXME: There's no obvious hook point at which to check whether slab
++ * is ready, so as a hack we piggyback on the late_time_init hook.
++ */
++extern void (*late_time_init)(void);
++extern char xen_event_callback;
++extern void xen_init_IRQ(void);
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int __devinit
++unbind_evtchn_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++
++ if (action == CPU_DEAD) {
++ /* Unregister evtchn. */
++ if (per_cpu(cpep_irq,cpu) >= 0) {
++ unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
++ per_cpu(cpep_irq, cpu) = -1;
++ }
++ if (per_cpu(cmcp_irq,cpu) >= 0) {
++ unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
++ per_cpu(cmcp_irq, cpu) = -1;
++ }
++ if (per_cpu(cmc_irq,cpu) >= 0) {
++ unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
++ per_cpu(cmc_irq, cpu) = -1;
++ }
++ if (per_cpu(ipi_irq,cpu) >= 0) {
++ unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL);
++ per_cpu(ipi_irq, cpu) = -1;
++ }
++ if (per_cpu(resched_irq,cpu) >= 0) {
++ unbind_from_irqhandler (per_cpu(resched_irq, cpu),
++ NULL);
++ per_cpu(resched_irq, cpu) = -1;
++ }
++ if (per_cpu(timer_irq,cpu) >= 0) {
++ unbind_from_irqhandler (per_cpu(timer_irq, cpu), NULL);
++ per_cpu(timer_irq, cpu) = -1;
++ }
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block unbind_evtchn_notifier = {
++ .notifier_call = unbind_evtchn_callback,
++ .priority = 0
++};
++#endif
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++void xen_smp_intr_init_early(unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++ unsigned int i;
++
++ for (i = 0; i < saved_irq_cnt; i++)
++ xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
++ saved_percpu_irqs[i].action, 0);
++#endif
++}
++
++void xen_smp_intr_init(void)
++{
++#ifdef CONFIG_SMP
++ unsigned int cpu = smp_processor_id();
++ struct callback_register event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long)&xen_event_callback,
++ };
++
++ if (cpu == 0) {
++ /* Initialization was already done for boot cpu. */
++#ifdef CONFIG_HOTPLUG_CPU
++ /* Register the notifier only once. */
++ register_cpu_notifier(&unbind_evtchn_notifier);
++#endif
++ return;
++ }
++
++	/* This should piggyback on setting up the vcpu guest context */
++ BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
++#endif /* CONFIG_SMP */
++}
++
++void
++xen_irq_init(void)
++{
++ struct callback_register event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long)&xen_event_callback,
++ };
++
++ xen_init_IRQ();
++ BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
++ late_time_init = xen_bind_early_percpu_irq;
++#ifdef CONFIG_SMP
++ register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
++#endif
++}
++
++void
++xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
++{
++ int irq = -1;
++ extern void xen_send_ipi(int cpu, int vec);
++
++#ifdef CONFIG_SMP
++ /* TODO: we need to call vcpu_up here */
++ if (unlikely(vector == ap_wakeup_vector)) {
++ /* XXX
++		 * This should be in __cpu_up(cpu) in ia64 smpboot.c,
++		 * as on x86, but we don't want to modify that file,
++		 * so keep it untouched.
++ */
++ xen_smp_intr_init_early(cpu);
++
++ xen_send_ipi (cpu, vector);
++ //vcpu_prepare_and_up(cpu);
++ return;
++ }
++#endif
++
++ switch (vector) {
++ case IA64_IPI_VECTOR:
++ irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
++ break;
++ case IA64_IPI_RESCHEDULE:
++ irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
++ break;
++ case IA64_CMCP_VECTOR:
++ irq = per_cpu(ipi_to_irq, cpu)[CMCP_VECTOR];
++ break;
++ case IA64_CPEP_VECTOR:
++ irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
++ break;
++ case IA64_TIMER_VECTOR:
++ xen_send_ipi(cpu, vector);
++ return;
++ default:
++ printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
++ vector);
++ irq = 0;
++ break;
++ }
++
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++ return;
++}
++#endif /* CONFIG_XEN */
++
+ void
+ register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ {
+ irq_desc_t *desc;
+ unsigned int irq;
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ return xen_register_percpu_irq(smp_processor_id(),
++ vec, action, 1);
++#endif
++
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ if (irq_to_vector(irq) == vec) {
+ desc = irq_desc + irq;
+@@ -267,6 +614,10 @@ init_IRQ (void)
+ pfm_init_percpu();
+ #endif
+ platform_irq_init();
++#ifdef CONFIG_XEN
++ if (is_running_on_xen() && !ia64_platform_is("xen"))
++ xen_irq_init();
++#endif
+ }
+
+ void
+@@ -276,6 +627,13 @@ ia64_send_ipi (int cpu, int vector, int
+ unsigned long ipi_data;
+ unsigned long phys_cpu_id;
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ xen_platform_send_ipi(cpu, vector, delivery_mode, redirect);
++ return;
++ }
++#endif
++
+ #ifdef CONFIG_SMP
+ phys_cpu_id = cpu_physical_id(cpu);
+ #else
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/machine_kexec.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/machine_kexec.c
+--- linux-2.6.18.8/arch/ia64/kernel/machine_kexec.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/machine_kexec.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,204 @@
++/*
++ * arch/ia64/kernel/machine_kexec.c
++ *
++ * Handle transition of Linux booting another kernel
++ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
++ * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2. See the file COPYING for more details.
++ */
++
++#include <linux/mm.h>
++#include <linux/kexec.h>
++#include <linux/cpu.h>
++#include <linux/irq.h>
++#include <asm/mmu_context.h>
++#include <asm/setup.h>
++#include <asm/delay.h>
++#include <asm/meminit.h>
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#include <asm/kexec.h>
++#endif
++
++typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
++ struct ia64_boot_param *, unsigned long);
++
++struct kimage *ia64_kimage;
++
++struct resource efi_memmap_res = {
++ .name = "EFI Memory Map",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++struct resource boot_param_res = {
++ .name = "Boot parameter",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++
++/*
++ * Do whatever setup is needed on the image and the
++ * reboot code buffer to allow us to avoid allocations
++ * later.
++ */
++int machine_kexec_prepare(struct kimage *image)
++{
++ void *control_code_buffer;
++ const unsigned long *func;
++
++ func = (unsigned long *)&relocate_new_kernel;
++ /* Pre-load control code buffer to minimize work in kexec path */
++ control_code_buffer = page_address(image->control_code_page);
++ memcpy((void *)control_code_buffer, (const void *)func[0],
++ relocate_new_kernel_size);
++ flush_icache_range((unsigned long)control_code_buffer,
++ (unsigned long)control_code_buffer + relocate_new_kernel_size);
++ ia64_kimage = image;
++
++ return 0;
++}
++
++void machine_kexec_cleanup(struct kimage *image)
++{
++}
++
++#ifndef CONFIG_XEN
++void machine_shutdown(void)
++{
++ int cpu;
++
++ for_each_online_cpu(cpu) {
++ if (cpu != smp_processor_id())
++ cpu_down(cpu);
++ }
++ kexec_disable_iosapic();
++}
++
++/*
++ * Do not allocate memory (or fail in any way) in machine_kexec().
++ * We are past the point of no return, committed to rebooting now.
++ */
++extern void *efi_get_pal_addr(void);
++static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
++{
++ struct kimage *image = arg;
++ relocate_new_kernel_t rnk;
++ void *pal_addr = efi_get_pal_addr();
++ unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
++ unsigned long vector;
++ int ii;
++
++ if (image->type == KEXEC_TYPE_CRASH) {
++ crash_save_this_cpu();
++ current->thread.ksp = (__u64)info->sw - 16;
++ }
++
++ /* Interrupts aren't acceptable while we reboot */
++ local_irq_disable();
++
++ /* Mask CMC and Performance Monitor interrupts */
++ ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
++ ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
++
++ /* Mask ITV and Local Redirect Registers */
++ ia64_set_itv(1 << 16);
++ ia64_set_lrr0(1 << 16);
++ ia64_set_lrr1(1 << 16);
++
++ /* terminate possible nested in-service interrupts */
++ for (ii = 0; ii < 16; ii++)
++ ia64_eoi();
++
++ /* unmask TPR and clear any pending interrupts */
++ ia64_setreg(_IA64_REG_CR_TPR, 0);
++ ia64_srlz_d();
++ vector = ia64_get_ivr();
++ while (vector != IA64_SPURIOUS_INT_VECTOR) {
++ ia64_eoi();
++ vector = ia64_get_ivr();
++ }
++ platform_kernel_launch_event();
++ rnk = (relocate_new_kernel_t)&code_addr;
++ (*rnk)(image->head, image->start, ia64_boot_param,
++ GRANULEROUNDDOWN((unsigned long) pal_addr));
++ BUG();
++}
++
++void machine_kexec(struct kimage *image)
++{
++ unw_init_running(ia64_machine_kexec, image);
++ for(;;);
++}
++#else /* CONFIG_XEN */
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki,struct kimage *image)
++{
++ xki->reboot_code_buffer =
++ kexec_page_to_pfn(image->control_code_page) << PAGE_SHIFT;
++}
++
++static struct resource xen_hypervisor_heap_res;
++
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus)
++{
++ xen_kexec_range_t range;
++ int k;
++
++ /* fill in xen_hypervisor_heap_res with hypervisor heap
++ * machine address range
++ */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_XENHEAP;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ return -1;
++
++ xen_hypervisor_heap_res.name = "Hypervisor heap";
++ xen_hypervisor_heap_res.start = range.start;
++ xen_hypervisor_heap_res.end = range.start + range.size - 1;
++ xen_hypervisor_heap_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++
++ /* The per-cpu crash note resources belong inside the
++ * hypervisor heap resource */
++ for (k = 0; k < nr_phys_cpus; k++)
++ request_resource(&xen_hypervisor_heap_res, phys_cpus + k);
++
++ /* fill in efi_memmap_res with EFI memmap machine address range */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_EFI_MEMMAP;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ return -1;
++
++ efi_memmap_res.start = range.start;
++ efi_memmap_res.end = range.start + range.size - 1;
++
++ /* fill in boot_param_res with boot parameter machine address range */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_BOOT_PARAM;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ return -1;
++
++ boot_param_res.start = range.start;
++ boot_param_res.end = range.start + range.size - 1;
++
++ return 0;
++}
++
++void machine_kexec_register_resources(struct resource *res)
++{
++ request_resource(res, &xen_hypervisor_heap_res);
++}
++#endif /* CONFIG_XEN */
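Aside: the three KEXEC_CMD_kexec_get_range sequences in machine_kexec_setup_resources() differ only in the range type and the resource they fill; a hypothetical helper (sketch, reusing only interfaces already used above) could factor them:

	static int __init get_xen_kexec_range(int what, struct resource *res)
	{
		xen_kexec_range_t range;

		memset(&range, 0, sizeof(range));
		range.range = what;

		if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
			return -1;

		res->start = range.start;
		res->end = range.start + range.size - 1;
		return 0;
	}

machine_kexec_setup_resources() would then reduce to three such calls plus the per-cpu crash-note registration.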
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/Makefile linux-2.6.18-xen-3.3.0/arch/ia64/kernel/Makefile
+--- linux-2.6.18.8/arch/ia64/kernel/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -28,6 +28,8 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
+ obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
++obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
++obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
+ obj-$(CONFIG_AUDIT) += audit.o
+ mca_recovery-y += mca_drv.o mca_drv_asm.o
+@@ -61,3 +63,61 @@ $(obj)/gate-syms.o: $(obj)/gate.lds $(ob
+ # We must build gate.so before we can assemble it.
+ # Note: kbuild does not track this dependency due to usage of .incbin
+ $(obj)/gate-data.o: $(obj)/gate.so
++
++#
++# gate page paravirtualization for xen
++#
++obj-$(CONFIG_XEN) += xengate-data.o
++
++ifeq ($(CONFIG_XEN), y)
++# The gate DSO image is built using a special linker script.
++targets += xengate.so xengate-syms.o
++endif
++
++extra-$(CONFIG_XEN) += xengate.so xengate.lds xengate.o
++
++AFLAGS_xengate.o += -D__XEN_IA64_VDSO_PARAVIRT
++$(obj)/xengate.o: $(src)/gate.S FORCE
++ $(call if_changed_dep,as_o_S)
++
++CPPFLAGS_xengate.lds := -P -C -U$(ARCH) -D__XEN_IA64_VDSO_PARAVIRT
++$(obj)/xengate.lds: $(src)/gate.lds.S
++ $(call if_changed_dep,cpp_lds_S)
++
++GATECFLAGS_xengate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
++ $(call ld-option, -Wl$(comma)--hash-style=sysv)
++$(obj)/xengate.so: $(obj)/xengate.lds $(obj)/xengate.o FORCE
++ $(call if_changed,gate)
++
++ifeq ($(CONFIG_XEN), y)
++$(obj)/built-in.o: $(obj)/xengate-syms.o
++$(obj)/built-in.o: ld_flags += -R $(obj)/xengate-syms.o
++$(obj)/mca_recovery.o: $(obj)/gate-syms.o $(obj)/xengate-syms.o
++endif
++
++GATECFLAGS_xengate-syms.o = -r
++$(obj)/xengate-syms.o: $(obj)/xengate.lds $(obj)/xengate.o FORCE
++ $(call if_changed,gate)
++$(obj)/xengate-data.o: $(obj)/xengate.so
++
++#
++# .tmp_gate.o to calculate padding size for __kernel_syscall_via_epc
++#
++extra-$(CONFIG_XEN) += gate-skip.s .tmp_gate.o
++
++ifeq ($(CONFIG_XEN), y)
++AFLAGS_gate.o += -D__KERNEL_SYSCALL_VIA_EPC_PADDING
++$(obj)/gate.o: $(obj)/gate-skip.s FORCE
++endif
++
++$(obj)/.tmp_gate.o: $(src)/gate.S FORCE
++ $(call if_changed_dep,as_o_S)
++
++quiet_cmd_gate_size = GATE_SIZE $@
++ cmd_gate_size = $(NM) --extern-only --print-size $(obj)/xengate.o | \
++ $(AWK) '/__kernel_syscall_via_epc/{printf "\t.skip 0x"$$2" - "}' > $@; \
++ $(NM) --extern-only --print-size $(obj)/.tmp_gate.o | \
++ $(AWK) '/__kernel_syscall_via_epc/{printf "0x"$$2"\n"}' >> $@
++
++$(obj)/gate-skip.s: $(obj)/xengate.o $(obj)/.tmp_gate.o FORCE
++ $(call if_changed,gate_size)
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/mca.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/mca.c
+--- linux-2.6.18.8/arch/ia64/kernel/mca.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/mca.c 2008-08-21 11:36:07.000000000 +0200
+@@ -79,6 +79,7 @@
+ #include <asm/system.h>
+ #include <asm/sal.h>
+ #include <asm/mca.h>
++#include <asm/kexec.h>
+
+ #include <asm/irq.h>
+ #include <asm/hw_irq.h>
+@@ -160,11 +161,33 @@ typedef struct ia64_state_log_s
+
+ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
+
++#ifdef CONFIG_XEN
++DEFINE_SPINLOCK(ia64_mca_xencomm_lock);
++LIST_HEAD(ia64_mca_xencomm_list);
++
++#define IA64_MCA_XENCOMM_ALLOCATE(rec, desc) \
++ if (is_running_on_xen()) { \
++ ia64_mca_xencomm_t *entry; \
++ entry = alloc_bootmem(sizeof(ia64_mca_xencomm_t)); \
++ entry->record = rec; \
++ entry->handle = desc; \
++ list_add(&entry->list, &ia64_mca_xencomm_list); \
++ }
++#define IA64_LOG_ALLOCATE(it, size) \
++ {ia64_err_rec_t *rec; \
++ ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = rec = \
++ (ia64_err_rec_t *)alloc_bootmem(size); \
++ IA64_MCA_XENCOMM_ALLOCATE(rec, xencomm_map(rec, size)); \
++ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = rec = \
++ (ia64_err_rec_t *)alloc_bootmem(size); \
++ IA64_MCA_XENCOMM_ALLOCATE(rec, xencomm_map(rec, size));}
++#else
+ #define IA64_LOG_ALLOCATE(it, size) \
+ {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+ (ia64_err_rec_t *)alloc_bootmem(size); \
+ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+ (ia64_err_rec_t *)alloc_bootmem(size);}
++#endif
+ #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
+ #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
+ #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
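Aside: written as a function for readability, the xencomm side of the IA64_LOG_ALLOCATE macro above amounts to the following sketch (types and calls exactly as used in the patch):

	static void __init ia64_mca_xencomm_track(void *rec, size_t size)
	{
		ia64_mca_xencomm_t *entry;

		if (!is_running_on_xen())
			return;

		entry = alloc_bootmem(sizeof(*entry));
		entry->record = rec;
		entry->handle = xencomm_map(rec, size);
		list_add(&entry->list, &ia64_mca_xencomm_list);
	}

salinfo.c, later in this patch, walks ia64_mca_xencomm_list to find and free these handles again.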
+@@ -1066,7 +1089,12 @@ ia64_mca_handler(struct pt_regs *regs, s
+ rh->severity = sal_log_severity_corrected;
+ ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+ sos->os_status = IA64_MCA_CORRECTED;
+- }
++ } else {
++#ifdef CONFIG_KEXEC
++ atomic_set(&kdump_in_progress, 1);
++ monarch_cpu = -1;
++#endif
++ }
+ if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/pal.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/pal.S
+--- linux-2.6.18.8/arch/ia64/kernel/pal.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/pal.S 2008-08-21 11:36:07.000000000 +0200
+@@ -16,6 +16,7 @@
+ #include <asm/processor.h>
+
+ .data
++ .globl pal_entry_point
+ pal_entry_point:
+ data8 ia64_pal_default_handler
+ .text
+@@ -53,7 +54,7 @@ END(ia64_pal_default_handler)
+ * in4 1 ==> clear psr.ic, 0 ==> don't clear psr.ic
+ *
+ */
+-GLOBAL_ENTRY(ia64_pal_call_static)
++GLOBAL_ENTRY(__ia64_pal_call_static)
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+ alloc loc1 = ar.pfs,5,5,0,0
+ movl loc2 = pal_entry_point
+@@ -90,7 +91,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+ ;;
+	srlz.d				// serialize restoration of psr.l
+ br.ret.sptk.many b0
+-END(ia64_pal_call_static)
++END(__ia64_pal_call_static)
+
+ /*
+ * Make a PAL call using the stacked registers calling convention.
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/patch.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/patch.c
+--- linux-2.6.18.8/arch/ia64/kernel/patch.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/patch.c 2008-08-21 11:36:07.000000000 +0200
+@@ -184,9 +184,37 @@ patch_brl_fsys_bubble_down (unsigned lon
+ ia64_srlz_i();
+ }
+
++#ifdef CONFIG_XEN
++void __init
++ia64_patch_gate_xen (void)
++{
++ extern char __start_gate_mckinley_e9_patchlist_xen[], __end_gate_mckinley_e9_patchlist_xen[];
++ extern char __start_gate_vtop_patchlist_xen[], __end_gate_vtop_patchlist_xen[];
++ extern char __start_gate_fsyscall_patchlist_xen[], __end_gate_fsyscall_patchlist_xen[];
++ extern char __start_gate_brl_fsys_bubble_down_patchlist_xen[], __end_gate_brl_fsys_bubble_down_patchlist_xen[];
++# define START(name) ((unsigned long) __start_gate_##name##_patchlist_xen)
++# define END(name) ((unsigned long)__end_gate_##name##_patchlist_xen)
++
++ patch_fsyscall_table(START(fsyscall), END(fsyscall));
++ patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
++ ia64_patch_vtop(START(vtop), END(vtop));
++ ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
++
++# undef START
++# undef END
++}
++#else
++#define ia64_patch_gate_xen() do { } while (0)
++#endif
++
+ void __init
+ ia64_patch_gate (void)
+ {
++ if (is_running_on_xen()) {
++ ia64_patch_gate_xen();
++ return;
++ }
++
+ # define START(name) ((unsigned long) __start_gate_##name##_patchlist)
+ # define END(name) ((unsigned long)__end_gate_##name##_patchlist)
+
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/perfmon.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/perfmon.c
+--- linux-2.6.18.8/arch/ia64/kernel/perfmon.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/perfmon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -52,6 +52,31 @@
+ #include <asm/delay.h>
+
+ #ifdef CONFIG_PERFMON
++#include <asm/hypervisor.h>
++#ifdef CONFIG_XEN
++//#include <xen/xenoprof.h>
++#include <xen/interface/xenoprof.h>
++
++static int xenoprof_is_primary = 0;
++#define init_xenoprof_primary(is_primary) (xenoprof_is_primary = (is_primary))
++#define is_xenoprof_primary() (xenoprof_is_primary)
++#define XEN_NOT_SUPPORTED_YET \
++ do { \
++ if (is_running_on_xen()) { \
++ printk("%s is not supported yet under xen.\n", \
++ __func__); \
++ return -ENOSYS; \
++ } \
++ } while (0)
++#else
++#define init_xenoprof_primary(is_primary) do { } while (0)
++#define is_xenoprof_primary() (0)
++#define XEN_NOT_SUPPORTED_YET do { } while (0)
++#define HYPERVISOR_perfmon_op(cmd, arg, count) (0)
++#define HYPERVISOR_xenoprof_op(op, arg) ({(void)arg;0;})
++struct xenoprof_init { /* dummy */ };
++#endif
++
+ /*
+ * perfmon context state
+ */
+@@ -1514,6 +1539,7 @@ pfm_read(struct file *filp, char __user
+ ssize_t ret;
+ unsigned long flags;
+ DECLARE_WAITQUEUE(wait, current);
++ XEN_NOT_SUPPORTED_YET;
+ if (PFM_IS_FILE(filp) == 0) {
+ printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+ return -EINVAL;
+@@ -2112,6 +2138,15 @@ doit:
+ */
+ if (free_possible) pfm_context_free(ctx);
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary()) {
++ int ret = HYPERVISOR_perfmon_op(PFM_DESTROY_CONTEXT,
++ NULL, 0);
++ if (ret)
++ printk("%s:%d PFM_DESTROY_CONTEXT hypercall "
++ "failed\n", __func__, __LINE__);
++ }
++ }
+ return 0;
+ }
+
+@@ -2735,6 +2770,23 @@ pfm_context_create(pfm_context_t *ctx, v
+ */
+ pfm_reset_pmu_state(ctx);
+
++ if (is_running_on_xen()) {
++ /*
++		 * Kludge to get xenoprof.is_primary.
++		 * XENOPROF_init is a nop on ia64, so it is safe to call it here.
++ */
++ struct xenoprof_init init;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++ if (ret)
++ goto buffer_error;
++ init_xenoprof_primary(init.is_primary);
++
++ if (is_xenoprof_primary()) {
++ ret = HYPERVISOR_perfmon_op(PFM_CREATE_CONTEXT, arg, 0);
++ if (ret)
++ goto buffer_error;
++ }
++ }
+ return 0;
+
+ buffer_error:
+@@ -2871,6 +2923,12 @@ pfm_write_pmcs(pfm_context_t *ctx, void
+ pfm_reg_check_t wr_func;
+ #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_WRITE_PMCS,
++ arg, count);
++ return 0;
++ }
+ state = ctx->ctx_state;
+ is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+ is_system = ctx->ctx_fl_system;
+@@ -3111,6 +3169,12 @@ pfm_write_pmds(pfm_context_t *ctx, void
+ int ret = -EINVAL;
+ pfm_reg_check_t wr_func;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_WRITE_PMDS,
++ arg, count);
++ return 0;
++ }
+
+ state = ctx->ctx_state;
+ is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+@@ -3308,6 +3372,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *
+ int is_loaded, is_system, is_counting, expert_mode;
+ int ret = -EINVAL;
+ pfm_reg_check_t rd_func;
++ XEN_NOT_SUPPORTED_YET;
+
+ /*
+ * access is possible when loaded only for
+@@ -3559,6 +3624,7 @@ pfm_restart(pfm_context_t *ctx, void *ar
+ pfm_ovfl_ctrl_t rst_ctrl;
+ int state, is_system;
+ int ret = 0;
++ XEN_NOT_SUPPORTED_YET;
+
+ state = ctx->ctx_state;
+ fmt = ctx->ctx_buf_fmt;
+@@ -3708,6 +3774,7 @@ static int
+ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+ {
+ unsigned int m = *(unsigned int *)arg;
++ XEN_NOT_SUPPORTED_YET;
+
+ pfm_sysctl.debug = m == 0 ? 0 : 1;
+
+@@ -3978,6 +4045,8 @@ pfm_get_features(pfm_context_t *ctx, voi
+ {
+ pfarg_features_t *req = (pfarg_features_t *)arg;
+
++ if (is_running_on_xen())
++ return HYPERVISOR_perfmon_op(PFM_GET_FEATURES, &arg, 0);
+ req->ft_version = PFM_VERSION;
+ return 0;
+ }
+@@ -3989,6 +4058,12 @@ pfm_stop(pfm_context_t *ctx, void *arg,
+ struct task_struct *task = PFM_CTX_TASK(ctx);
+ int state, is_system;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_STOP, NULL, 0);
++ return 0;
++ }
++
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+
+@@ -4077,6 +4152,11 @@ pfm_start(pfm_context_t *ctx, void *arg,
+ struct pt_regs *tregs;
+ int state, is_system;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_START, NULL, 0);
++ return 0;
++ }
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+
+@@ -4159,6 +4239,7 @@ pfm_get_pmc_reset(pfm_context_t *ctx, vo
+ unsigned int cnum;
+ int i;
+ int ret = -EINVAL;
++ XEN_NOT_SUPPORTED_YET;
+
+ for (i = 0; i < count; i++, req++) {
+
+@@ -4217,6 +4298,11 @@ pfm_context_load(pfm_context_t *ctx, voi
+ int ret = 0;
+ int state, is_system, set_dbregs = 0;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_LOAD_CONTEXT, arg, 0);
++ return 0;
++ }
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+ /*
+@@ -4465,6 +4551,12 @@ pfm_context_unload(pfm_context_t *ctx, v
+ int prev_state, is_system;
+ int ret;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_UNLOAD_CONTEXT,
++ NULL, 0);
++ return 0;
++ }
+ DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+
+ prev_state = ctx->ctx_state;
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/relocate_kernel.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/relocate_kernel.S
+--- linux-2.6.18.8/arch/ia64/kernel/relocate_kernel.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/relocate_kernel.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,338 @@
++/*
++ * arch/ia64/kernel/relocate_kernel.S
++ *
++ * Relocate kexec'able kernel and start it
++ *
++ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
++ * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
++ * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2. See the file COPYING for more details.
++ */
++#include <asm/asmmacro.h>
++#include <asm/kregs.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/mca_asm.h>
++
++ /* Must be relocatable PIC code callable as a C function
++ */
++GLOBAL_ENTRY(relocate_new_kernel)
++ .prologue
++ alloc r31=ar.pfs,4,0,0,0
++ .body
++.reloc_entry:
++{
++ rsm psr.i| psr.ic
++ mov r2=ip
++}
++ ;;
++{
++ flushrs // must be first insn in group
++ srlz.i
++}
++ ;;
++ dep r2=0,r2,61,3 //to physical address
++ ;;
++ //first switch to physical mode
++ add r3=1f-.reloc_entry, r2
++ movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
++ mov ar.rsc=0 // put RSE in enforced lazy mode
++ ;;
++ add sp=(memory_stack_end - 16 - .reloc_entry),r2
++ add r8=(register_stack - .reloc_entry),r2
++ ;;
++ mov r18=ar.rnat
++ mov ar.bspstore=r8
++ ;;
++ mov cr.ipsr=r16
++ mov cr.iip=r3
++ mov cr.ifs=r0
++ srlz.i
++ ;;
++ mov ar.rnat=r18
++ rfi
++ ;;
++1:
++ //physical mode code begin
++ mov b6=in1
++#ifdef CONFIG_XEN
++ mov r28=in2 //already a physical address
++#else
++ dep r28=0,in2,61,3 //to physical address
++
++ // purge all TC entries
++#define O(member) IA64_CPUINFO_##member##_OFFSET
++ GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
++ ;;
++ addl r17=O(PTCE_STRIDE),r2
++ addl r2=O(PTCE_BASE),r2
++ ;;
++ ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
++ ld4 r19=[r2],4 // r19=ptce_count[0]
++ ld4 r21=[r17],4 // r21=ptce_stride[0]
++ ;;
++ ld4 r20=[r2] // r20=ptce_count[1]
++ ld4 r22=[r17] // r22=ptce_stride[1]
++ mov r24=r0
++ ;;
++ adds r20=-1,r20
++ ;;
++#undef O
++2:
++ cmp.ltu p6,p7=r24,r19
++(p7) br.cond.dpnt.few 4f
++ mov ar.lc=r20
++3:
++ ptc.e r18
++ ;;
++ add r18=r22,r18
++ br.cloop.sptk.few 3b
++ ;;
++ add r18=r21,r18
++ add r24=1,r24
++ ;;
++ br.sptk.few 2b
++4:
++ srlz.i
++ ;;
++ //purge TR entry for kernel text and data
++ movl r16=KERNEL_START
++ mov r18=KERNEL_TR_PAGE_SHIFT<<2
++ ;;
++ ptr.i r16, r18
++ ptr.d r16, r18
++ ;;
++ srlz.i
++ ;;
++
++ // purge TR entry for percpu data
++ movl r16=PERCPU_ADDR
++ mov r18=PERCPU_PAGE_SHIFT<<2
++ ;;
++ ptr.d r16,r18
++ ;;
++ srlz.d
++ ;;
++
++ // purge TR entry for pal code
++ mov r16=in3
++ mov r18=IA64_GRANULE_SHIFT<<2
++ ;;
++ ptr.i r16,r18
++ ;;
++ srlz.i
++ ;;
++
++ // purge TR entry for stack
++ mov r16=IA64_KR(CURRENT_STACK)
++ ;;
++ shl r16=r16,IA64_GRANULE_SHIFT
++ movl r19=PAGE_OFFSET
++ ;;
++ add r16=r19,r16
++ mov r18=IA64_GRANULE_SHIFT<<2
++ ;;
++ ptr.d r16,r18
++ ;;
++ srlz.i
++ ;;
++#endif /* ! CONFIG_XEN */
++
++ //copy segments
++ movl r16=PAGE_MASK
++ mov r30=in0 // in0 is page_list
++ br.sptk.few .dest_page
++ ;;
++.loop:
++ ld8 r30=[in0], 8;;
++.dest_page:
++ tbit.z p0, p6=r30, 0;; // 0x1 dest page
++(p6) and r17=r30, r16
++(p6) br.cond.sptk.few .loop;;
++
++ tbit.z p0, p6=r30, 1;; // 0x2 indirect page
++(p6) and in0=r30, r16
++(p6) br.cond.sptk.few .loop;;
++
++ tbit.z p0, p6=r30, 2;; // 0x4 end flag
++(p6) br.cond.sptk.few .end_loop;;
++
++ tbit.z p6, p0=r30, 3;; // 0x8 source page
++(p6) br.cond.sptk.few .loop
++
++ and r18=r30, r16
++
++ // simple copy page, may optimize later
++ movl r14=PAGE_SIZE/8 - 1;;
++ mov ar.lc=r14;;
++1:
++ ld8 r14=[r18], 8;;
++ st8 [r17]=r14;;
++ fc.i r17
++ add r17=8, r17
++ br.ctop.sptk.few 1b
++ br.sptk.few .loop
++ ;;
++
++.end_loop:
++ sync.i // for fc.i
++ ;;
++ srlz.i
++ ;;
++ srlz.d
++ ;;
++ br.call.sptk.many b0=b6;;
++
++.align 32
++memory_stack:
++ .fill 8192, 1, 0
++memory_stack_end:
++register_stack:
++ .fill 8192, 1, 0
++register_stack_end:
++relocate_new_kernel_end:
++END(relocate_new_kernel)
++
++.global relocate_new_kernel_size
++relocate_new_kernel_size:
++ data8 relocate_new_kernel_end - relocate_new_kernel
++
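Aside: the copy loop in relocate_new_kernel above walks the generic kexec page list; each entry carries a flag in its low bits (the tbit tests) and a page address in the rest. A C sketch of the same walk, using the standard <linux/kexec.h> IND_* flag values; the fc.i cache flushing done by the assembly is omitted:

	#define IND_DESTINATION	0x1	/* set the current destination page */
	#define IND_INDIRECTION	0x2	/* continue from a new list page */
	#define IND_DONE	0x4	/* end of the list */
	#define IND_SOURCE	0x8	/* copy this page to the destination */

	/* Sketch only: PAGE_SIZE, PAGE_MASK and copy_page() as provided
	 * by the kernel. 'head' is the first list entry (in0 above). */
	static void walk_kexec_list(unsigned long head)
	{
		char *dest = NULL;
		unsigned long *ptr = NULL;
		unsigned long entry;

		for (entry = head; !(entry & IND_DONE); entry = *ptr++) {
			if (entry & IND_DESTINATION) {
				dest = (char *)(entry & PAGE_MASK);
			} else if (entry & IND_INDIRECTION) {
				ptr = (unsigned long *)(entry & PAGE_MASK);
			} else if (entry & IND_SOURCE) {
				copy_page(dest, (void *)(entry & PAGE_MASK));
				dest += PAGE_SIZE;	/* next destination */
			}
		}
	}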
++GLOBAL_ENTRY(ia64_dump_cpu_regs)
++ .prologue
++ alloc loc0=ar.pfs,1,2,0,0
++ .body
++ mov ar.rsc=0 // put RSE in enforced lazy mode
++ add loc1=4*8, in0 // save r4 and r5 first
++ ;;
++{
++ flushrs // flush dirty regs to backing store
++ srlz.i
++}
++ st8 [loc1]=r4, 8
++ ;;
++ st8 [loc1]=r5, 8
++ ;;
++ add loc1=32*8, in0
++ mov r4=ar.rnat
++ ;;
++ st8 [in0]=r0, 8 // r0
++ st8 [loc1]=r4, 8 // rnat
++ mov r5=pr
++ ;;
++ st8 [in0]=r1, 8 // r1
++ st8 [loc1]=r5, 8 // pr
++ mov r4=b0
++ ;;
++ st8 [in0]=r2, 8 // r2
++ st8 [loc1]=r4, 8 // b0
++ mov r5=b1;
++ ;;
++ st8 [in0]=r3, 24 // r3
++ st8 [loc1]=r5, 8 // b1
++ mov r4=b2
++ ;;
++ st8 [in0]=r6, 8 // r6
++ st8 [loc1]=r4, 8 // b2
++ mov r5=b3
++ ;;
++ st8 [in0]=r7, 8 // r7
++ st8 [loc1]=r5, 8 // b3
++ mov r4=b4
++ ;;
++ st8 [in0]=r8, 8 // r8
++ st8 [loc1]=r4, 8 // b4
++ mov r5=b5
++ ;;
++ st8 [in0]=r9, 8 // r9
++ st8 [loc1]=r5, 8 // b5
++ mov r4=b6
++ ;;
++ st8 [in0]=r10, 8 // r10
++	st8 [loc1]=r4, 8	// b6
++ mov r5=b7
++ ;;
++ st8 [in0]=r11, 8 // r11
++ st8 [loc1]=r5, 8 // b7
++ mov r4=b0
++ ;;
++ st8 [in0]=r12, 8 // r12
++ st8 [loc1]=r4, 8 // ip
++ mov r5=loc0
++ ;;
++ st8 [in0]=r13, 8 // r13
++ extr.u r5=r5, 0, 38 // ar.pfs.pfm
++ mov r4=r0 // user mask
++ ;;
++ st8 [in0]=r14, 8 // r14
++ st8 [loc1]=r5, 8 // cfm
++ ;;
++ st8 [in0]=r15, 8 // r15
++ st8 [loc1]=r4, 8 // user mask
++ mov r5=ar.rsc
++ ;;
++ st8 [in0]=r16, 8 // r16
++ st8 [loc1]=r5, 8 // ar.rsc
++ mov r4=ar.bsp
++ ;;
++ st8 [in0]=r17, 8 // r17
++ st8 [loc1]=r4, 8 // ar.bsp
++ mov r5=ar.bspstore
++ ;;
++ st8 [in0]=r18, 8 // r18
++ st8 [loc1]=r5, 8 // ar.bspstore
++ mov r4=ar.rnat
++ ;;
++ st8 [in0]=r19, 8 // r19
++ st8 [loc1]=r4, 8 // ar.rnat
++ mov r5=ar.ccv
++ ;;
++ st8 [in0]=r20, 8 // r20
++ st8 [loc1]=r5, 8 // ar.ccv
++ mov r4=ar.unat
++ ;;
++ st8 [in0]=r21, 8 // r21
++ st8 [loc1]=r4, 8 // ar.unat
++ mov r5 = ar.fpsr
++ ;;
++ st8 [in0]=r22, 8 // r22
++ st8 [loc1]=r5, 8 // ar.fpsr
++ mov r4 = ar.unat
++ ;;
++ st8 [in0]=r23, 8 // r23
++ st8 [loc1]=r4, 8 // unat
++ mov r5 = ar.fpsr
++ ;;
++ st8 [in0]=r24, 8 // r24
++ st8 [loc1]=r5, 8 // fpsr
++ mov r4 = ar.pfs
++ ;;
++ st8 [in0]=r25, 8 // r25
++ st8 [loc1]=r4, 8 // ar.pfs
++ mov r5 = ar.lc
++ ;;
++ st8 [in0]=r26, 8 // r26
++ st8 [loc1]=r5, 8 // ar.lc
++ mov r4 = ar.ec
++ ;;
++ st8 [in0]=r27, 8 // r27
++ st8 [loc1]=r4, 8 // ar.ec
++ mov r5 = ar.csd
++ ;;
++ st8 [in0]=r28, 8 // r28
++ st8 [loc1]=r5, 8 // ar.csd
++ mov r4 = ar.ssd
++ ;;
++ st8 [in0]=r29, 8 // r29
++ st8 [loc1]=r4, 8 // ar.ssd
++ ;;
++ st8 [in0]=r30, 8 // r30
++ ;;
++ st8 [in0]=r31, 8 // r31
++ mov ar.pfs=loc0
++ ;;
++ br.ret.sptk.many rp
++END(ia64_dump_cpu_regs)
++
++
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/salinfo.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/salinfo.c
+--- linux-2.6.18.8/arch/ia64/kernel/salinfo.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/salinfo.c 2008-08-21 11:36:07.000000000 +0200
+@@ -375,6 +375,25 @@ salinfo_log_open(struct inode *inode, st
+ data->open = 0;
+ return -ENOMEM;
+ }
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ ia64_mca_xencomm_t *entry;
++ unsigned long flags;
++
++ entry = vmalloc(sizeof(ia64_mca_xencomm_t));
++ if (!entry) {
++ data->open = 0;
++ vfree(data->log_buffer);
++ return -ENOMEM;
++ }
++ entry->record = data->log_buffer;
++ entry->handle = xencomm_map(data->log_buffer,
++ ia64_sal_get_state_info_size(data->type));
++ spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
++ list_add(&entry->list, &ia64_mca_xencomm_list);
++ spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
++ }
++#endif
+
+ return 0;
+ }
+@@ -386,6 +405,30 @@ salinfo_log_release(struct inode *inode,
+ struct salinfo_data *data = entry->data;
+
+ if (data->state == STATE_NO_DATA) {
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ struct list_head *pos, *n;
++ ia64_mca_xencomm_t *found_entry = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
++ list_for_each_safe(pos, n, &ia64_mca_xencomm_list) {
++ ia64_mca_xencomm_t *entry;
++
++ entry = list_entry(pos, ia64_mca_xencomm_t, list);
++ if (entry->record == data->log_buffer) {
++ list_del(&entry->list);
++ found_entry = entry;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
++ if (found_entry) {
++ xencomm_free(found_entry->handle);
++ vfree(found_entry);
++ }
++ }
++#endif
+ vfree(data->log_buffer);
+ vfree(data->oemdata);
+ data->log_buffer = NULL;
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/setup.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/setup.c
+--- linux-2.6.18.8/arch/ia64/kernel/setup.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/setup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -43,6 +43,8 @@
+ #include <linux/initrd.h>
+ #include <linux/pm.h>
+ #include <linux/cpufreq.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
+
+ #include <asm/ia32.h>
+ #include <asm/machvec.h>
+@@ -60,6 +62,12 @@
+ #include <asm/system.h>
+ #include <asm/unistd.h>
+ #include <asm/system.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#include <asm/xen/xencomm.h>
++#include <xen/xencons.h>
++#endif
++#include <linux/dma-mapping.h>
+
+ #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
+ # error "struct cpuinfo_ia64 too big!"
+@@ -70,6 +78,34 @@ unsigned long __per_cpu_offset[NR_CPUS];
+ EXPORT_SYMBOL(__per_cpu_offset);
+ #endif
+
++#ifdef CONFIG_XEN
++static void
++xen_panic_hypercall(struct unw_frame_info *info, void *arg)
++{
++ current->thread.ksp = (__u64)info->sw - 16;
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ unw_init_running(xen_panic_hypercall, NULL);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++void xen_pm_power_off(void)
++{
++ local_irq_disable();
++ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
++#endif
++
+ extern void ia64_setup_printk_clock(void);
+
+ DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+@@ -242,6 +278,14 @@ reserve_memory (void)
+ rsvd_region[n].end = (unsigned long) ia64_imva(_end);
+ n++;
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
++ rsvd_region[n].end = rsvd_region[n].start + PAGE_SIZE;
++ n++;
++ }
++#endif
++
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (ia64_boot_param->initrd_start) {
+ rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
+@@ -253,6 +297,56 @@ reserve_memory (void)
+ efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+ n++;
+
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@offset specifies the size to reserve for a crash
++ * kernel. If offset is 0, then it is determined automatically.
++	 * By reserving this memory we guarantee that Linux never sets it
++	 * up as a DMA target.  Useful for holding code to do something
++	 * appropriate after a kernel panic.
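++	 * For example, "crashkernel=128M@0" reserves 128MB at an
++	 * automatically chosen base address.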
++ */
++ {
++ char *from = strstr(saved_command_line, "crashkernel=");
++ unsigned long base, size;
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain() && from)
++			printk("Ignoring crashkernel command line; "
++			       "parameter will be supplied by Xen\n");
++ else {
++#endif
++ if (from) {
++ size = memparse(from + 12, &from);
++ if (*from == '@')
++ base = memparse(from+1, &from);
++ else
++ base = 0;
++ if (size) {
++ if (!base) {
++ sort_regions(rsvd_region, n);
++ base = kdump_find_rsvd_region(size,
++ rsvd_region, n);
++ }
++ if (base != ~0UL) {
++ rsvd_region[n].start =
++ (unsigned long)__va(base);
++ rsvd_region[n].end =
++ (unsigned long)__va(base + size);
++ n++;
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++ }
++ }
++ efi_memmap_res.start = ia64_boot_param->efi_memmap;
++ efi_memmap_res.end = efi_memmap_res.start +
++ ia64_boot_param->efi_memmap_size;
++ boot_param_res.start = kexec_virt_to_phys(ia64_boot_param);
++ boot_param_res.end = boot_param_res.start +
++ sizeof(*ia64_boot_param);
++#ifdef CONFIG_XEN
++ }
++#endif
++ }
++#endif
+ /* end of memory marker */
+ rsvd_region[n].start = ~0UL;
+ rsvd_region[n].end = ~0UL;
+@@ -264,6 +358,7 @@ reserve_memory (void)
+ sort_regions(rsvd_region, num_rsvd_regions);
+ }
+
++
+ /**
+ * find_initrd - get initrd parameters from the boot parameter structure
+ *
+@@ -397,11 +492,49 @@ static __init int setup_nomca(char *s)
+ }
+ early_param("nomca", setup_nomca);
+
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++static int __init parse_elfcorehdr(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ elfcorehdr_addr = memparse(arg, &arg);
++ return 0;
++}
++early_param("elfcorehdr", parse_elfcorehdr);
++#endif /* CONFIG_PROC_VMCORE */
++
+ void __init
+ setup_arch (char **cmdline_p)
+ {
++#ifdef CONFIG_XEN
++ shared_info_t *s = NULL;
++ if (is_running_on_xen()) {
++ s = HYPERVISOR_shared_info;
++ xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
++ }
++#endif
++
+ unw_init();
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ /* Must be done before any hypercall. */
++ xencomm_initialize();
++
++ setup_xen_features();
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list,
++ &xen_panic_block);
++ pm_power_off = xen_pm_power_off;
++
++ xen_ia64_enable_opt_feature();
++ }
++#endif
++
+ ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+
+ *cmdline_p = __va(ia64_boot_param->command_line);
+@@ -462,6 +595,57 @@ setup_arch (char **cmdline_p)
+ acpi_boot_init();
+ #endif
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%ld "
++ "flags=0x%x\n", s->arch.start_info_pfn,
++ xen_start_info->nr_pages, xen_start_info->flags);
++
++ /*
++ * If a console= is NOT specified, we assume using the
++ * xencons console is desired. By default, this is xvc0
++ * for both dom0 and domU.
++ */
++ if (!strstr(*cmdline_p, "console=")) {
++ char *p, *q, name[5] = "xvc";
++ int offset = 0;
++
++#if defined(CONFIG_VGA_CONSOLE)
++ /*
++ * conswitchp might be set intelligently from the
++ * PCDP code. If set to VGA console, use it.
++ */
++ if (is_initial_xendomain() && conswitchp == &vga_con)
++ strncpy(name, "tty", 3);
++#endif
++
++ p = strstr(*cmdline_p, "xencons=");
++
++ if (p) {
++ p += 8;
++ if (!strncmp(p, "ttyS", 4)) {
++ strncpy(name, p, 4);
++ p += 4;
++ offset = simple_strtol(p, &q, 10);
++ if (p == q)
++ offset = 0;
++ } else if (!strncmp(p, "tty", 3) ||
++ !strncmp(p, "xvc", 3)) {
++ strncpy(name, p, 3);
++ p += 3;
++ offset = simple_strtol(p, &q, 10);
++ if (p == q)
++ offset = 0;
++ } else if (!strncmp(p, "off", 3))
++ offset = -1;
++ }
++
++ if (offset >= 0)
++ add_preferred_console(name, offset, NULL);
++ }
++ }
++#endif
++
+ #ifdef CONFIG_VT
+ if (!conswitchp) {
+ # if defined(CONFIG_DUMMY_CONSOLE)
+@@ -481,11 +665,28 @@ setup_arch (char **cmdline_p)
+ #endif
+
+ /* enable IA-64 Machine Check Abort Handling unless disabled */
++#ifdef CONFIG_XEN
++ if (is_running_on_xen() && !is_initial_xendomain()) {
++ nomca = 1;
++#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = NULL;
++#endif
++ }
++#endif
+ if (!nomca)
+ ia64_mca_init();
+
+ platform_setup(cmdline_p);
++#ifdef CONFIG_XEN
++ if (is_running_on_xen() && !ia64_platform_is("xen")) {
++ extern ia64_mv_setup_t xen_setup;
++ xen_setup(cmdline_p);
++ }
++#endif
+ paging_init();
++#ifdef CONFIG_XEN
++ xen_contiguous_bitmap_init(max_pfn);
++#endif
+ }
+
+ /*
+@@ -870,6 +1071,13 @@ cpu_init (void)
+ /* size of physical stacked register partition plus 8 bytes: */
+ __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
+ platform_cpu_init();
++#ifdef CONFIG_XEN
++ if (is_running_on_xen() && !ia64_platform_is("xen")) {
++ extern ia64_mv_cpu_init_t xen_cpu_init;
++ xen_cpu_init();
++ }
++#endif
++
+ pm_idle = default_idle;
+ }
+
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/smp.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/smp.c
+--- linux-2.6.18.8/arch/ia64/kernel/smp.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/smp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -30,6 +30,7 @@
+ #include <linux/delay.h>
+ #include <linux/efi.h>
+ #include <linux/bitops.h>
++#include <linux/kexec.h>
+
+ #include <asm/atomic.h>
+ #include <asm/current.h>
+@@ -66,6 +67,7 @@ static volatile struct call_data_struct
+
+ #define IPI_CALL_FUNC 0
+ #define IPI_CPU_STOP 1
++#define IPI_KDUMP_CPU_STOP 3
+
+ /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
+ static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+@@ -155,7 +157,11 @@ handle_IPI (int irq, void *dev_id, struc
+ case IPI_CPU_STOP:
+ stop_this_cpu();
+ break;
+-
++#ifdef CONFIG_KEXEC
++ case IPI_KDUMP_CPU_STOP:
++ unw_init_running(kdump_cpu_freeze, NULL);
++ break;
++#endif
+ default:
+ printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+ break;
+@@ -213,6 +219,26 @@ send_IPI_self (int op)
+ send_IPI_single(smp_processor_id(), op);
+ }
+
++#ifdef CONFIG_KEXEC
++void
++kdump_smp_send_stop()
++{
++ send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
++}
++
++void
++kdump_smp_send_init()
++{
++ unsigned int cpu, self_cpu;
++ self_cpu = smp_processor_id();
++ for_each_online_cpu(cpu) {
++ if (cpu != self_cpu) {
++			if (kdump_status[cpu] == 0)
++ platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
++ }
++ }
++}
++#endif
+ /*
+  * Called with preemption disabled.
+ */
+@@ -328,10 +354,14 @@ int
+ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+ {
+ struct call_data_struct data;
+- int cpus = num_online_cpus()-1;
++ int cpus;
+
+- if (!cpus)
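++	/* Take call_lock before reading num_online_cpus() so the count
++	 * cannot change (e.g. via CPU hotplug) before the IPIs are sent. */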
++ spin_lock(&call_lock);
++ cpus = num_online_cpus()-1;
++ if (!cpus) {
++ spin_unlock(&call_lock);
+ return 0;
++ }
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+@@ -343,8 +373,6 @@ smp_call_function (void (*func) (void *i
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+- spin_lock(&call_lock);
+-
+ call_data = &data;
+ mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+ send_IPI_allbutself(IPI_CALL_FUNC);
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/time.c linux-2.6.18-xen-3.3.0/arch/ia64/kernel/time.c
+--- linux-2.6.18.8/arch/ia64/kernel/time.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/time.c 2008-08-21 11:36:07.000000000 +0200
+@@ -29,6 +29,14 @@
+ #include <asm/sections.h>
+ #include <asm/system.h>
+
++#include <asm/hypervisor.h>
++#ifdef CONFIG_XEN
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++#include <xen/interface/vcpu.h>
++#include <asm/percpu.h>
++#endif
++
+ extern unsigned long wall_jiffies;
+
+ volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
+@@ -40,16 +48,109 @@ EXPORT_SYMBOL(last_cli_ip);
+
+ #endif
+
++#ifdef CONFIG_XEN
++DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++DEFINE_PER_CPU(unsigned long, processed_stolen_time);
++DEFINE_PER_CPU(unsigned long, processed_blocked_time);
++#define NS_PER_TICK (1000000000LL/HZ)
++#endif
++
+ static struct time_interpolator itc_interpolator = {
+ .shift = 16,
+ .mask = 0xffffffffffffffffLL,
+ .source = TIME_SOURCE_CPU
+ };
+
++#ifdef CONFIG_XEN
++static unsigned long
++consider_steal_time(unsigned long new_itm, struct pt_regs *regs)
++{
++ unsigned long stolen, blocked, sched_time;
++ unsigned long delta_itm = 0, stolentick = 0;
++ int i, cpu = smp_processor_id();
++ struct vcpu_runstate_info *runstate;
++ struct task_struct *p = current;
++
++ runstate = &per_cpu(runstate, smp_processor_id());
++
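++	/*
++	 * Re-read until state_entry_time is stable; the hypervisor may
++	 * update the runstate area while we are reading it.
++	 */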
++ do {
++ sched_time = runstate->state_entry_time;
++ mb();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ mb();
++ } while (sched_time != runstate->state_entry_time);
++
++ /*
++	 * Check for the vcpu migration effect: after a migration the
++	 * itc value can go backwards, which would produce a huge bogus
++	 * stolen value.  This check just detects and rejects that case.
++ */
++ if (!time_after_eq(runstate->time[RUNSTATE_blocked],
++ per_cpu(processed_blocked_time, cpu)))
++ blocked = 0;
++
++ if (!time_after_eq(runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline],
++ per_cpu(processed_stolen_time, cpu)))
++ stolen = 0;
++
++ if (!time_after(delta_itm + new_itm, ia64_get_itc()))
++ stolentick = ia64_get_itc() - delta_itm - new_itm;
++
++ do_div(stolentick, NS_PER_TICK);
++ stolentick++;
++
++ do_div(stolen, NS_PER_TICK);
++
++ if (stolen > stolentick)
++ stolen = stolentick;
++
++ stolentick -= stolen;
++ do_div(blocked, NS_PER_TICK);
++
++ if (blocked > stolentick)
++ blocked = stolentick;
++
++ if (stolen > 0 || blocked > 0) {
++ account_steal_time(NULL, jiffies_to_cputime(stolen));
++ account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
++ run_local_timers();
++
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode(regs));
++
++ scheduler_tick();
++ run_posix_cpu_timers(p);
++ delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
++
++ if (cpu == time_keeper_id) {
++ write_seqlock(&xtime_lock);
++ for(i = 0; i < stolen + blocked; i++)
++ do_timer(regs);
++ local_cpu_data->itm_next = delta_itm + new_itm;
++ write_sequnlock(&xtime_lock);
++ } else {
++ local_cpu_data->itm_next = delta_itm + new_itm;
++ }
++		per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
++		per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
++ }
++ return delta_itm;
++}
++#else
++#define consider_steal_time(new_itm, regs) (0)
++#endif
++
+ static irqreturn_t
+ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+ {
+ unsigned long new_itm;
++ unsigned long delta_itm; /* XEN */
+
+ if (unlikely(cpu_is_offline(smp_processor_id()))) {
+ return IRQ_HANDLED;
+@@ -65,6 +166,13 @@ timer_interrupt (int irq, void *dev_id,
+
+ profile_tick(CPU_PROFILING, regs);
+
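++	/* Account time stolen by the hypervisor first; if that already
++	 * covered this tick, skip the normal process-time loop below. */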
++ if (is_running_on_xen()) {
++ delta_itm = consider_steal_time(new_itm, regs);
++ new_itm += delta_itm;
++ if (time_after(new_itm, ia64_get_itc()) && delta_itm)
++ goto skip_process_time_accounting;
++ }
++
+ while (1) {
+ update_process_times(user_mode(regs));
+
+@@ -88,6 +196,8 @@ timer_interrupt (int irq, void *dev_id,
+ break;
+ }
+
++skip_process_time_accounting: /* XEN */
++
+ do {
+ /*
+ * If we're too close to the next clock tick for
+@@ -142,6 +252,85 @@ static int __init nojitter_setup(char *s
+
+ __setup("nojitter", nojitter_setup);
+
++#ifdef CONFIG_XEN
++/* taken from i386/kernel/time-xen.c */
++static void init_missing_ticks_accounting(int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++ int rc;
++
++ memset(runstate, 0, sizeof(*runstate));
++
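++	/* Ask Xen to keep this vcpu's runstate info updated in our
++	 * per-cpu area; -ENOSYS just means an older hypervisor. */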
++ area.addr.v = runstate;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++ WARN_ON(rc && rc != -ENOSYS);
++
++ per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
++ + runstate->time[RUNSTATE_offline];
++}
++
++static int xen_ia64_settimefoday_after_resume;
++
++static int __init __xen_ia64_settimeofday_after_resume(char *str)
++{
++ xen_ia64_settimefoday_after_resume = 1;
++ return 1;
++}
++
++__setup("xen_ia64_settimefoday_after_resume",
++ __xen_ia64_settimeofday_after_resume);
++
++/* Called after suspend, to resume time. */
++void
++time_resume(void)
++{
++ unsigned int cpu;
++
++ /* Just trigger a tick. */
++ ia64_cpu_local_tick();
++
++ if (xen_ia64_settimefoday_after_resume) {
++		/* do_settimeofday() resets the timer interpolator */
++ struct timespec xen_time;
++ int ret;
++ efi_gettimeofday(&xen_time);
++
++ ret = do_settimeofday(&xen_time);
++ WARN_ON(ret);
++ } else {
++#if 0
++ /* adjust EFI time */
++ struct timespec my_time = CURRENT_TIME;
++ struct timespec xen_time;
++		static struct timespec diff;
++ struct xen_domctl domctl;
++ int ret;
++
++ efi_gettimeofday(&xen_time);
++		diff = timespec_sub(xen_time, my_time);
++ domctl.cmd = XEN_DOMCTL_settimeoffset;
++ domctl.domain = DOMID_SELF;
++ domctl.u.settimeoffset.timeoffset_seconds = diff.tv_sec;
++ ret = HYPERVISOR_domctl_op(&domctl);
++ WARN_ON(ret);
++#endif
++		/* The time interpolator remembers the last timer status;
++		   forget it. */
++ write_seqlock_irq(&xtime_lock);
++ time_interpolator_reset();
++ write_sequnlock_irq(&xtime_lock);
++ }
++
++ for_each_online_cpu(cpu)
++ init_missing_ticks_accounting(cpu);
++
++ touch_softlockup_watchdog();
++}
++#else
++#define init_missing_ticks_accounting(cpu) do {} while (0)
++#endif
+
+ void __devinit
+ ia64_init_itm (void)
+@@ -225,6 +414,12 @@ ia64_init_itm (void)
+ register_time_interpolator(&itc_interpolator);
+ }
+
++ if (is_running_on_xen())
++ init_missing_ticks_accounting(smp_processor_id());
++
++	/* avoid soft lockup messages when a cpu is unplugged and plugged again */
++ touch_softlockup_watchdog();
++
+ /* Setup the CPU local timer tick */
+ ia64_cpu_local_tick();
+ }
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/vmlinux.lds.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/vmlinux.lds.S
+--- linux-2.6.18.8/arch/ia64/kernel/vmlinux.lds.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/vmlinux.lds.S 2008-08-21 11:36:07.000000000 +0200
+@@ -183,6 +183,12 @@ SECTIONS
+ __start_gate_section = .;
+ *(.data.gate)
+ __stop_gate_section = .;
++#if defined(CONFIG_XEN)
++ . = ALIGN(PAGE_SIZE);
++ __start_xen_gate_section = .;
++ *(.data.gate.xen)
++ __stop_xen_gate_section = .;
++#endif
+ }
+ . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */
+
+diff -rpuN linux-2.6.18.8/arch/ia64/kernel/xengate-data.S linux-2.6.18-xen-3.3.0/arch/ia64/kernel/xengate-data.S
+--- linux-2.6.18.8/arch/ia64/kernel/xengate-data.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/kernel/xengate-data.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3 @@
++ .section .data.gate.xen, "aw"
++
++ .incbin "arch/ia64/kernel/xengate.so"
+diff -rpuN linux-2.6.18.8/arch/ia64/Makefile linux-2.6.18-xen-3.3.0/arch/ia64/Makefile
+--- linux-2.6.18.8/arch/ia64/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -45,6 +45,12 @@ ifeq ($(call cc-version),0304)
+ endif
+
+ CFLAGS += $(cflags-y)
++
++cppflags-$(CONFIG_XEN) += \
++ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
++
++CPPFLAGS += $(cppflags-y)
++
+ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
+
+ libs-y += arch/ia64/lib/
+@@ -54,7 +60,9 @@ core-$(CONFIG_IA64_DIG) += arch/ia64/di
+ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
+ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
+ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
++core-$(CONFIG_IA64_XEN) += arch/ia64/dig/
+ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
++core-$(CONFIG_XEN) += arch/ia64/xen/
+
+ drivers-$(CONFIG_PCI) += arch/ia64/pci/
+ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
+@@ -87,8 +95,8 @@ CLEAN_FILES += vmlinux.gz bootloader
+ boot: lib/lib.a vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+-install: vmlinux.gz
+- sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
++install:
++ -yes | sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
+
+ define archhelp
+ echo '* compressed - Build compressed kernel image'
+diff -rpuN linux-2.6.18.8/arch/ia64/mm/contig.c linux-2.6.18-xen-3.3.0/arch/ia64/mm/contig.c
+--- linux-2.6.18.8/arch/ia64/mm/contig.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/mm/contig.c 2008-08-21 11:36:07.000000000 +0200
+@@ -18,6 +18,9 @@
+ #include <linux/efi.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h>
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++#include <linux/kexec.h>
++#endif
+
+ #include <asm/meminit.h>
+ #include <asm/pgalloc.h>
+@@ -172,8 +175,12 @@ find_memory (void)
+ /* Free all available memory, then mark bootmem-map as being in use. */
+ efi_memmap_walk(filter_rsvd_memory, free_bootmem);
+ reserve_bootmem(bootmap_start, bootmap_size);
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++ xen_machine_kexec_setup_resources();
++#endif
+
+ find_initrd();
++
+ }
+
+ #ifdef CONFIG_SMP
+diff -rpuN linux-2.6.18.8/arch/ia64/mm/discontig.c linux-2.6.18-xen-3.3.0/arch/ia64/mm/discontig.c
+--- linux-2.6.18.8/arch/ia64/mm/discontig.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/mm/discontig.c 2008-08-21 11:36:07.000000000 +0200
+@@ -21,6 +21,9 @@
+ #include <linux/acpi.h>
+ #include <linux/efi.h>
+ #include <linux/nodemask.h>
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++#include <linux/kexec.h>
++#endif
+ #include <asm/pgalloc.h>
+ #include <asm/tlb.h>
+ #include <asm/meminit.h>
+@@ -502,6 +505,9 @@ void __init find_memory(void)
+ reserve_pernode_space();
+ memory_less_nodes();
+ initialize_pernode_data();
++#if defined(CONFIG_XEN) && defined(CONFIG_KEXEC)
++ xen_machine_kexec_setup_resources();
++#endif
+
+ max_pfn = max_low_pfn;
+
+diff -rpuN linux-2.6.18.8/arch/ia64/mm/init.c linux-2.6.18-xen-3.3.0/arch/ia64/mm/init.c
+--- linux-2.6.18.8/arch/ia64/mm/init.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/mm/init.c 2008-08-21 11:36:07.000000000 +0200
+@@ -303,16 +303,34 @@ static void __init
+ setup_gate (void)
+ {
+ struct page *page;
++ void *gate_page_addr = __start_gate_section;
++
++#ifdef CONFIG_XEN
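++	/*
++	 * Two gate pages are linked into the kernel: the native one and
++	 * a Xen-aware one.  Map whichever matches how we are running and
++	 * free the unused page back to the allocator.
++	 */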
++ unsigned long unused_gate;
++ extern char __start_xen_gate_section[];
++ if (is_running_on_xen()) {
++ gate_page_addr = __start_xen_gate_section;
++ unused_gate = (unsigned long)ia64_imva(__start_gate_section);
++ } else
++ unused_gate =
++ (unsigned long)ia64_imva(__start_xen_gate_section);
++#ifndef HAVE_BUGGY_SEGREL
++ ClearPageReserved(virt_to_page(unused_gate));
++ init_page_count(virt_to_page(unused_gate));
++ free_page(unused_gate);
++ ++totalram_pages;
++#endif
++#endif
+
+ /*
+ * Map the gate page twice: once read-only to export the ELF
+ * headers etc. and once execute-only page to enable
+ * privilege-promotion via "epc":
+ */
+- page = virt_to_page(ia64_imva(__start_gate_section));
++ page = virt_to_page(ia64_imva(gate_page_addr));
+ put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
+ #ifdef HAVE_BUGGY_SEGREL
+- page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
++ page = virt_to_page(ia64_imva(gate_page_addr + PAGE_SIZE));
+ put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
+ #else
+ put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
+diff -rpuN linux-2.6.18.8/arch/ia64/mm/ioremap.c linux-2.6.18-xen-3.3.0/arch/ia64/mm/ioremap.c
+--- linux-2.6.18.8/arch/ia64/mm/ioremap.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/mm/ioremap.c 2008-08-21 11:36:07.000000000 +0200
+@@ -16,6 +16,9 @@
+ static inline void __iomem *
+ __ioremap (unsigned long offset, unsigned long size)
+ {
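++	/* Let the hypervisor validate the MMIO range and return the
++	 * address to use; bail out if it refuses. */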
++ offset = HYPERVISOR_ioremap(offset, size);
++ if (IS_ERR_VALUE(offset))
++ return NULL;
+ return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+ }
+
+diff -rpuN linux-2.6.18.8/arch/ia64/oprofile/init.c linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/init.c
+--- linux-2.6.18.8/arch/ia64/oprofile/init.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/init.c 2008-08-21 11:36:07.000000000 +0200
+@@ -11,6 +11,8 @@
+ #include <linux/oprofile.h>
+ #include <linux/init.h>
+ #include <linux/errno.h>
++#include <asm/hypervisor.h>
++#include "oprofile_perfmon.h"
+
+ extern int perfmon_init(struct oprofile_operations * ops);
+ extern void perfmon_exit(void);
+@@ -20,6 +22,13 @@ int __init oprofile_arch_init(struct opr
+ {
+ int ret = -ENODEV;
+
++ if (is_running_on_xen()) {
++ ret = xen_perfmon_init();
++ if (ret)
++ return ret;
++ return xenoprofile_init(ops);
++ }
++
+ #ifdef CONFIG_PERFMON
+ /* perfmon_init() can fail, but we have no way to report it */
+ ret = perfmon_init(ops);
+@@ -32,6 +41,12 @@ int __init oprofile_arch_init(struct opr
+
+ void oprofile_arch_exit(void)
+ {
++ if (is_running_on_xen()) {
++ xenoprofile_exit();
++ xen_perfmon_exit();
++ return;
++ }
++
+ #ifdef CONFIG_PERFMON
+ perfmon_exit();
+ #endif
+diff -rpuN linux-2.6.18.8/arch/ia64/oprofile/Makefile linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/Makefile
+--- linux-2.6.18.8/arch/ia64/oprofile/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -8,3 +8,7 @@ DRIVER_OBJS := $(addprefix ../../../driv
+
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_PERFMON) += perfmon.o
++ifeq ($(CONFIG_XEN), y)
++oprofile-$(CONFIG_PERFMON) += xenoprof.o \
++ ../../../drivers/xen/xenoprof/xenoprofile.o
++endif
+diff -rpuN linux-2.6.18.8/arch/ia64/oprofile/oprofile_perfmon.h linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/oprofile_perfmon.h
+--- linux-2.6.18.8/arch/ia64/oprofile/oprofile_perfmon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/oprofile_perfmon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,30 @@
++#ifndef OPROFILE_PERFMON_H
++#define OPROFILE_PERFMON_H
++
++#ifdef CONFIG_PERFMON
++#ifdef CONFIG_XEN
++int __perfmon_init(void);
++void __perfmon_exit(void);
++int perfmon_start(void);
++void perfmon_stop(void);
++#endif
++#else
++#define __perfmon_init() (-ENOSYS)
++#define __perfmon_exit() do {} while (0)
++#endif /* CONFIG_PERFMON */
++
++#ifdef CONFIG_XEN
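++/*
++ * With Xen, the perfmon helpers below are shared with xenoprof.c and
++ * therefore need external linkage; without Xen they remain static.
++ */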
++#define STATIC_IF_NO_XEN /* nothing */
++#define xen_perfmon_init() __perfmon_init()
++#define xen_perfmon_exit() __perfmon_exit()
++extern int xenoprofile_init(struct oprofile_operations * ops);
++extern void xenoprofile_exit(void);
++#else
++#define STATIC_IF_NO_XEN static
++#define xen_perfmon_init() (-ENOSYS)
++#define xen_perfmon_exit() do {} while (0)
++#define xenoprofile_init(ops) (-ENOSYS)
++#define xenoprofile_exit() do {} while (0)
++#endif /* CONFIG_XEN */
++
++#endif /* OPROFILE_PERFMON_H */
+diff -rpuN linux-2.6.18.8/arch/ia64/oprofile/perfmon.c linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/perfmon.c
+--- linux-2.6.18.8/arch/ia64/oprofile/perfmon.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/perfmon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -13,6 +13,7 @@
+ #include <asm/perfmon.h>
+ #include <asm/ptrace.h>
+ #include <asm/errno.h>
++#include "oprofile_perfmon.h"
+
+ static int allow_ints;
+
+@@ -33,14 +34,16 @@ perfmon_handler(struct task_struct *task
+ }
+
+
+-static int perfmon_start(void)
++STATIC_IF_NO_XEN
++int perfmon_start(void)
+ {
+ allow_ints = 1;
+ return 0;
+ }
+
+
+-static void perfmon_stop(void)
++STATIC_IF_NO_XEN
++void perfmon_stop(void)
+ {
+ allow_ints = 0;
+ }
+@@ -75,16 +78,35 @@ static char * get_cpu_type(void)
+
+ static int using_perfmon;
+
+-int perfmon_init(struct oprofile_operations * ops)
++STATIC_IF_NO_XEN
++int __perfmon_init(void)
+ {
+ int ret = pfm_register_buffer_fmt(&oprofile_fmt);
+ if (ret)
+ return -ENODEV;
+
++ using_perfmon = 1;
++ return 0;
++}
++
++STATIC_IF_NO_XEN
++void __perfmon_exit(void)
++{
++ if (!using_perfmon)
++ return;
++
++ pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
++}
++
++int perfmon_init(struct oprofile_operations * ops)
++{
++ int ret = __perfmon_init();
++ if (ret)
++ return -ENODEV;
++
+ ops->cpu_type = get_cpu_type();
+ ops->start = perfmon_start;
+ ops->stop = perfmon_stop;
+- using_perfmon = 1;
+ printk(KERN_INFO "oprofile: using perfmon.\n");
+ return 0;
+ }
+@@ -92,8 +114,5 @@ int perfmon_init(struct oprofile_operati
+
+ void perfmon_exit(void)
+ {
+- if (!using_perfmon)
+- return;
+-
+- pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
++ __perfmon_exit();
+ }
+diff -rpuN linux-2.6.18.8/arch/ia64/oprofile/xenoprof.c linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/xenoprof.c
+--- linux-2.6.18.8/arch/ia64/oprofile/xenoprof.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/oprofile/xenoprof.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,142 @@
++/******************************************************************************
++ * xenoprof ia64 specific part
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/init.h>
++#include <linux/oprofile.h>
++#include <linux/ioport.h>
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/xenoprof.h>
++
++#include "oprofile_perfmon.h"
++
++void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
++{
++ init->num_events = 0; /* perfmon manages. */
++}
++
++void xenoprof_arch_counter(void)
++{
++ /* nothing. perfmon does. */
++}
++
++void xenoprof_arch_start(void)
++{
++ perfmon_start();
++}
++
++void xenoprof_arch_stop(void)
++{
++ perfmon_stop();
++}
++
++/* XXX move them to an appropriate header file. */
++struct resource* xen_ia64_allocate_resource(unsigned long size);
++void xen_ia64_release_resource(struct resource *res);
++void xen_ia64_unmap_resource(struct resource *res);
++
++struct resource*
++xenoprof_ia64_allocate_resource(int32_t max_samples)
++{
++ unsigned long bufsize;
++
++ /* XXX add hypercall to get bufsize? */
++ /* this value is taken from alloc_xenoprof_struct(). */
++#if 0
++ bufsize = NR_CPUS * (sizeof(struct xenoprof_buf) +
++ (max_samples - 1) * sizeof(struct event_log));
++ bufsize = PAGE_ALIGN(bufsize) + PAGE_SIZE;
++#else
++#define MAX_OPROF_SHARED_PAGES 32
++ bufsize = (MAX_OPROF_SHARED_PAGES + 1) * PAGE_SIZE;
++#endif
++ return xen_ia64_allocate_resource(bufsize);
++}
++
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf)
++{
++ if (sbuf->buffer) {
++ xen_ia64_unmap_resource(sbuf->arch.res);
++ sbuf->buffer = NULL;
++ sbuf->arch.res = NULL;
++ }
++}
++
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
++ struct xenoprof_shared_buffer *sbuf)
++{
++ int ret;
++ struct resource *res;
++
++ sbuf->buffer = NULL;
++ sbuf->arch.res = NULL;
++
++ res = xenoprof_ia64_allocate_resource(get_buffer->max_samples);
++ if (IS_ERR(res))
++ return PTR_ERR(res);
++
++ get_buffer->buf_gmaddr = res->start;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer);
++ if (ret) {
++ xen_ia64_release_resource(res);
++ return ret;
++ }
++
++ BUG_ON((res->end - res->start + 1) <
++ get_buffer->bufsize * get_buffer->nbuf);
++
++ sbuf->buffer = __va(res->start);
++ sbuf->arch.res = res;
++
++ return ret;
++}
++
++int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
++ struct xenoprof_shared_buffer *sbuf)
++{
++ int ret;
++ struct resource *res;
++
++ sbuf->buffer = NULL;
++ sbuf->arch.res = NULL;
++
++ res = xenoprof_ia64_allocate_resource(pdomain->max_samples);
++ if (IS_ERR(res))
++ return PTR_ERR(res);
++
++ pdomain->buf_gmaddr = res->start;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
++ if (ret) {
++ xen_ia64_release_resource(res);
++ return ret;
++ }
++
++ BUG_ON((res->end - res->start + 1) < pdomain->bufsize * pdomain->nbuf);
++
++ sbuf->buffer = __va(res->start);
++ sbuf->arch.res = res;
++
++ return ret;
++}
+diff -rpuN linux-2.6.18.8/arch/ia64/pci/pci.c linux-2.6.18-xen-3.3.0/arch/ia64/pci/pci.c
+--- linux-2.6.18.8/arch/ia64/pci/pci.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/pci/pci.c 2008-08-21 11:36:07.000000000 +0200
+@@ -30,6 +30,15 @@
+ #include <asm/irq.h>
+ #include <asm/hw_irq.h>
+
++#ifdef CONFIG_XEN
++struct ioremap_issue_list {
++ struct list_head listp;
++ unsigned long start;
++ unsigned long end;
++};
++typedef struct ioremap_issue_list ioremap_issue_list_t;
++#endif /* CONFIG_XEN */
++
+ /*
+ * Low-level SAL-based PCI configuration access functions. Note that SAL
+ * calls are already serialized (via sal_lock), so we don't need another
+@@ -165,6 +174,11 @@ new_space (u64 phys_base, int sparse)
+ io_space[i].mmio_base = mmio_base;
+ io_space[i].sparse = sparse;
+
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ HYPERVISOR_add_io_space(phys_base, sparse, i);
++#endif
++
+ return i;
+ }
+
+@@ -332,6 +346,169 @@ pcibios_setup_root_windows(struct pci_bu
+ }
+ }
+
++#ifdef CONFIG_XEN
++static void __devinit
++__cleanup_issue_list(struct list_head *top)
++{
++ ioremap_issue_list_t *ptr, *tmp_ptr;
++
++ list_for_each_entry_safe(ptr, tmp_ptr, top, listp) {
++ list_del(&(ptr->listp));
++ kfree(ptr);
++ }
++}
++
++static int __devinit
++__add_issue_list(unsigned long start, unsigned long end, struct list_head *top)
++{
++ ioremap_issue_list_t *ptr, *new;
++
++ if (start > end) {
++ printk(KERN_ERR "%s: Internal error (start addr > end addr)\n",
++ __FUNCTION__);
++ return 0;
++ }
++
++ /*
++	 * The head of the resource structure list contains a dummy
++	 * value (start=0, end=~0), so skip it.
++ */
++ if ((start == 0) && (end == ~0))
++ return 0;
++
++ start &= PAGE_MASK;
++ end |= ~PAGE_MASK;
++
++ /* We can merge specified address range into existing entry */
++ list_for_each_entry(ptr, top, listp) {
++ if ((ptr->start > end + 1) || (ptr->end + 1 < start))
++ continue;
++ ptr->start = min(start, ptr->start);
++ ptr->end = max(end, ptr->end);
++ return 0;
++ }
++
++ /* We could not merge, so create new entry */
++ new = kmalloc(sizeof(ioremap_issue_list_t), GFP_KERNEL);
++ if (new == NULL) {
++ printk(KERN_ERR "%s: Could not allocate memory. "
++ "HYPERVISOR_ioremap will not be issued\n",
++ __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ new->start = start;
++ new->end = end;
++
++ /* Insert the new entry to the list by ascending order */
++ if (list_empty(top)) {
++ list_add_tail(&(new->listp), top);
++ return 0;
++ }
++ list_for_each_entry(ptr, top, listp) {
++ if (new->start > ptr->start)
++ continue;
++ list_add(&(new->listp), ((struct list_head *)ptr)->prev);
++ return 0;
++ }
++ list_add_tail(&(new->listp), top);
++
++ return 0;
++}
++
++static int __devinit
++__make_issue_list(struct resource *ptr, struct list_head *top)
++{
++ int ret;
++
++ if (ptr->child) {
++ ret = __make_issue_list(ptr->child, top);
++ if (ret)
++ return ret;
++ }
++ if (ptr->sibling) {
++ ret = __make_issue_list(ptr->sibling, top);
++ if (ret)
++ return ret;
++ }
++
++ if (ptr->flags & IORESOURCE_MEM) {
++ ret = __add_issue_list(ptr->start, ptr->end, top);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static void __devinit
++__compress_issue_list(struct list_head *top)
++{
++ ioremap_issue_list_t *ptr, *tmp_ptr, *next;
++ int compressed;
++
++ /*
++ * Merge adjacent entries, if overlapped
++ * (entries are sorted by ascending order)
++ */
++ list_for_each_entry_safe(ptr, tmp_ptr, top, listp) {
++ if (list_is_last((struct list_head *)ptr, top))
++ continue;
++
++ next = (ioremap_issue_list_t *)
++ (((struct list_head *)ptr)->next);
++ if (next->start <= (ptr->end) + 1) {
++ next->start = min(ptr->start, next->start);
++ next->end = max(ptr->end, next->end);
++
++ list_del(&(ptr->listp));
++ kfree(ptr);
++ }
++ }
++}
++
++static int __devinit
++__issue_ioremap(struct list_head *top)
++{
++ ioremap_issue_list_t *ptr, *tmp_ptr;
++	unsigned long offset;
++
++ list_for_each_entry_safe(ptr, tmp_ptr, top, listp) {
++ offset = HYPERVISOR_ioremap(ptr->start,
++ ptr->end - ptr->start + 1);
++		if (offset == ~0UL) {
++ printk(KERN_ERR "%s: HYPERVISOR_ioremap() failed. "
++ "Address Range: 0x%016lx-0x%016lx\n",
++ __FUNCTION__, ptr->start, ptr->end);
++ }
++
++ list_del(&(ptr->listp));
++ kfree(ptr);
++ }
++
++ return 0;
++}
++
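++/*
++ * Walk the iomem resource tree, coalesce every IORESOURCE_MEM range
++ * into a sorted issue list, then issue one HYPERVISOR_ioremap per range.
++ */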
++static int __devinit
++do_ioremap_on_resource_list(struct resource *top)
++{
++ LIST_HEAD(ioremap_issue_list_top);
++ int ret;
++
++ ret = __make_issue_list(top, &ioremap_issue_list_top);
++ if (ret) {
++ __cleanup_issue_list(&ioremap_issue_list_top);
++ return ret;
++ }
++
++ __compress_issue_list(&ioremap_issue_list_top);
++
++ (void)__issue_ioremap(&ioremap_issue_list_top);
++
++ return 0;
++}
++#endif /* CONFIG_XEN */
++
+ struct pci_bus * __devinit
+ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
+ {
+@@ -375,6 +552,18 @@ pci_acpi_scan_root(struct acpi_device *d
+ if (pbus)
+ pcibios_setup_root_windows(pbus, controller);
+
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ if (do_ioremap_on_resource_list(&iomem_resource) != 0) {
++ printk(KERN_ERR
++			       "%s: Could not issue HYPERVISOR_ioremap "
++ "due to lack of memory or hypercall failure\n",
++ __FUNCTION__);
++ goto out3;
++ }
++ }
++#endif /* CONFIG_XEN */
++
+ return pbus;
+
+ out3:
+@@ -818,3 +1007,31 @@ int pci_vector_resources(int last, int n
+
+ return count;
+ }
++
++#ifdef CONFIG_XEN
++void __devinit xen_add_resource(struct pci_controller *controller,
++ unsigned int domain, unsigned int bus,
++ struct acpi_resource *resource)
++{
++ struct pci_root_info info;
++ char *name;
++
++ name = kmalloc(16, GFP_KERNEL);
++ if (!name)
++ return;
++
++ sprintf(name, "PCI Bus %04x:%02x", domain, bus);
++ info.controller = controller;
++ info.name = name;
++
++ add_window(resource, &info);
++}
++EXPORT_SYMBOL(xen_add_resource);
++
++void __devinit xen_pcibios_setup_root_windows(struct pci_bus *bus,
++ struct pci_controller *controller)
++{
++ pcibios_setup_root_windows(bus, controller);
++}
++EXPORT_SYMBOL(xen_pcibios_setup_root_windows);
++#endif
+diff -rpuN linux-2.6.18.8/arch/ia64/sn/kernel/setup.c linux-2.6.18-xen-3.3.0/arch/ia64/sn/kernel/setup.c
+--- linux-2.6.18.8/arch/ia64/sn/kernel/setup.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/sn/kernel/setup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -763,5 +763,13 @@ int sn_prom_feature_available(int id)
+ return 0;
+ return test_bit(id, sn_prom_features);
+ }
++
++void
++sn_kernel_launch_event(void)
++{
++	/* ignore status until we understand possible failures, if any */
++	if (ia64_sn_kernel_launch_event())
++		printk(KERN_ERR "KEXEC is not supported in this PROM; please update the PROM.\n");
++}
+ EXPORT_SYMBOL(sn_prom_feature_available);
+
+diff -rpuN linux-2.6.18.8/arch/ia64/sn/pci/pcibr/pcibr_provider.c linux-2.6.18-xen-3.3.0/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+--- linux-2.6.18.8/arch/ia64/sn/pci/pcibr/pcibr_provider.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/sn/pci/pcibr/pcibr_provider.c 2008-08-21 11:36:07.000000000 +0200
+@@ -15,6 +15,7 @@
+ #include <asm/sn/pcibus_provider_defs.h>
+ #include <asm/sn/pcidev.h>
+ #include <asm/sn/sn_sal.h>
++#include <asm/sn/pic.h>
+ #include <asm/sn/sn2/sn_hwperf.h>
+ #include "xtalk/xwidgetdev.h"
+ #include "xtalk/hubdev.h"
+@@ -129,9 +130,9 @@ pcibr_bus_fixup(struct pcibus_bussoft *p
+ }
+
+ memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
+- soft->pbi_buscommon.bs_base =
+- (((u64) soft->pbi_buscommon.
+- bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
++ soft->pbi_buscommon.bs_base = (unsigned long)
++ ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
++ sizeof(struct pic));
+
+ spin_lock_init(&soft->pbi_lock);
+
+diff -rpuN linux-2.6.18.8/arch/ia64/sn/pci/tioca_provider.c linux-2.6.18-xen-3.3.0/arch/ia64/sn/pci/tioca_provider.c
+--- linux-2.6.18.8/arch/ia64/sn/pci/tioca_provider.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/sn/pci/tioca_provider.c 2008-08-21 11:36:07.000000000 +0200
+@@ -611,7 +611,9 @@ tioca_bus_fixup(struct pcibus_bussoft *p
+ return NULL;
+
+ memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
+- tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
++ tioca_common->ca_common.bs_base = (unsigned long)
++ ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
++ sizeof(struct tioca_common));
+
+ /* init kernel-private area */
+
+diff -rpuN linux-2.6.18.8/arch/ia64/sn/pci/tioce_provider.c linux-2.6.18-xen-3.3.0/arch/ia64/sn/pci/tioce_provider.c
+--- linux-2.6.18.8/arch/ia64/sn/pci/tioce_provider.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/sn/pci/tioce_provider.c 2008-08-21 11:36:07.000000000 +0200
+@@ -1006,7 +1006,9 @@ tioce_bus_fixup(struct pcibus_bussoft *p
+ return NULL;
+
+ memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
+- tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET;
++ tioce_common->ce_pcibus.bs_base = (unsigned long)
++ ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base),
++ sizeof(struct tioce_common));
+
+ tioce_kern = tioce_kern_init(tioce_common);
+ if (tioce_kern == NULL) {
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/hypercall.S linux-2.6.18-xen-3.3.0/arch/ia64/xen/hypercall.S
+--- linux-2.6.18.8/arch/ia64/xen/hypercall.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/hypercall.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,141 @@
++/*
++ * Support routines for Xen hypercalls
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++
++#include <asm/asmmacro.h>
++#include <asm/intrinsics.h>
++
++#ifdef __INTEL_COMPILER
++# undef ASM_SUPPORTED
++#else
++# define ASM_SUPPORTED
++#endif
++
++#ifndef ASM_SUPPORTED
++GLOBAL_ENTRY(xen_get_psr)
++ XEN_HYPER_GET_PSR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_psr)
++
++GLOBAL_ENTRY(xen_get_ivr)
++ XEN_HYPER_GET_IVR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_ivr)
++
++GLOBAL_ENTRY(xen_get_tpr)
++ XEN_HYPER_GET_TPR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_tpr)
++
++GLOBAL_ENTRY(xen_set_tpr)
++ mov r8=r32
++ XEN_HYPER_SET_TPR
++ br.ret.sptk.many rp
++ ;;
++END(xen_set_tpr)
++
++GLOBAL_ENTRY(xen_eoi)
++ mov r8=r32
++ XEN_HYPER_EOI
++ br.ret.sptk.many rp
++ ;;
++END(xen_eoi)
++
++GLOBAL_ENTRY(xen_thash)
++ mov r8=r32
++ XEN_HYPER_THASH
++ br.ret.sptk.many rp
++ ;;
++END(xen_thash)
++
++GLOBAL_ENTRY(xen_set_itm)
++ mov r8=r32
++ XEN_HYPER_SET_ITM
++ br.ret.sptk.many rp
++ ;;
++END(xen_set_itm)
++
++GLOBAL_ENTRY(xen_ptcga)
++ mov r8=r32
++ mov r9=r33
++ XEN_HYPER_PTC_GA
++ br.ret.sptk.many rp
++ ;;
++END(xen_ptcga)
++
++GLOBAL_ENTRY(xen_get_rr)
++ mov r8=r32
++ XEN_HYPER_GET_RR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_rr)
++
++GLOBAL_ENTRY(xen_set_rr)
++ mov r8=r32
++ mov r9=r33
++ XEN_HYPER_SET_RR
++ br.ret.sptk.many rp
++ ;;
++END(xen_set_rr)
++
++GLOBAL_ENTRY(xen_set_kr)
++ mov r8=r32
++ mov r9=r33
++ XEN_HYPER_SET_KR
++ br.ret.sptk.many rp
++END(xen_set_kr)
++
++GLOBAL_ENTRY(xen_fc)
++ mov r8=r32
++ XEN_HYPER_FC
++ br.ret.sptk.many rp
++END(xen_fc)
++
++GLOBAL_ENTRY(xen_get_cpuid)
++ mov r8=r32
++ XEN_HYPER_GET_CPUID
++ br.ret.sptk.many rp
++END(xen_get_cpuid)
++
++GLOBAL_ENTRY(xen_get_pmd)
++ mov r8=r32
++ XEN_HYPER_GET_PMD
++ br.ret.sptk.many rp
++END(xen_get_pmd)
++
++#ifdef CONFIG_IA32_SUPPORT
++GLOBAL_ENTRY(xen_get_eflag)
++ XEN_HYPER_GET_EFLAG
++ br.ret.sptk.many rp
++END(xen_get_eflag)
++
++// some bits aren't set if pl!=0, see SDM vol1 3.1.8
++GLOBAL_ENTRY(xen_set_eflag)
++ mov r8=r32
++ XEN_HYPER_SET_EFLAG
++ br.ret.sptk.many rp
++END(xen_set_eflag)
++#endif /* CONFIG_IA32_SUPPORT */
++#endif /* ASM_SUPPORTED */
++
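++// Hypercalls trap into Xen via "break 0x1000", with the hypercall
++// number in r2 and arguments in r14/r15 (cf. __hypercall below).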
++GLOBAL_ENTRY(xen_send_ipi)
++ mov r14=r32
++ mov r15=r33
++ mov r2=0x400
++ break 0x1000
++ ;;
++ br.ret.sptk.many rp
++ ;;
++END(xen_send_ipi)
++
++GLOBAL_ENTRY(__hypercall)
++ mov r2=r37
++ break 0x1000
++ br.ret.sptk.many b0
++ ;;
++END(__hypercall)
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/hypervisor.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/hypervisor.c
+--- linux-2.6.18.8/arch/ia64/xen/hypervisor.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/hypervisor.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1526 @@
++/******************************************************************************
++ * arch/ia64/xen/hypervisor.c
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/spinlock.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/efi.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/meminit.h>
++#include <asm/hypervisor.h>
++#include <asm/hypercall.h>
++#include <xen/interface/memory.h>
++#include <xen/xencons.h>
++#include <xen/balloon.h>
++
++shared_info_t *HYPERVISOR_shared_info __read_mostly =
++ (shared_info_t *)XSI_BASE;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++EXPORT_SYMBOL(running_on_xen);
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++static int p2m_expose_init(void);
++#else
++#define p2m_expose_init() (-ENOSYS)
++#define p2m_expose_resume() ((void)0)
++#endif
++
++EXPORT_SYMBOL(__hypercall);
++
++void __init
++xen_setup(char **cmdline_p)
++{
++ struct dom0_vga_console_info *info;
++ extern void dig_setup(char **cmdline_p);
++
++ if (ia64_platform_is("xen"))
++ dig_setup(cmdline_p);
++
++ if (!is_running_on_xen() || !is_initial_xendomain())
++ return;
++
++ info = (void *)((char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++ dom0_init_screen_info(info, xen_start_info->console.dom0.info_size);
++
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++}
++
++void __cpuinit
++xen_cpu_init(void)
++{
++ extern void xen_smp_intr_init(void);
++ xen_smp_intr_init();
++}
++
++/*
++ *XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
++ * move those to lib/contiguous_bitmap?
++ *XXX discontigmem/sparsemem
++ */
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap __read_mostly;
++
++#ifdef CONFIG_VIRTUAL_MEM_MAP
++/* Following logic is stolen from create_mem_map_table() for virtual memmap */
++static int
++create_contiguous_bitmap(u64 start, u64 end, void *arg)
++{
++ unsigned long address, start_page, end_page;
++ unsigned long bitmap_start, bitmap_end;
++ unsigned char *bitmap;
++ int node;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ bitmap_start = (unsigned long)contiguous_bitmap +
++ ((__pa(start) >> PAGE_SHIFT) >> 3);
++ bitmap_end = (unsigned long)contiguous_bitmap +
++ (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
++
++ start_page = bitmap_start & PAGE_MASK;
++ end_page = PAGE_ALIGN(bitmap_end);
++ node = paddr_to_nid(__pa(start));
++
++ bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
++ end_page - start_page);
++ BUG_ON(!bitmap);
++ memset(bitmap, 0, end_page - start_page);
++
++ for (address = start_page; address < end_page; address += PAGE_SIZE) {
++ pgd = pgd_offset_k(address);
++ if (pgd_none(*pgd))
++ pgd_populate(&init_mm, pgd,
++ alloc_bootmem_pages_node(NODE_DATA(node),
++ PAGE_SIZE));
++ pud = pud_offset(pgd, address);
++
++ if (pud_none(*pud))
++ pud_populate(&init_mm, pud,
++ alloc_bootmem_pages_node(NODE_DATA(node),
++ PAGE_SIZE));
++ pmd = pmd_offset(pud, address);
++
++ if (pmd_none(*pmd))
++ pmd_populate_kernel(&init_mm, pmd,
++ alloc_bootmem_pages_node
++ (NODE_DATA(node), PAGE_SIZE));
++ pte = pte_offset_kernel(pmd, address);
++
++ if (pte_none(*pte))
++ set_pte(pte,
++ pfn_pte(__pa(bitmap + (address - start_page))
++ >> PAGE_SHIFT, PAGE_KERNEL));
++ }
++ return 0;
++}
++#endif
++
++static void
++__contiguous_bitmap_init(unsigned long size)
++{
++ contiguous_bitmap = alloc_bootmem_pages(size);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, size);
++}
++
++void
++xen_contiguous_bitmap_init(unsigned long end_pfn)
++{
++ unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
++#ifndef CONFIG_VIRTUAL_MEM_MAP
++ __contiguous_bitmap_init(size);
++#else
++ unsigned long max_gap = 0;
++
++ efi_memmap_walk(find_largest_hole, (u64*)&max_gap);
++ if (max_gap < LARGE_GAP) {
++ __contiguous_bitmap_init(size);
++ } else {
++ unsigned long map_size = PAGE_ALIGN(size);
++ vmalloc_end -= map_size;
++ contiguous_bitmap = (unsigned long*)vmalloc_end;
++ efi_memmap_walk(create_contiguous_bitmap, NULL);
++ }
++#endif
++}
++
++#if 0
++int
++contiguous_bitmap_test(void* p)
++{
++ return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
++}
++#endif
++
++static void contiguous_bitmap_set(
++ unsigned long first_page, unsigned long nr_pages)
++{
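++	/* Set bits [first_page, first_page + nr_pages): mask the partial
++	 * first and last words and fill whole words in between. */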
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] |=
++ ((1UL<<end_off)-1) & -(1UL<<start_off);
++ } else {
++ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++ while ( ++curr_idx < end_idx )
++ contiguous_bitmap[curr_idx] = ~0UL;
++ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++ }
++}
++
++static void contiguous_bitmap_clear(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] &=
++ -(1UL<<end_off) | ((1UL<<start_off)-1);
++ } else {
++ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++ while ( ++curr_idx != end_idx )
++ contiguous_bitmap[curr_idx] = 0;
++ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++ }
++}
++
++/*
++ * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
++ * are based on i386 xen_create_contiguous_region(),
++ * xen_destroy_contiguous_region()
++ */
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 7
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int
++__xen_create_contiguous_region(unsigned long vstart,
++ unsigned int order, unsigned int address_bits)
++{
++ unsigned long error = 0;
++ unsigned long gphys = __pa(vstart);
++ unsigned long start_gpfn = gphys >> PAGE_SHIFT;
++ unsigned long num_gpfn = 1 << order;
++ unsigned long i;
++ unsigned long flags;
++
++ unsigned long *in_frames = discontig_frames, out_frame;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = num_gpfn,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ },
++ .nr_exchanged = 0
++ };
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages(vstart, num_gpfn);
++
++ balloon_lock(flags);
++
++ /* Get a new contiguous memory extent. */
++ for (i = 0; i < num_gpfn; i++)
++ in_frames[i] = start_gpfn + i;
++ out_frame = start_gpfn;
++ error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == num_gpfn);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
++ BUG_ON(success && (error != 0));
++ if (unlikely(error == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in);
++ BUG_ON(error != num_gpfn);
++ error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out);
++ if (error != 1) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < num_gpfn; i++) {
++ in_frames[i] = start_gpfn + i;
++ }
++ error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in);
++ BUG_ON(error != num_gpfn);
++ success = 0;
++ } else
++ success = 1;
++ }
++ if (success)
++ contiguous_bitmap_set(start_gpfn, num_gpfn);
++#if 0
++ if (success) {
++ unsigned long mfn;
++ unsigned long mfn_prev = ~0UL;
++ for (i = 0; i < num_gpfn; i++) {
++ mfn = pfn_to_mfn_for_dma(start_gpfn + i);
++ if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
++ xprintk("\n");
++ xprintk("%s:%d order %d "
++ "start 0x%lx bus 0x%lx "
++ "machine 0x%lx\n",
++ __func__, __LINE__, order,
++ vstart, virt_to_bus((void*)vstart),
++ phys_to_machine_for_dma(gphys));
++ xprintk("mfn: ");
++ for (i = 0; i < num_gpfn; i++) {
++ mfn = pfn_to_mfn_for_dma(
++ start_gpfn + i);
++ xprintk("0x%lx ", mfn);
++ }
++ xprintk("\n");
++ break;
++ }
++ mfn_prev = mfn;
++ }
++ }
++#endif
++ balloon_unlock(flags);
++ return success? 0: -ENOMEM;
++}
++
++void
++__xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long flags;
++ unsigned long error = 0;
++ unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
++ unsigned long num_gpfn = 1UL << order;
++ unsigned long i;
++
++ unsigned long *out_frames = discontig_frames, in_frame;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = num_gpfn,
++ .extent_order = 0,
++ .address_bits = 0,
++ .domid = DOMID_SELF
++ },
++ .nr_exchanged = 0
++ };
++
++
++ if (!test_bit(start_gpfn, contiguous_bitmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages(vstart, num_gpfn);
++
++ balloon_lock(flags);
++
++ contiguous_bitmap_clear(start_gpfn, num_gpfn);
++
++ /* Do the exchange for non-contiguous MFNs. */
++ in_frame = start_gpfn;
++ for (i = 0; i < num_gpfn; i++)
++ out_frames[i] = start_gpfn + i;
++ error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
++ BUG_ON(success && (error != 0));
++ if (unlikely(error == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in);
++ BUG_ON(error != 1);
++
++ error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out);
++ BUG_ON(error != num_gpfn);
++ }
++ balloon_unlock(flags);
++}
++
++int
++xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
++ unsigned int address_bits)
++{
++ return xen_create_contiguous_region((unsigned long)page_address(pages),
++ order, address_bits);
++}
++
++/****************************************************************************
++ * grant table hack
++ * cmd: GNTTABOP_xxx
++ */
++#include <linux/mm.h>
++#include <xen/interface/xen.h>
++#include <xen/gnttab.h>
++
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++ return __va(frames[0] << PAGE_SHIFT);
++}
++
++static void
++gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
++{
++ uint32_t flags;
++
++ flags = uop->flags;
++
++ if (flags & GNTMAP_host_map) {
++ if (flags & GNTMAP_application_map) {
++ xprintd("GNTMAP_application_map is not supported yet:"
++ " flags 0x%x\n", flags);
++ BUG();
++ }
++ if (flags & GNTMAP_contains_pte) {
++			xprintd("GNTMAP_contains_pte is not supported yet:"
++ " flags 0x%x\n", flags);
++ BUG();
++ }
++ } else if (flags & GNTMAP_device_map) {
++		xprintd("GNTMAP_device_map is not supported yet: 0x%x\n",
++ flags);
++ BUG(); /* XXX not yet. actually this flag is not used. */
++ } else {
++ BUG();
++ }
++}
++
++int
++HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
++{
++ if (cmd == GNTTABOP_map_grant_ref) {
++ unsigned int i;
++ for (i = 0; i < count; i++) {
++ gnttab_map_grant_ref_pre(
++ (struct gnttab_map_grant_ref*)uop + i);
++ }
++ }
++ return xencomm_hypercall_grant_table_op(cmd, uop, count);
++}
++EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
++
++/**************************************************************************
++ * foreign mapping
++ */
++#include <linux/efi.h>
++#include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
++
++static unsigned long privcmd_resource_min = 0;
++/* Xen/ia64 currently can handle pseudo-physical addresses
++ * up to (1UL << (PAGE_SHIFT * 3)). */
++static unsigned long privcmd_resource_max =
++ GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
++static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
++
++static unsigned long
++md_end_addr(const efi_memory_desc_t *md)
++{
++ return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
++}
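++
++/*
++ * Example (illustration only): EFI descriptors count EFI pages
++ * (EFI_PAGE_SHIFT == 12, i.e. 4KB), so a descriptor with
++ * phys_addr 0x1000000 and num_pages 4 ends at 0x1004000.
++ */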
++
++#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE (1024 * 1024 * 1024UL)
++static int
++xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
++{
++ return (start < end &&
++ (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
++}
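++
++/*
++ * Illustrative note (editor's sketch): with the 1GB threshold above, a
++ * gap of [0x40000000, 0x80000001) passes this check while
++ * [0x40000000, 0x80000000) does not, since the strict '>' requires
++ * strictly more than XEN_IA64_PRIVCMD_LEAST_GAP_SIZE bytes.
++ */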
++
++static int __init
++xen_ia64_privcmd_init(void)
++{
++ void *efi_map_start, *efi_map_end, *p;
++ u64 efi_desc_size;
++ efi_memory_desc_t *md;
++ unsigned long tmp_min;
++ unsigned long tmp_max;
++ unsigned long gap_size;
++ unsigned long prev_end;
++
++ if (!is_running_on_xen())
++ return -1;
++
++ efi_map_start = __va(ia64_boot_param->efi_memmap);
++ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
++ efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++	/* First, check the highest address in use. */
++ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
++ /* nothing */;
++ }
++ md = p - efi_desc_size;
++ privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
++ if (xen_ia64_privcmd_check_size(privcmd_resource_min,
++ privcmd_resource_max))
++ goto out;
++
++	/* The highest used address is too large.
++	 * Try to find the largest gap. */
++ tmp_min = privcmd_resource_max;
++ tmp_max = 0;
++ gap_size = 0;
++ prev_end = 0;
++ for (p = efi_map_start;
++ p < efi_map_end - efi_desc_size;
++ p += efi_desc_size) {
++ unsigned long end;
++ efi_memory_desc_t* next;
++ unsigned long next_start;
++
++ md = p;
++ end = md_end_addr(md);
++ if (end > privcmd_resource_max)
++ break;
++ if (end < prev_end) {
++			/* Work around:
++			 * Xen may pass incompletely sorted memory
++			 * descriptors such as
++			 * [x, x + length]
++			 * [x, x]
++			 * even though this order should be reversed. */
++ continue;
++ }
++ next = p + efi_desc_size;
++ next_start = next->phys_addr;
++ if (next_start > privcmd_resource_max)
++ next_start = privcmd_resource_max;
++ if (end < next_start && gap_size < (next_start - end)) {
++ tmp_min = end;
++ tmp_max = next_start;
++ gap_size = tmp_max - tmp_min;
++ }
++ prev_end = end;
++ }
++
++ privcmd_resource_min = GRANULEROUNDUP(tmp_min);
++ if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
++ privcmd_resource_max = tmp_max;
++ goto out;
++ }
++
++ privcmd_resource_min = tmp_min;
++ privcmd_resource_max = tmp_max;
++ if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
++ privcmd_resource_max)) {
++		/* No large enough gap was found.
++		 * Go ahead anyway with a warning, hoping that no large
++		 * region will be requested. */
++		printk(KERN_WARNING "xen privcmd: "
++		       "no large enough region for privcmd mmap was found.\n");
++ }
++
++out:
++ printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
++ "[0x%lx, 0x%lx] (%ldMB)\n",
++ privcmd_resource_min, privcmd_resource_max,
++ (privcmd_resource_max - privcmd_resource_min) >> 20);
++ BUG_ON(privcmd_resource_min >= privcmd_resource_max);
++
++ /* XXX this should be somewhere appropriate */
++ (void)p2m_expose_init();
++
++ return 0;
++}
++late_initcall(xen_ia64_privcmd_init);
++
++struct xen_ia64_privcmd_entry {
++ atomic_t map_count;
++#define INVALID_GPFN (~0UL)
++ unsigned long gpfn;
++};
++
++struct xen_ia64_privcmd_range {
++ atomic_t ref_count;
++ unsigned long pgoff; /* in PAGE_SIZE */
++ struct resource *res;
++
++ /* for foreign domain p2m mapping */
++ void *private;
++ void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
++
++ unsigned long num_entries;
++ struct xen_ia64_privcmd_entry entries[0];
++};
++
++struct xen_ia64_privcmd_vma {
++ int is_privcmd_mmapped;
++ struct xen_ia64_privcmd_range *range;
++
++ unsigned long num_entries;
++ struct xen_ia64_privcmd_entry *entries;
++};
++
++static void
++xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
++{
++ atomic_set(&entry->map_count, 0);
++ entry->gpfn = INVALID_GPFN;
++}
++
++static int
++xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
++ unsigned long addr,
++ struct xen_ia64_privcmd_range *privcmd_range,
++ int i,
++ unsigned long gmfn,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int error = 0;
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++ unsigned long gpfn;
++ unsigned long flags;
++
++ if ((addr & ~PAGE_MASK) != 0 || gmfn == INVALID_MFN) {
++ error = -EINVAL;
++ goto out;
++ }
++
++ if (entry->gpfn != INVALID_GPFN) {
++ error = -EBUSY;
++ goto out;
++ }
++ gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
++
++ flags = ASSIGN_writable;
++ if (pgprot_val(prot) == PROT_READ)
++ flags = ASSIGN_readonly;
++ error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
++ if (error != 0)
++ goto out;
++
++ prot = vma->vm_page_prot;
++ error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
++ /*
++ * VM_PFNMAP is set in remap_pfn_range().
++ * Reset the flag to avoid BUG_ON() in do_no_page().
++ */
++ vma->vm_flags &= ~VM_PFNMAP;
++
++ if (error != 0) {
++ error = HYPERVISOR_zap_physmap(gpfn, 0);
++ if (error)
++ BUG(); /* XXX */
++ } else {
++ atomic_inc(&entry->map_count);
++ entry->gpfn = gpfn;
++ }
++
++out:
++ return error;
++}
++
++static void
++xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
++ int i)
++{
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++ unsigned long gpfn = entry->gpfn;
++ /* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
++ (vma->vm_pgoff - privcmd_range->pgoff); */
++ int error;
++
++ error = HYPERVISOR_zap_physmap(gpfn, 0);
++ if (error)
++ BUG(); /* XXX */
++ entry->gpfn = INVALID_GPFN;
++}
++
++static void
++xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
++ int i)
++{
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++ if (entry->gpfn != INVALID_GPFN)
++ atomic_inc(&entry->map_count);
++ else
++ BUG_ON(atomic_read(&entry->map_count) != 0);
++}
++
++static void
++xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
++ int i)
++{
++ struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
++ if (entry->gpfn != INVALID_GPFN &&
++ atomic_dec_and_test(&entry->map_count))
++ xen_ia64_privcmd_entry_munmap(privcmd_range, i);
++}
++
++static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
++static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
++
++static struct page *
++xen_ia64_privcmd_vma_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ return NOPAGE_SIGBUS;
++}
++
++struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
++ .open = xen_ia64_privcmd_vma_open,
++ .close = xen_ia64_privcmd_vma_close,
++ .nopage = xen_ia64_privcmd_vma_nopage
++};
++
++static void
++__xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
++ struct xen_ia64_privcmd_vma *privcmd_vma,
++ struct xen_ia64_privcmd_range *privcmd_range)
++{
++ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
++ unsigned long num_entries =
++ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long i;
++
++	BUG_ON(vma->vm_pgoff < privcmd_range->pgoff);
++ BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
++
++ privcmd_vma->range = privcmd_range;
++ privcmd_vma->num_entries = num_entries;
++ privcmd_vma->entries = &privcmd_range->entries[entry_offset];
++	for (i = 0; i < privcmd_vma->num_entries; i++)
++		xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
++
++ vma->vm_private_data = privcmd_vma;
++ vma->vm_ops = &xen_ia64_privcmd_vm_ops;
++}
++
++static void
++xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
++{
++	struct xen_ia64_privcmd_vma *old_privcmd_vma =
++		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++	struct xen_ia64_privcmd_vma *privcmd_vma;
++	struct xen_ia64_privcmd_range *privcmd_range = old_privcmd_vma->range;
++
++ atomic_inc(&privcmd_range->ref_count);
++ /* vm_op->open() can't fail. */
++ privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
++ /* copy original value if necessary */
++ privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
++
++ __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
++}
++
++static void
++xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
++{
++ struct xen_ia64_privcmd_vma *privcmd_vma =
++ (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
++ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
++ unsigned long i;
++
++ for (i = 0; i < privcmd_vma->num_entries; i++) {
++ xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
++ cond_resched();
++ }
++ vma->vm_private_data = NULL;
++ kfree(privcmd_vma);
++
++ if (atomic_dec_and_test(&privcmd_range->ref_count)) {
++#if 1
++ for (i = 0; i < privcmd_range->num_entries; i++) {
++ struct xen_ia64_privcmd_entry *entry =
++ &privcmd_range->entries[i];
++ BUG_ON(atomic_read(&entry->map_count) != 0);
++ BUG_ON(entry->gpfn != INVALID_GPFN);
++ }
++#endif
++ if (privcmd_range->callback)
++ (*privcmd_range->callback)(privcmd_range,
++ privcmd_range->private);
++ release_resource(privcmd_range->res);
++ kfree(privcmd_range->res);
++ vfree(privcmd_range);
++ }
++}
++
++int
++privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
++{
++ struct xen_ia64_privcmd_vma *privcmd_vma =
++ (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
++ return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
++}
++
++int
++privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++ int error;
++ unsigned long size = vma->vm_end - vma->vm_start;
++ unsigned long num_entries = size >> PAGE_SHIFT;
++ struct xen_ia64_privcmd_range *privcmd_range = NULL;
++ struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
++ struct resource *res = NULL;
++ unsigned long i;
++ BUG_ON(!is_running_on_xen());
++
++ BUG_ON(file->private_data != NULL);
++
++ error = -ENOMEM;
++ privcmd_range =
++ vmalloc(sizeof(*privcmd_range) +
++ sizeof(privcmd_range->entries[0]) * num_entries);
++ if (privcmd_range == NULL)
++ goto out_enomem0;
++ privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
++ if (privcmd_vma == NULL)
++ goto out_enomem1;
++ privcmd_vma->is_privcmd_mmapped = 0;
++
++ res = kzalloc(sizeof(*res), GFP_KERNEL);
++ if (res == NULL)
++ goto out_enomem1;
++ res->name = "Xen privcmd mmap";
++ error = allocate_resource(&iomem_resource, res, size,
++ privcmd_resource_min, privcmd_resource_max,
++ privcmd_resource_align, NULL, NULL);
++ if (error)
++ goto out_enomem1;
++ privcmd_range->res = res;
++
++ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++
++ atomic_set(&privcmd_range->ref_count, 1);
++ privcmd_range->pgoff = vma->vm_pgoff;
++ privcmd_range->num_entries = num_entries;
++ privcmd_range->private = NULL;
++ privcmd_range->callback = NULL;
++ for (i = 0; i < privcmd_range->num_entries; i++)
++ xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
++
++ __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
++ return 0;
++
++out_enomem1:
++ kfree(res);
++ kfree(privcmd_vma);
++out_enomem0:
++ vfree(privcmd_range);
++ return error;
++}
++
++int
++direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address, /* process virtual address */
++ unsigned long gmfn, /* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid) /* target domain */
++{
++ struct xen_ia64_privcmd_vma *privcmd_vma =
++ (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
++ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
++
++ unsigned long i;
++ unsigned long offset;
++ int error = 0;
++ BUG_ON(!is_running_on_xen());
++
++#if 0
++	if (prot != vma->vm_page_prot)
++ return -EINVAL;
++#endif
++
++ i = (address - vma->vm_start) >> PAGE_SHIFT;
++ for (offset = 0; offset < size; offset += PAGE_SIZE) {
++		error = xen_ia64_privcmd_entry_mmap(vma,
++				(address + offset) & PAGE_MASK,
++				privcmd_range, entry_offset + i,
++				gmfn, prot, domid);
++ if (error != 0)
++ break;
++
++ i++;
++ gmfn++;
++ }
++
++ return error;
++}
++
++
++/**************************************************************************
++ * expose p2m table
++ */
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++#include <linux/cpu.h>
++#include <asm/uaccess.h>
++
++int p2m_initialized __read_mostly = 0;
++
++unsigned long p2m_min_low_pfn __read_mostly;
++unsigned long p2m_max_low_pfn __read_mostly;
++unsigned long p2m_convert_min_pfn __read_mostly;
++unsigned long p2m_convert_max_pfn __read_mostly;
++
++static struct resource p2m_resource = {
++ .name = "Xen p2m table",
++ .flags = IORESOURCE_MEM,
++};
++static unsigned long p2m_assign_start_pfn __read_mostly;
++static unsigned long p2m_assign_end_pfn __read_mostly;
++static unsigned long p2m_expose_size; /* referenced only during resume,
++                                       * so __read_mostly doesn't make sense.
++                                       */
++volatile const pte_t *p2m_pte __read_mostly;
++
++#define GRANULE_PFN PTRS_PER_PTE
++static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
++
++#define ROUNDDOWN(x, y) ((x) & ~((y) - 1))
++#define ROUNDUP(x, y) (((x) + (y) - 1) & ~((y) - 1))
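++
++/*
++ * Example (illustration only): both macros assume y is a power of two,
++ * e.g. ROUNDDOWN(0x12345, 0x1000) == 0x12000 and
++ * ROUNDUP(0x12345, 0x1000) == 0x13000.
++ */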
++
++#define P2M_PREFIX "Xen p2m: "
++
++static int xen_ia64_p2m_expose __read_mostly = 1;
++module_param(xen_ia64_p2m_expose, int, 0);
++MODULE_PARM_DESC(xen_ia64_p2m_expose,
++ "enable/disable xen/ia64 p2m exposure optimization\n");
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
++module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
++MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
++ "use/unuse dtr to map exposed p2m table\n");
++
++static const int p2m_page_shifts[] = {
++ _PAGE_SIZE_4K,
++ _PAGE_SIZE_8K,
++ _PAGE_SIZE_16K,
++ _PAGE_SIZE_64K,
++ _PAGE_SIZE_256K,
++ _PAGE_SIZE_1M,
++ _PAGE_SIZE_4M,
++ _PAGE_SIZE_16M,
++ _PAGE_SIZE_64M,
++ _PAGE_SIZE_256M,
++};
++
++struct p2m_itr_arg {
++ unsigned long vaddr;
++ unsigned long pteval;
++ unsigned long log_page_size;
++};
++static struct p2m_itr_arg p2m_itr_arg __read_mostly;
++
++/* This should be in asm-ia64/kregs.h */
++#define IA64_TR_P2M_TABLE 3
++
++static void
++p2m_itr(void *info)
++{
++ struct p2m_itr_arg *arg = (struct p2m_itr_arg*)info;
++ ia64_itr(0x2, IA64_TR_P2M_TABLE,
++ arg->vaddr, arg->pteval, arg->log_page_size);
++ ia64_srlz_d();
++}
++
++static int
++p2m_expose_dtr_call(struct notifier_block *self,
++ unsigned long event, void *ptr)
++{
++ unsigned int cpu = (unsigned int)(long)ptr;
++ if (event != CPU_ONLINE)
++ return 0;
++ if (p2m_initialized && xen_ia64_p2m_expose_use_dtr) {
++ unsigned int me = get_cpu();
++ if (cpu == me)
++ p2m_itr(&p2m_itr_arg);
++ else
++ smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg,
++ 1, 1);
++ put_cpu();
++ }
++ return 0;
++}
++
++static struct notifier_block p2m_expose_dtr_hotplug_notifier = {
++ .notifier_call = p2m_expose_dtr_call,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++
++static inline unsigned long
++p2m_table_size(unsigned long num_pfn)
++{
++ return ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
++}
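++
++/*
++ * Worked example (editor's illustration): one page of PTEs covers
++ * PTRS_PER_PTE pfns, so assuming 16KB pages and 8-byte PTEs
++ * (PTRS_PER_PTE == 2048), p2m_table_size(4096) is two pages (32KB)
++ * of p2m table for 4096 guest pfns.
++ */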
++
++static int
++p2m_expose_init(void)
++{
++ unsigned long num_pfn;
++ unsigned long p2m_size = 0;
++ unsigned long align = ~0UL;
++ int error = 0;
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ int i;
++ unsigned long log_page_size = 0;
++#endif
++
++ if (!xen_ia64_p2m_expose)
++ return -ENOSYS;
++ if (p2m_initialized)
++ return 0;
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ error = register_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
++ if (error < 0)
++ return error;
++#endif
++
++ lock_cpu_hotplug();
++ if (p2m_initialized)
++ goto out;
++
++#ifdef CONFIG_DISCONTIGMEM
++ p2m_min_low_pfn = min_low_pfn;
++ p2m_max_low_pfn = max_low_pfn;
++#else
++ p2m_min_low_pfn = 0;
++ p2m_max_low_pfn = max_pfn;
++#endif
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ if (xen_ia64_p2m_expose_use_dtr) {
++ unsigned long page_size = 0;
++ unsigned long granule_pfn = 0;
++ p2m_size = p2m_table_size(p2m_max_low_pfn - p2m_min_low_pfn);
++ for (i = 0;
++		     i < ARRAY_SIZE(p2m_page_shifts);
++ i++) {
++ log_page_size = p2m_page_shifts[i];
++ page_size = 1UL << log_page_size;
++ if (page_size < p2m_size)
++ continue;
++
++ granule_pfn = max(page_size >> PAGE_SHIFT,
++ p2m_granule_pfn);
++ p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
++ granule_pfn);
++ p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
++ granule_pfn);
++ num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
++ p2m_expose_size = num_pfn << PAGE_SHIFT;
++ p2m_size = p2m_table_size(num_pfn);
++ p2m_size = ROUNDUP(p2m_size,
++ granule_pfn << PAGE_SHIFT);
++ if (p2m_size == page_size)
++ break;
++ }
++ if (p2m_size != page_size) {
++ printk(KERN_ERR "p2m_size != page_size\n");
++ error = -EINVAL;
++ goto out;
++ }
++ align = max(privcmd_resource_align, granule_pfn << PAGE_SHIFT);
++ } else
++#endif
++ {
++ BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
++ p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
++ p2m_granule_pfn);
++ p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
++ p2m_granule_pfn);
++ num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
++ p2m_expose_size = num_pfn << PAGE_SHIFT;
++ p2m_size = p2m_table_size(num_pfn);
++ p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
++ align = max(privcmd_resource_align,
++ p2m_granule_pfn << PAGE_SHIFT);
++ }
++
++ /* use privcmd region */
++ error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
++ privcmd_resource_min, privcmd_resource_max,
++ align, NULL, NULL);
++ if (error) {
++ printk(KERN_ERR P2M_PREFIX
++ "can't allocate region for p2m exposure "
++ "[0x%016lx, 0x%016lx] 0x%016lx\n",
++ p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
++ goto out;
++ }
++
++ p2m_assign_start_pfn = p2m_resource.start >> PAGE_SHIFT;
++ p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
++
++ error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
++ p2m_assign_start_pfn,
++ p2m_expose_size, p2m_granule_pfn);
++ if (error) {
++ printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
++ error);
++ printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
++ "expose_size 0x%016lx granule 0x%016lx\n",
++ p2m_convert_min_pfn, p2m_assign_start_pfn,
++		       p2m_expose_size, p2m_granule_pfn);
++ release_resource(&p2m_resource);
++ goto out;
++ }
++ p2m_pte = (volatile const pte_t*)pfn_to_kaddr(p2m_assign_start_pfn);
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ if (xen_ia64_p2m_expose_use_dtr) {
++ p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
++ << PAGE_SHIFT);
++ p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
++ PAGE_KERNEL));
++ p2m_itr_arg.log_page_size = log_page_size;
++ smp_mb();
++ smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
++ p2m_itr(&p2m_itr_arg);
++ }
++#endif
++ smp_mb();
++ p2m_initialized = 1;
++	printk(KERN_INFO P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
++ p2m_convert_min_pfn << PAGE_SHIFT,
++ (p2m_convert_max_pfn << PAGE_SHIFT) + PAGE_SIZE);
++	printk(KERN_INFO P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
++ p2m_assign_start_pfn << PAGE_SHIFT,
++ (p2m_assign_end_pfn << PAGE_SHIFT) + PAGE_SIZE,
++ p2m_size / 1024);
++out:
++ unlock_cpu_hotplug();
++ return error;
++}
++
++#ifdef notyet
++void
++p2m_expose_cleanup(void)
++{
++ BUG_ON(!p2m_initialized);
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ unregister_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
++#endif
++ release_resource(&p2m_resource);
++}
++#endif
++
++static void
++p2m_expose_resume(void)
++{
++ int error;
++
++ if (!xen_ia64_p2m_expose || !p2m_initialized)
++ return;
++
++	/*
++	 * We can't call {lock, unlock}_cpu_hotplug() because
++	 * they require process context.
++	 * We don't need them anyway: only one CPU is running and
++	 * interrupts are masked during resume.
++	 */
++ error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
++ p2m_assign_start_pfn,
++ p2m_expose_size, p2m_granule_pfn);
++ if (error) {
++ printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
++ error);
++ printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
++ "expose_size 0x%016lx granule 0x%016lx\n",
++ p2m_convert_min_pfn, p2m_assign_start_pfn,
++		       p2m_expose_size, p2m_granule_pfn);
++		p2m_initialized = 0;
++		smp_mb();
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++		if (xen_ia64_p2m_expose_use_dtr)
++			ia64_ptr(0x2, p2m_itr_arg.vaddr,
++				 p2m_itr_arg.log_page_size);
++#endif
++
++ /*
++ * We can't call those clean up functions because they
++ * require process context.
++ */
++#if 0
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ if (xen_ia64_p2m_expose_use_dtr)
++ unregister_cpu_notifier(
++ &p2m_expose_dtr_hotplug_notifier);
++#endif
++ release_resource(&p2m_resource);
++#endif
++ }
++}
++
++/* XXX make this inline? */
++unsigned long
++p2m_phystomach(unsigned long gpfn)
++{
++ volatile const pte_t *pte;
++ unsigned long mfn;
++ unsigned long pteval;
++
++ if (!p2m_initialized ||
++ gpfn < p2m_min_low_pfn || gpfn > p2m_max_low_pfn
++ /* || !pfn_valid(gpfn) */)
++ return INVALID_MFN;
++ pte = p2m_pte + (gpfn - p2m_convert_min_pfn);
++
++ mfn = INVALID_MFN;
++ if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
++ pte_present(__pte(pteval)) &&
++ pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
++ mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
++
++ return mfn;
++}
++
++EXPORT_SYMBOL_GPL(p2m_initialized);
++EXPORT_SYMBOL_GPL(p2m_min_low_pfn);
++EXPORT_SYMBOL_GPL(p2m_max_low_pfn);
++EXPORT_SYMBOL_GPL(p2m_convert_min_pfn);
++EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
++EXPORT_SYMBOL_GPL(p2m_pte);
++EXPORT_SYMBOL_GPL(p2m_phystomach);
++
++/**************************************************************************
++ * foreign domain p2m mapping
++ */
++#include <asm/xen/xencomm.h>
++#include <xen/public/privcmd.h>
++
++struct foreign_p2m_private {
++ unsigned long gpfn;
++ domid_t domid;
++};
++
++static void
++xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
++ void *arg)
++{
++ struct foreign_p2m_private *private = (struct foreign_p2m_private*)arg;
++ int ret;
++
++ privcmd_range->private = NULL;
++ privcmd_range->callback = NULL;
++
++ ret = HYPERVISOR_unexpose_foreign_p2m(private->gpfn, private->domid);
++ if (ret)
++ printk(KERN_WARNING
++ "unexpose_foreign_p2m hypercall failed.\n");
++ kfree(private);
++}
++
++int
++xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
++{
++ /*
++ * hypercall->
++ * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
++ * arg1: va
++ * arg2: domid
++ * arg3: __user* memmap_info
++ * arg4: flags
++ */
++
++ int ret = 0;
++ struct mm_struct *mm = current->mm;
++
++ unsigned long vaddr = hypercall->arg[1];
++ domid_t domid = hypercall->arg[2];
++ struct xen_ia64_memmap_info __user *u_memmap_info =
++ (struct xen_ia64_memmap_info __user *)hypercall->arg[3];
++
++ struct xen_ia64_memmap_info memmap_info;
++ size_t memmap_size;
++ struct xen_ia64_memmap_info *k_memmap_info = NULL;
++	long max_gpfn;
++ unsigned long p2m_size;
++ struct resource *res;
++ unsigned long gpfn;
++
++ struct vm_area_struct *vma;
++ void *p;
++ unsigned long prev_src_gpfn_end;
++
++ struct xen_ia64_privcmd_vma *privcmd_vma;
++ struct xen_ia64_privcmd_range *privcmd_range;
++ struct foreign_p2m_private *private = NULL;
++
++ BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
++
++	private = kmalloc(sizeof(*private), GFP_KERNEL);
++	if (private == NULL) {
++		ret = -ENOMEM;
++		goto kfree_out;
++	}
++
++	if (copy_from_user(&memmap_info, u_memmap_info, sizeof(memmap_info))) {
++		ret = -EFAULT;
++		goto kfree_out;
++	}
++ /* memmap_info integrity check */
++ if (memmap_info.efi_memdesc_size < sizeof(efi_memory_desc_t) ||
++ memmap_info.efi_memmap_size < memmap_info.efi_memdesc_size ||
++ (memmap_info.efi_memmap_size % memmap_info.efi_memdesc_size)
++ != 0) {
++ ret = -EINVAL;
++ goto kfree_out;
++ }
++
++ memmap_size = sizeof(*k_memmap_info) + memmap_info.efi_memmap_size;
++	k_memmap_info = kmalloc(memmap_size, GFP_KERNEL);
++	if (k_memmap_info == NULL) {
++		ret = -ENOMEM;
++		goto kfree_out;
++	}
++ if (copy_from_user(k_memmap_info, u_memmap_info, memmap_size)) {
++ ret = -EFAULT;
++ goto kfree_out;
++ }
++	/* The integrity of k_memmap_info is checked by the expose
++	   foreign p2m hypercall. */
++
++ max_gpfn = HYPERVISOR_memory_op(XENMEM_maximum_gpfn, &domid);
++ if (max_gpfn < 0) {
++ ret = max_gpfn;
++ goto kfree_out;
++ }
++ p2m_size = p2m_table_size(max_gpfn + 1);
++
++ down_write(&mm->mmap_sem);
++
++ vma = find_vma(mm, vaddr);
++	if (vma == NULL || vma->vm_ops != &xen_ia64_privcmd_vm_ops ||
++	    vaddr != vma->vm_start ||
++	    (vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC) ||
++	    !privcmd_enforce_singleshot_mapping(vma)) {
++		ret = -EINVAL;
++		goto mmap_out;
++	}
++
++ privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ res = privcmd_vma->range->res;
++ if (p2m_size > (res->end - res->start + 1) ||
++ p2m_size > vma->vm_end - vma->vm_start) {
++ ret = -EINVAL;
++ goto mmap_out;
++ }
++
++ gpfn = res->start >> PAGE_SHIFT;
++ /*
++ * arg0: dest_gpfn
++ * arg1: domid
++ * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
++ * arg3: flags
++	 * The hypercall checks its integrity, simplifies it, and
++	 * copies it back to us.
++ */
++ ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
++ xencomm_map_no_alloc(k_memmap_info, memmap_size),
++ hypercall->arg[4]);
++ if (ret)
++ goto mmap_out;
++
++ privcmd_range = (struct xen_ia64_privcmd_range*)privcmd_vma->range;
++ prev_src_gpfn_end = 0;
++ for (p = k_memmap_info->memdesc;
++ p < (void*)&k_memmap_info->memdesc[0] +
++ k_memmap_info->efi_memmap_size;
++ p += k_memmap_info->efi_memdesc_size) {
++ efi_memory_desc_t* md = p;
++ unsigned long src_gpfn = md->phys_addr >> PAGE_SHIFT;
++ unsigned long src_gpfn_end =
++ (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
++ PAGE_SHIFT;
++ unsigned long num_src_gpfn;
++ unsigned long gpfn_offset;
++ unsigned long size;
++ unsigned int i;
++
++ if (src_gpfn <= prev_src_gpfn_end)
++ src_gpfn = prev_src_gpfn_end + 1;
++ if (src_gpfn_end <= prev_src_gpfn_end)
++ continue;
++
++ src_gpfn &= ~(PTRS_PER_PTE - 1);
++ src_gpfn_end = (src_gpfn_end + PTRS_PER_PTE - 1) &
++ ~(PTRS_PER_PTE - 1);
++ num_src_gpfn = src_gpfn_end - src_gpfn;
++ gpfn_offset = src_gpfn / PTRS_PER_PTE;
++ size = p2m_table_size(num_src_gpfn);
++
++ prev_src_gpfn_end = src_gpfn_end;
++ ret = remap_pfn_range(vma,
++ vaddr + (gpfn_offset << PAGE_SHIFT),
++ gpfn + gpfn_offset, size,
++ vma->vm_page_prot);
++ if (ret) {
++			for (i = 0; i < gpfn_offset; i++) {
++ struct xen_ia64_privcmd_entry *entry =
++ &privcmd_range->entries[i];
++ BUG_ON(atomic_read(&entry->map_count) != 1 &&
++ atomic_read(&entry->map_count) != 0);
++ atomic_set(&entry->map_count, 0);
++ entry->gpfn = INVALID_GPFN;
++ }
++ (void)HYPERVISOR_unexpose_foreign_p2m(gpfn, domid);
++ goto mmap_out;
++ }
++
++ for (i = gpfn_offset;
++ i < gpfn_offset + (size >> PAGE_SHIFT);
++ i++) {
++ struct xen_ia64_privcmd_entry *entry =
++ &privcmd_range->entries[i];
++ BUG_ON(atomic_read(&entry->map_count) != 0);
++ BUG_ON(entry->gpfn != INVALID_GPFN);
++ atomic_inc(&entry->map_count);
++ entry->gpfn = gpfn + i;
++ }
++ }
++
++ private->gpfn = gpfn;
++ private->domid = domid;
++
++ privcmd_range->callback = &xen_foreign_p2m_unexpose;
++ privcmd_range->private = private;
++
++mmap_out:
++ up_write(&mm->mmap_sem);
++kfree_out:
++ kfree(k_memmap_info);
++ if (ret != 0)
++ kfree(private);
++ return ret;
++}
++#endif
++
++/**************************************************************************
++ * for xenoprof
++ */
++struct resource*
++xen_ia64_allocate_resource(unsigned long size)
++{
++ struct resource *res;
++ int error;
++
++ res = kzalloc(sizeof(*res), GFP_KERNEL);
++ if (res == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ res->name = "Xen";
++ res->flags = IORESOURCE_MEM;
++ error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
++ privcmd_resource_min, privcmd_resource_max,
++ IA64_GRANULE_SIZE, NULL, NULL);
++ if (error) {
++ kfree(res);
++ return ERR_PTR(error);
++ }
++ return res;
++}
++EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
++
++void
++xen_ia64_release_resource(struct resource *res)
++{
++ release_resource(res);
++ kfree(res);
++}
++EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
++
++void
++xen_ia64_unmap_resource(struct resource *res)
++{
++ unsigned long gpfn = res->start >> PAGE_SHIFT;
++	unsigned long nr_pages = (res->end - res->start + 1) >> PAGE_SHIFT;
++ unsigned long i;
++
++ for (i = 0; i < nr_pages; i++) {
++ int error = HYPERVISOR_zap_physmap(gpfn + i, 0);
++ if (error)
++ printk(KERN_ERR
++ "%s:%d zap_phsymap failed %d gpfn %lx\n",
++ __func__, __LINE__, error, gpfn + i);
++ }
++ xen_ia64_release_resource(res);
++}
++EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
++
++/**************************************************************************
++ * opt feature
++ */
++void
++xen_ia64_enable_opt_feature(void)
++{
++ /* Enable region 7 identity map optimizations in Xen */
++ struct xen_ia64_opt_feature optf;
++
++ optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
++ optf.on = XEN_IA64_OPTF_ON;
++ optf.pgprot = pgprot_val(PAGE_KERNEL);
++ optf.key = 0; /* No key on linux. */
++ HYPERVISOR_opt_feature(&optf);
++}
++
++/**************************************************************************
++ * suspend/resume
++ */
++void
++xen_post_suspend(int suspend_cancelled)
++{
++ if (suspend_cancelled)
++ return;
++
++ p2m_expose_resume();
++ xen_ia64_enable_opt_feature();
++ /* add more if necessary */
++}
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/machvec.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/machvec.c
+--- linux-2.6.18.8/arch/ia64/xen/machvec.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/machvec.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,4 @@
++#define MACHVEC_PLATFORM_NAME xen
++#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h>
++#include <asm/machvec_init.h>
++
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/Makefile linux-2.6.18-xen-3.3.0/arch/ia64/xen/Makefile
+--- linux-2.6.18.8/arch/ia64/xen/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,9 @@
++#
++# Makefile for Xen components
++#
++
++obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o \
++ hypervisor.o util.o xencomm.o xcom_hcall.o \
++ xcom_privcmd.o xcom_asm.o xen_dma.o
++
++obj-$(CONFIG_IA64_GENERIC) += machvec.o
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/swiotlb.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/swiotlb.c
+--- linux-2.6.18.8/arch/ia64/xen/swiotlb.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/swiotlb.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,906 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is for IA-64 and EM64T platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ * David Mosberger-Tang <davidm@hpl.hp.com>
++ *
++ * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
++ * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
++ * unnecessary i-cache flushing.
++ * 04/07/.. ak Better overflow handling. Assorted fixes.
++ * 05/09/10 linville Add support for syncing ranges, support syncing for
++ * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
++ */
++
++#include <linux/cache.h>
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++
++#include <asm/io.h>
++#include <asm/dma.h>
++#include <asm/scatterlist.h>
++
++#include <linux/init.h>
++#include <linux/bootmem.h>
++
++#ifdef CONFIG_XEN
++#include <xen/gnttab.h>
++#include <asm/gnttab_dma.h>
++/*
++ * What DMA mask should Xen use to remap the bounce buffer pool? Most
++ * reports seem to indicate 30 bits is sufficient, except maybe for old
++ * sound cards that we probably don't care about anyway. If we need to,
++ * we could put in some smarts to try to lower it, but hopefully it's
++ * not necessary.
++ */
++#define DMA_BITS (30)
++#endif
++
++#define OFFSET(val,align) ((unsigned long) \
++ ( (val) & ( (align) - 1)))
++
++#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
++#define SG_ENT_PHYS_ADDRESS(SG) virt_to_bus(SG_ENT_VIRT_ADDRESS(SG))
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2. What is the appropriate value ?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE 128
++
++/*
++ * log of the size of each IO TLB slab. The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
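++
++/*
++ * Illustration (editor's sketch): with IO_TLB_SHIFT == 11 each slab is
++ * 2KB, a segment of IO_TLB_SEGSIZE slabs spans 128 * 2KB == 256KB (the
++ * largest single bounce mapping), and, assuming ia64's default 16KB
++ * pages, one page holds SLABS_PER_PAGE == 8 slabs.
++ */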
++
++/*
++ * Minimum IO TLB size to bother booting with. Systems with mainly
++ * 64bit capable cards will only lightly use the swiotlb. If we can't
++ * allocate a contiguous 1MB, we're probably in trouble anyway.
++ */
++#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
++
++/*
++ * Enumeration for sync targets
++ */
++enum dma_sync_target {
++ SYNC_FOR_CPU = 0,
++ SYNC_FOR_DEVICE = 1,
++};
++
++int swiotlb_force;
++
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static char *io_tlb_start, *io_tlb_end;
++
++/*
++ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
++ * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
++ */
++static unsigned long io_tlb_nslabs;
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static unsigned char **io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
++
++static int __init
++setup_io_tlb_npages(char *str)
++{
++ if (isdigit(*str)) {
++ io_tlb_nslabs = simple_strtoul(str, &str, 0);
++ /* avoid tail segment of size < IO_TLB_SEGSIZE */
++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
++ }
++ if (*str == ',')
++ ++str;
++ if (!strcmp(str, "force"))
++ swiotlb_force = 1;
++ return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
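++/*
++ * Example (illustrative): booting with "swiotlb=16384,force" reserves
++ * 16384 slabs (32MB with 2KB slabs) and forces bounce buffering even
++ * for buffers the device could address directly.
++ */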
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++ unsigned long i;
++
++ if (!io_tlb_nslabs) {
++ io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
++ }
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ io_tlb_nslabs = roundup_pow_of_two(io_tlb_nslabs);
++#endif
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
++ if (!io_tlb_start)
++ panic("Cannot allocate SWIOTLB buffer");
++ io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
++
++#ifdef CONFIG_XEN
++	for (i = 0; i < io_tlb_nslabs; i += IO_TLB_SEGSIZE) {
++ if (xen_create_contiguous_region(
++ (unsigned long)io_tlb_start +
++ (i << IO_TLB_SHIFT),
++ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++ DMA_BITS))
++ panic("Failed to setup Xen contiguous region");
++ }
++#endif
++
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
++ * between io_tlb_start and io_tlb_end.
++ */
++ io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
++ for (i = 0; i < io_tlb_nslabs; i++)
++ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ io_tlb_index = 0;
++ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++#ifdef CONFIG_XEN
++ if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
++ get_order(io_tlb_overflow), DMA_BITS))
++ panic("Failed to setup Xen contiguous region for overflow");
++#endif
++ printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
++ virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
++}
++
++void
++swiotlb_init (void)
++{
++ swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
++}
++
++/*
++ * Systems with larger DMA zones (those that don't support ISA) can
++ * initialize the swiotlb later using the slab allocator if needed.
++ * This should be just like above, but with some error catching.
++ */
++int
++swiotlb_late_init_with_default_size (size_t default_size)
++{
++ unsigned long i, req_nslabs = io_tlb_nslabs;
++ unsigned int order;
++
++ if (!io_tlb_nslabs) {
++ io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
++ }
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ io_tlb_nslabs = roundup_pow_of_two(io_tlb_nslabs);
++#endif
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
++ io_tlb_nslabs = SLABS_PER_PAGE << order;
++
++ while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
++ io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
++ order);
++ if (io_tlb_start)
++ break;
++ order--;
++ }
++
++ if (!io_tlb_start)
++ goto cleanup1;
++
++ if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
++ printk(KERN_WARNING "Warning: only able to allocate %ld MB "
++ "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
++ io_tlb_nslabs = SLABS_PER_PAGE << order;
++ }
++ io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
++ memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
++
++#ifdef CONFIG_XEN
++	for (i = 0; i < io_tlb_nslabs; i += IO_TLB_SEGSIZE) {
++ if (xen_create_contiguous_region(
++ (unsigned long)io_tlb_start +
++ (i << IO_TLB_SHIFT),
++ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++ DMA_BITS))
++ panic("Failed to setup Xen contiguous region");
++ }
++#endif
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
++ * between io_tlb_start and io_tlb_end.
++ */
++ io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
++ get_order(io_tlb_nslabs * sizeof(int)));
++ if (!io_tlb_list)
++ goto cleanup2;
++
++ for (i = 0; i < io_tlb_nslabs; i++)
++ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ io_tlb_index = 0;
++
++ io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
++ get_order(io_tlb_nslabs * sizeof(char *)));
++ if (!io_tlb_orig_addr)
++ goto cleanup3;
++
++ memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
++ get_order(io_tlb_overflow));
++ if (!io_tlb_overflow_buffer)
++ goto cleanup4;
++
++#ifdef CONFIG_XEN
++ if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
++ get_order(io_tlb_overflow), DMA_BITS))
++ panic("Failed to setup Xen contiguous region for overflow");
++#endif
++ printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
++ "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
++ virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
++
++ return 0;
++
++cleanup4:
++ free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
++ sizeof(char *)));
++ io_tlb_orig_addr = NULL;
++cleanup3:
++ free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
++ sizeof(int)));
++ io_tlb_list = NULL;
++ io_tlb_end = NULL;
++cleanup2:
++ free_pages((unsigned long)io_tlb_start, order);
++ io_tlb_start = NULL;
++cleanup1:
++ io_tlb_nslabs = req_nslabs;
++ return -ENOMEM;
++}
++
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
++{
++ dma_addr_t mask = 0xffffffff;
++ /* If the device has a mask, use it, otherwise default to 32 bits */
++ if (hwdev && hwdev->dma_mask)
++ mask = *hwdev->dma_mask;
++ return (addr & ~mask) != 0;
++}
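++
++/*
++ * Example (illustration only): a device with a 30-bit DMA mask
++ * (*hwdev->dma_mask == 0x3fffffff) makes address_needs_mapping()
++ * return nonzero for any bus address at or above 1GB, forcing a
++ * bounce through the IO TLB pool.
++ */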
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, char *buffer, size_t size, int dir)
++{
++ unsigned long flags;
++ char *dma_addr;
++ unsigned int nslots, stride, index, wrap;
++ char *slot_buf;
++ int i;
++
++ /*
++ * For mappings greater than a page, we limit the stride (and
++ * hence alignment) to a page size.
++ */
++ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ if (size > PAGE_SIZE)
++ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++ else
++ stride = 1;
++
++ BUG_ON(!nslots);
++
++ /*
++	 * Find a suitable number of IO TLB entries that will fit this
++ * request and allocate a buffer from that IO TLB pool.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ wrap = index = ALIGN(io_tlb_index, stride);
++
++ if (index >= io_tlb_nslabs)
++ wrap = index = 0;
++
++ do {
++ /*
++ * If we find a slot that indicates we have 'nslots'
++ * number of contiguous buffers, we allocate the
++ * buffers from that slot and mark the entries as '0'
++ * indicating unavailable.
++ */
++ if (io_tlb_list[index] >= nslots) {
++ int count = 0;
++
++ for (i = index; i < (int) (index + nslots); i++)
++ io_tlb_list[i] = 0;
++				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
++ io_tlb_list[i] = ++count;
++ dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
++
++ /*
++ * Update the indices to avoid searching in
++ * the next round.
++ */
++ io_tlb_index = ((index + nslots) < io_tlb_nslabs
++ ? (index + nslots) : 0);
++
++ goto found;
++ }
++ index += stride;
++ if (index >= io_tlb_nslabs)
++ index = 0;
++ } while (index != wrap);
++
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++ return NULL;
++ }
++ found:
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++
++ /*
++ * Save away the mapping from the original address to the DMA address.
++ * This is needed when we sync the memory. Then we sync the buffer if
++ * needed.
++ */
++ slot_buf = buffer;
++ for (i = 0; i < nslots; i++) {
++ io_tlb_orig_addr[index + i] = slot_buf;
++ slot_buf += 1 << IO_TLB_SHIFT;
++ }
++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
++ memcpy(dma_addr, buffer, size);
++
++ return dma_addr;
++}
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ unsigned long flags;
++ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
++ char *buffer = io_tlb_orig_addr[index];
++
++ /*
++ * First, sync the memory before unmapping the entry
++ */
++ if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
++ /*
++		 * bounce: copy the data back into the original buffer
++		 * and delete the bounce buffer.
++ */
++ memcpy(buffer, dma_addr, size);
++
++ /*
++ * Return the buffer to the free list by setting the corresponding
++	 * entries to indicate the number of contiguous entries available.
++ * While returning the entries to the free list, we merge the entries
++ * with slots below and above the pool being returned.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++ io_tlb_list[index + nslots] : 0);
++ /*
++ * Step 1: return the slots to the free list, merging the
++		 * slots with succeeding slots.
++ */
++ for (i = index + nslots - 1; i >= index; i--)
++ io_tlb_list[i] = ++count;
++ /*
++ * Step 2: merge the returned slots with the preceding slots,
++ * if available (non zero)
++ */
++		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
++ io_tlb_list[i] = ++count;
++ }
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size,
++ int dir, int target)
++{
++ int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
++ char *buffer = io_tlb_orig_addr[index];
++
++ switch (target) {
++ case SYNC_FOR_CPU:
++ if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
++ memcpy(buffer, dma_addr, size);
++ else
++ BUG_ON(dir != DMA_TO_DEVICE);
++ break;
++ case SYNC_FOR_DEVICE:
++ if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
++ memcpy(dma_addr, buffer, size);
++ else
++ BUG_ON(dir != DMA_FROM_DEVICE);
++ break;
++ default:
++ BUG();
++ }
++}
++
++void *
++swiotlb_alloc_coherent(struct device *hwdev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flags)
++{
++ unsigned long dev_addr;
++ void *ret;
++ int order = get_order(size);
++
++ /*
++ * XXX fix me: the DMA API should pass us an explicit DMA mask
++ * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
++ * bit range instead of a 16MB one).
++ */
++ flags |= GFP_DMA;
++
++ ret = (void *)__get_free_pages(flags, order);
++#ifdef CONFIG_XEN
++ if (ret && is_running_on_xen()) {
++ if (xen_create_contiguous_region((unsigned long)ret, order,
++ fls64(hwdev->coherent_dma_mask))) {
++ free_pages((unsigned long)ret, order);
++ ret = NULL;
++ } else {
++ /*
++ * Short circuit the rest, xen_create_contiguous_region
++ * should fail if it didn't give us an address within
++ * the mask requested.
++ */
++ memset(ret, 0, size);
++ *dma_handle = virt_to_bus(ret);
++ return ret;
++ }
++ }
++#endif
++ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
++ /*
++ * The allocated memory isn't reachable by the device.
++ * Fall back on swiotlb_map_single().
++ */
++ free_pages((unsigned long) ret, order);
++ ret = NULL;
++ }
++ if (!ret) {
++ /*
++ * We are either out of memory or the device can't DMA
++ * to GFP_DMA memory; fall back on
++ * swiotlb_map_single(), which will grab memory from
++ * the lowest available address range.
++ */
++ dma_addr_t handle;
++ handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
++ if (swiotlb_dma_mapping_error(handle))
++ return NULL;
++
++ ret = bus_to_virt(handle);
++ }
++
++ memset(ret, 0, size);
++ dev_addr = virt_to_bus(ret);
++
++ /* Confirm address can be DMA'd by device */
++ if (address_needs_mapping(hwdev, dev_addr)) {
++		printk(KERN_ERR "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
++ (unsigned long long)*hwdev->dma_mask, dev_addr);
++ panic("swiotlb_alloc_coherent: allocated memory is out of "
++ "range for device");
++ }
++ *dma_handle = dev_addr;
++ return ret;
++}
++
++void
++swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
++ dma_addr_t dma_handle)
++{
++ if (!(vaddr >= (void *)io_tlb_start
++ && vaddr < (void *)io_tlb_end)) {
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)vaddr,
++ get_order(size));
++#endif
++ free_pages((unsigned long) vaddr, get_order(size));
++ } else
++ /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
++ swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
++}
++
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++ /*
++ * Ran out of IOMMU space for this operation. This is very bad.
++	 * Unfortunately the drivers cannot handle this operation properly
++	 * unless they check for dma_mapping_error (most don't).
++	 * When the mapping is small enough, return a static buffer to limit
++ * the damage, or panic when the transfer is too big.
++ */
++ printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
++ "device %s\n", size, dev ? dev->bus_id : "?");
++
++ if (size > io_tlb_overflow && do_panic) {
++ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
++ panic("DMA: Memory would be corrupted\n");
++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
++ panic("DMA: Random memory would be DMAed\n");
++ }
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode. The
++ * physical address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++ unsigned long dev_addr = gnttab_dma_map_virt(ptr);
++ void *map;
++
++ BUG_ON(dir == DMA_NONE);
++ /*
++ * If the pointer passed in happens to be in the device's DMA window,
++ * we can safely return the device addr and not worry about bounce
++ * buffering it.
++ */
++ if (!range_straddles_page_boundary(__pa(ptr), size) &&
++ !address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
++ return dev_addr;
++
++ __gnttab_dma_unmap_page(virt_to_page(ptr));
++ /*
++ * Oh well, have to allocate and map a bounce buffer.
++ */
++ map = map_single(hwdev, ptr, size, dir);
++ if (!map) {
++ swiotlb_full(hwdev, size, dir, 1);
++ map = io_tlb_overflow_buffer;
++ }
++
++ dev_addr = virt_to_bus(map);
++
++ /*
++ * Ensure that the address returned is DMA'ble
++ */
++ if (address_needs_mapping(hwdev, dev_addr))
++ panic("map_single: bounce buffer is not DMA'ble");
++
++ return dev_addr;
++}
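++
++/*
++ * Typical call pattern (illustrative sketch; 'dev', 'buf' and 'len'
++ * are hypothetical):
++ *
++ *	dma_addr_t h = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
++ *	if (swiotlb_dma_mapping_error(h))
++ *		return -EIO;
++ *	...let the device DMA to/from h...
++ *	swiotlb_unmap_single(dev, h, len, DMA_TO_DEVICE);
++ */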
++
++/*
++ * Since DMA is i-cache coherent, any (complete) pages that were written via
++ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
++ * flush them when they get mapped into an executable vm-area.
++ */
++static void
++mark_clean(void *addr, size_t size)
++{
++ unsigned long pg_addr, end;
++
++#ifdef CONFIG_XEN
++ /* XXX: Bad things happen when starting domUs if this is enabled. */
++ if (is_running_on_xen())
++ return;
++#endif
++
++ pg_addr = PAGE_ALIGN((unsigned long) addr);
++ end = (unsigned long) addr + size;
++ while (pg_addr + PAGE_SIZE <= end) {
++ struct page *page = virt_to_page(pg_addr);
++ set_bit(PG_arch_1, &page->flags);
++ pg_addr += PAGE_SIZE;
++ }
++}
++
++/*
++ * Unmap a single streaming mode DMA translation. The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call. All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++ int dir)
++{
++ char *dma_addr = bus_to_virt(dev_addr);
++
++ BUG_ON(dir == DMA_NONE);
++ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
++ unmap_single(hwdev, dma_addr, size, dir);
++ else {
++ __gnttab_dma_unmap_page(virt_to_page(dma_addr));
++ if (dir == DMA_FROM_DEVICE)
++ mark_clean(dma_addr, size);
++ }
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to teardown the dma mapping, you must
++ * call this function before doing so. Before you give the dma
++ * address back to the card, you must first perform a
++ * swiotlb_sync_single_for_device(), and then the device again owns the buffer.
++ */
++static inline void
++swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir, int target)
++{
++ char *dma_addr = bus_to_virt(dev_addr);
++
++ BUG_ON(dir == DMA_NONE);
++ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
++ sync_single(hwdev, dma_addr, size, dir, target);
++ else if (dir == DMA_FROM_DEVICE)
++ mark_clean(dma_addr, size);
++}
++
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
++}
++
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
++}
++
++/*
++ * Same as above, but for a sub-range of the mapping.
++ */
++static inline void
++swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size,
++ int dir, int target)
++{
++ char *dma_addr = bus_to_virt(dev_addr) + offset;
++
++ BUG_ON(dir == DMA_NONE);
++ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
++ sync_single(hwdev, dma_addr, size, dir, target);
++ else if (dir == DMA_FROM_DEVICE)
++ mark_clean(dma_addr, size);
++}
++
++void
++swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size, int dir)
++{
++ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
++ SYNC_FOR_CPU);
++}
++
++void
++swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ unsigned long offset, size_t size, int dir)
++{
++ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
++ SYNC_FOR_DEVICE);
++}
++
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface. Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ * DMA address/length pairs than there are SG table elements.
++ * (for example via virtual mapping capabilities)
++ * The routine returns the number of addr/length pairs actually
++ * used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ void *addr;
++ unsigned long dev_addr;
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++) {
++ addr = SG_ENT_VIRT_ADDRESS(sg);
++ dev_addr = gnttab_dma_map_virt(addr);
++ if (swiotlb_force ||
++ range_straddles_page_boundary(page_to_pseudophys(sg->page)
++ + sg->offset, sg->length) ||
++ address_needs_mapping(hwdev, dev_addr)) {
++ void *map;
++ __gnttab_dma_unmap_page(sg->page);
++			map = map_single(hwdev, addr, sg->length, dir);
++			if (!map) {
++				/* Don't panic here, we expect map_sg users
++				   to do proper error handling. */
++				swiotlb_full(hwdev, sg->length, dir, 0);
++				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++				sg[0].dma_length = 0;
++				return 0;
++			}
++			sg->dma_address = virt_to_bus(map);
++ } else
++ sg->dma_address = dev_addr;
++ sg->dma_length = sg->length;
++ }
++ return nelems;
++}
++
++/*
++ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			unmap_single(hwdev,
++				     (void *)bus_to_virt(sg->dma_address),
++				     sg->dma_length, dir);
++ else {
++ __gnttab_dma_unmap_page(sg->page);
++ if (dir == DMA_FROM_DEVICE)
++ mark_clean(SG_ENT_VIRT_ADDRESS(sg),
++ sg->dma_length);
++ }
++}
++
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++static inline void
++swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir, int target)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ sync_single(hwdev, (void *) sg->dma_address,
++ sg->dma_length, dir, target);
++}
++
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
++}
++
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
++{
++ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
++
++/*
++ * Return whether the given device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * during bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported(struct device *hwdev, u64 mask)
++{
++#ifdef CONFIG_XEN
++ return (virt_to_bus(io_tlb_end - 1)) <= mask;
++#else
++ return (virt_to_bus(io_tlb_end) - 1) <= mask;
++#endif
++}
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
++EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_alloc_coherent);
++EXPORT_SYMBOL(swiotlb_free_coherent);
++EXPORT_SYMBOL(swiotlb_dma_supported);
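
For orientation, a minimal sketch (not part of the patch) of how a driver
reaches these swiotlb entry points through the generic DMA API on this
kernel; the device, buffer, and length below are hypothetical:

	/* Hypothetical driver fragment: map a buffer the device will write,
	 * sync before the CPU reads it, then unmap.  On a swiotlb kernel
	 * these calls dispatch to the swiotlb_* functions above, which may
	 * bounce the buffer through the io_tlb aperture. */
	#include <linux/dma-mapping.h>

	static int example_read_from_device(struct device *dev, void *buf,
					    size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(handle))	/* catches the overflow buffer */
			return -ENOMEM;

		/* ... program the hardware to DMA into 'handle' here ... */

		/* Copy any bounce buffer back before the CPU looks at it. */
		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}
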
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/util.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/util.c
+--- linux-2.6.18.8/arch/ia64/xen/util.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/util.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,102 @@
++/******************************************************************************
++ * arch/ia64/xen/util.c
++ * This file is the ia64 counterpart of drivers/xen/util.c
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++#include <xen/interface/memory.h>
++#include <asm/hypercall.h>
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++ int order;
++ unsigned long virt;
++ unsigned long nr_pages;
++ struct vm_struct *area;
++
++ order = get_order(size);
++ virt = __get_free_pages(GFP_KERNEL, order);
++ if (virt == 0)
++ goto err0;
++ nr_pages = 1 << order;
++ scrub_pages(virt, nr_pages);
++
++ area = kmalloc(sizeof(*area), GFP_KERNEL);
++ if (area == NULL)
++ goto err1;
++
++ area->flags = VM_IOREMAP; /* XXX */
++ area->addr = (void*)virt;
++ area->size = size;
++ area->pages = NULL; /* XXX */
++ area->nr_pages = nr_pages;
++ area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */
++
++ return area;
++
++err1:
++ free_pages(virt, order);
++err0:
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++ unsigned int order = get_order(area->size);
++ unsigned long i;
++ unsigned long phys_addr = __pa(area->addr);
++
++ /* This area is used for foreign page mapping,
++ * so the underlying machine page may not be assigned. */
++ for (i = 0; i < (1 << order); i++) {
++ unsigned long ret;
++ unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &gpfn);
++ ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &reservation);
++ BUG_ON(ret != 1);
++ }
++ free_pages((unsigned long)area->addr, order);
++ kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++
++/*
++ * Local variables:
++ * c-file-style: "linux"
++ * indent-tabs-mode: t
++ * c-indent-level: 8
++ * c-basic-offset: 8
++ * tab-width: 8
++ * End:
++ */
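
A usage sketch for the pair above (hypothetical caller; the grant-mapping
step that xenbus_map_ring_valloc performs in between is elided):

	/* Reserve kernel address space for a foreign mapping, then release
	 * it.  free_vm_area() repopulates the physmap before freeing, as
	 * the loop above shows. */
	static int example_map_foreign_page(void)
	{
		struct vm_struct *area;

		area = alloc_vm_area(PAGE_SIZE);
		if (area == NULL)
			return -ENOMEM;

		/* ... map a foreign frame at area->addr via a grant-table
		 * hypercall; note that area->phys_addr is consumed by
		 * xenbus_map_ring_valloc ... */

		free_vm_area(area);
		return 0;
	}
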
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xcom_asm.S linux-2.6.18-xen-3.3.0/arch/ia64/xen/xcom_asm.S
+--- linux-2.6.18.8/arch/ia64/xen/xcom_asm.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xcom_asm.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,27 @@
++/*
++ * xencomm suspend support
++ * Support routines for Xen
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++#include <asm/asmmacro.h>
++#include <xen/interface/xen.h>
++
++/*
++ * Stub for suspend.
++ * Just force the stacked registers to be written in memory.
++ */
++GLOBAL_ENTRY(xencomm_arch_hypercall_suspend)
++ ;;
++ alloc r20=ar.pfs,0,0,6,0
++ mov r2=__HYPERVISOR_sched_op
++ ;;
++ /* We don't want to deal with RSE. */
++ flushrs
++ mov r33=r32
++ mov r32=2 // SCHEDOP_shutdown
++ ;;
++ break 0x1000
++ ;;
++ br.ret.sptk.many b0
++END(xencomm_arch_hypercall_suspend)
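
Reading the stub: the caller's xencomm descriptor arrives in r32 and is
shifted into the second argument slot (r33), SCHEDOP_shutdown becomes the
first argument (r32), the hypercall number __HYPERVISOR_sched_op goes in
r2, and break 0x1000 traps into Xen. From C the stub is reached through a
prototype along these lines (a sketch; the real declaration lives in the
hypercall headers):

	/* Sketch of the C-visible signature of the stub above, as used by
	 * xencomm_hypercall_suspend() in xcom_hcall.c. */
	struct xencomm_handle;
	int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
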
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xcom_hcall.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/xcom_hcall.c
+--- linux-2.6.18.8/arch/ia64/xen/xcom_hcall.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xcom_hcall.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,671 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
++ *
++ * Tristan Gingold <tristan.gingold@bull.net>
++ *
++ * Copyright (c) 2007
++ * Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ * consolidate mini and inline version.
++ */
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/memory.h>
++#include <xen/interface/xencomm.h>
++#include <xen/interface/version.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/callback.h>
++#include <xen/interface/xsm/acm_ops.h>
++#include <xen/interface/hvm/params.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/interface/vcpu.h>
++#include <xen/interface/kexec.h>
++#include <asm/hypervisor.h>
++#include <asm/page.h>
++#include <asm/uaccess.h>
++#include <asm/xen/xencomm.h>
++#include <asm/perfmon.h>
++
++/* Xencomm notes:
++ * This file defines hypercalls to be used by xencomm. The hypercalls simply
++ * create inlines or mini descriptors for pointers and then call the raw arch
++ * hypercall xencomm_arch_hypercall_XXX
++ *
++ * If the arch wants to directly use these hypercalls, simply define macros
++ * in asm/hypercall.h, eg:
++ * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
++ *
++ * The arch may also define HYPERVISOR_xxx as a function and do more operations
++ * before/after doing the hypercall.
++ *
++ * Note: because only inline or mini descriptors are created, these functions
++ * must only be called with in-kernel memory parameters.
++ */
++
++int
++xencomm_hypercall_console_io(int cmd, int count, char *str)
++{
++ return xencomm_arch_hypercall_console_io
++ (cmd, count, xencomm_map_no_alloc(str, count));
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
++
++int
++xencomm_hypercall_event_channel_op(int cmd, void *op)
++{
++ struct xencomm_handle *desc;
++ desc = xencomm_map_no_alloc(op, sizeof(evtchn_op_t));
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_event_channel_op(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
++
++int
++xencomm_hypercall_xen_version(int cmd, void *arg)
++{
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++
++ switch (cmd) {
++ case XENVER_version:
++ /* do not actually pass an argument */
++ return xencomm_arch_hypercall_xen_version(cmd, 0);
++ case XENVER_extraversion:
++ argsize = sizeof(xen_extraversion_t);
++ break;
++ case XENVER_compile_info:
++ argsize = sizeof(xen_compile_info_t);
++ break;
++ case XENVER_capabilities:
++ argsize = sizeof(xen_capabilities_info_t);
++ break;
++ case XENVER_changeset:
++ argsize = sizeof(xen_changeset_info_t);
++ break;
++ case XENVER_platform_parameters:
++ argsize = sizeof(xen_platform_parameters_t);
++ break;
++ case XENVER_pagesize:
++ argsize = (arg == NULL) ? 0 : sizeof(void *);
++ break;
++ case XENVER_get_features:
++ argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
++ break;
++
++ default:
++ printk("%s: unknown version op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_xen_version(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
++
++int
++xencomm_hypercall_physdev_op(int cmd, void *op)
++{
++ unsigned int argsize;
++
++ switch (cmd) {
++ case PHYSDEVOP_apic_read:
++ case PHYSDEVOP_apic_write:
++ argsize = sizeof(physdev_apic_t);
++ break;
++ case PHYSDEVOP_alloc_irq_vector:
++ case PHYSDEVOP_free_irq_vector:
++ argsize = sizeof(physdev_irq_t);
++ break;
++ case PHYSDEVOP_irq_status_query:
++ argsize = sizeof(physdev_irq_status_query_t);
++ break;
++ case PHYSDEVOP_manage_pci_add:
++ case PHYSDEVOP_manage_pci_remove:
++ argsize = sizeof(physdev_manage_pci_t);
++ break;
++ case PHYSDEVOP_map_pirq:
++ argsize = sizeof(physdev_map_pirq_t);
++ break;
++ case PHYSDEVOP_unmap_pirq:
++ argsize = sizeof(physdev_unmap_pirq_t);
++ break;
++
++ default:
++ printk("%s: unknown physdev op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ return xencomm_arch_hypercall_physdev_op
++ (cmd, xencomm_map_no_alloc(op, argsize));
++}
++
++static int
++xencommize_grant_table_op(struct xencomm_mini **xc_area,
++ unsigned int cmd, void *op, unsigned int count,
++ struct xencomm_handle **desc)
++{
++ struct xencomm_handle *desc1;
++ unsigned int argsize;
++
++ switch (cmd) {
++ case GNTTABOP_map_grant_ref:
++ argsize = sizeof(struct gnttab_map_grant_ref);
++ break;
++ case GNTTABOP_unmap_grant_ref:
++ argsize = sizeof(struct gnttab_unmap_grant_ref);
++ break;
++ case GNTTABOP_unmap_and_replace:
++ argsize = sizeof(struct gnttab_unmap_and_replace);
++ break;
++ case GNTTABOP_setup_table:
++ {
++ struct gnttab_setup_table *setup = op;
++
++ argsize = sizeof(*setup);
++
++ if (count != 1)
++ return -EINVAL;
++ desc1 = __xencomm_map_no_alloc
++ (xen_guest_handle(setup->frame_list),
++ setup->nr_frames *
++ sizeof(*xen_guest_handle(setup->frame_list)),
++ *xc_area);
++ if (desc1 == NULL)
++ return -EINVAL;
++ (*xc_area)++;
++ set_xen_guest_handle(setup->frame_list, (void *)desc1);
++ break;
++ }
++ case GNTTABOP_dump_table:
++ argsize = sizeof(struct gnttab_dump_table);
++ break;
++ case GNTTABOP_transfer:
++ argsize = sizeof(struct gnttab_transfer);
++ break;
++ case GNTTABOP_copy:
++ argsize = sizeof(struct gnttab_copy);
++ break;
++ case GNTTABOP_query_size:
++ argsize = sizeof(struct gnttab_query_size);
++ break;
++ default:
++ printk("%s: unknown hypercall grant table op %d\n",
++ __func__, cmd);
++ BUG();
++ }
++
++ *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
++ if (*desc == NULL)
++ return -EINVAL;
++ (*xc_area)++;
++
++ return 0;
++}
++
++int
++xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
++ unsigned int count)
++{
++ int rc;
++ struct xencomm_handle *desc;
++ XENCOMM_MINI_ALIGNED(xc_area, 2);
++
++ rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
++ if (rc)
++ return rc;
++
++ return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
++
++int
++xencomm_hypercall_sched_op(int cmd, void *arg)
++{
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++
++ switch (cmd) {
++ case SCHEDOP_yield:
++ case SCHEDOP_block:
++ argsize = 0;
++ break;
++ case SCHEDOP_shutdown:
++ argsize = sizeof(sched_shutdown_t);
++ break;
++ case SCHEDOP_remote_shutdown:
++ argsize = sizeof(sched_remote_shutdown_t);
++ break;
++ case SCHEDOP_poll:
++ {
++ sched_poll_t *poll = arg;
++ struct xencomm_handle *ports;
++
++ argsize = sizeof(sched_poll_t);
++ ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
++ sizeof(*xen_guest_handle(poll->ports)));
++
++ set_xen_guest_handle(poll->ports, (void *)ports);
++ break;
++ }
++ default:
++ printk("%s: unknown sched op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_sched_op(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
++
++int
++xencomm_hypercall_multicall(void *call_list, int nr_calls)
++{
++ int rc;
++ int i;
++ multicall_entry_t *mce;
++ struct xencomm_handle *desc;
++ XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
++
++ for (i = 0; i < nr_calls; i++) {
++ mce = (multicall_entry_t *)call_list + i;
++
++ switch (mce->op) {
++ case __HYPERVISOR_update_va_mapping:
++ case __HYPERVISOR_mmu_update:
++ /* No-op on ia64. */
++ break;
++ case __HYPERVISOR_grant_table_op:
++ rc = xencommize_grant_table_op
++ (&xc_area,
++ mce->args[0], (void *)mce->args[1],
++ mce->args[2], &desc);
++ if (rc)
++ return rc;
++ mce->args[1] = (unsigned long)desc;
++ break;
++ case __HYPERVISOR_memory_op:
++ default:
++ printk("%s: unhandled multicall op entry op %lu\n",
++ __func__, mce->op);
++ return -ENOSYS;
++ }
++ }
++
++ desc = xencomm_map_no_alloc(call_list,
++ nr_calls * sizeof(multicall_entry_t));
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_multicall(desc, nr_calls);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
++
++int
++xencomm_hypercall_callback_op(int cmd, void *arg)
++{
++ unsigned int argsize;
++ switch (cmd)
++ {
++ case CALLBACKOP_register:
++ argsize = sizeof(struct callback_register);
++ break;
++ case CALLBACKOP_unregister:
++ argsize = sizeof(struct callback_unregister);
++ break;
++ default:
++ printk("%s: unknown callback op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ return xencomm_arch_hypercall_callback_op
++ (cmd, xencomm_map_no_alloc(arg, argsize));
++}
++
++static int
++xencommize_memory_reservation(struct xencomm_mini *xc_area,
++ xen_memory_reservation_t *mop)
++{
++ struct xencomm_handle *desc;
++
++ desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
++ mop->nr_extents *
++ sizeof(*xen_guest_handle(mop->extent_start)),
++ xc_area);
++ if (desc == NULL)
++ return -EINVAL;
++
++ set_xen_guest_handle(mop->extent_start, (void *)desc);
++ return 0;
++}
++
++int
++xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
++{
++ XEN_GUEST_HANDLE(xen_pfn_t) extent_start_va[2];
++ xen_memory_reservation_t *xmr = NULL, *xme_in = NULL, *xme_out = NULL;
++ xen_memory_map_t *memmap = NULL;
++ XEN_GUEST_HANDLE(void) buffer;
++ int rc;
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++ XENCOMM_MINI_ALIGNED(xc_area, 2);
++
++ switch (cmd) {
++ case XENMEM_increase_reservation:
++ case XENMEM_decrease_reservation:
++ case XENMEM_populate_physmap:
++ xmr = (xen_memory_reservation_t *)arg;
++ set_xen_guest_handle(extent_start_va[0],
++ xen_guest_handle(xmr->extent_start));
++
++ argsize = sizeof(*xmr);
++ rc = xencommize_memory_reservation(xc_area, xmr);
++ if (rc)
++ return rc;
++ xc_area++;
++ break;
++
++ case XENMEM_maximum_gpfn:
++ argsize = 0;
++ break;
++
++ case XENMEM_maximum_ram_page:
++ argsize = 0;
++ break;
++
++ case XENMEM_exchange:
++ xme_in = &((xen_memory_exchange_t *)arg)->in;
++ xme_out = &((xen_memory_exchange_t *)arg)->out;
++ set_xen_guest_handle(extent_start_va[0],
++ xen_guest_handle(xme_in->extent_start));
++ set_xen_guest_handle(extent_start_va[1],
++ xen_guest_handle(xme_out->extent_start));
++
++ argsize = sizeof(xen_memory_exchange_t);
++ rc = xencommize_memory_reservation(xc_area, xme_in);
++ if (rc)
++ return rc;
++ xc_area++;
++ rc = xencommize_memory_reservation(xc_area, xme_out);
++ if (rc)
++ return rc;
++ xc_area++;
++ break;
++
++ case XENMEM_add_to_physmap:
++ argsize = sizeof(xen_add_to_physmap_t);
++ break;
++
++ case XENMEM_machine_memory_map:
++ argsize = sizeof(*memmap);
++ memmap = (xen_memory_map_t *)arg;
++ set_xen_guest_handle(buffer, xen_guest_handle(memmap->buffer));
++ desc = xencomm_map_no_alloc(xen_guest_handle(memmap->buffer),
++ memmap->nr_entries);
++ if (desc == NULL)
++ return -EINVAL;
++ set_xen_guest_handle(memmap->buffer, (void *)desc);
++ break;
++
++ default:
++ printk("%s: unknown memory op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ rc = xencomm_arch_hypercall_memory_op(cmd, desc);
++
++ switch (cmd) {
++ case XENMEM_increase_reservation:
++ case XENMEM_decrease_reservation:
++ case XENMEM_populate_physmap:
++ set_xen_guest_handle(xmr->extent_start,
++ xen_guest_handle(extent_start_va[0]));
++ break;
++
++ case XENMEM_exchange:
++ set_xen_guest_handle(xme_in->extent_start,
++ xen_guest_handle(extent_start_va[0]));
++ set_xen_guest_handle(xme_out->extent_start,
++ xen_guest_handle(extent_start_va[1]));
++ break;
++
++ case XENMEM_machine_memory_map:
++ set_xen_guest_handle(memmap->buffer, xen_guest_handle(buffer));
++ break;
++ }
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
++
++unsigned long
++xencomm_hypercall_hvm_op(int cmd, void *arg)
++{
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++
++ switch (cmd) {
++ case HVMOP_get_param:
++ case HVMOP_set_param:
++ argsize = sizeof(xen_hvm_param_t);
++ break;
++ default:
++ printk("%s: unknown HVMOP %d\n", __func__, cmd);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_hvm_op(cmd, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_hvm_op);
++
++int
++xencomm_hypercall_suspend(unsigned long srec)
++{
++ struct sched_shutdown arg;
++
++ arg.reason = SHUTDOWN_suspend;
++
++ return xencomm_arch_hypercall_suspend(
++ xencomm_map_no_alloc(&arg, sizeof(arg)));
++}
++
++int
++xencomm_hypercall_xenoprof_op(int op, void *arg)
++{
++ unsigned int argsize;
++ struct xencomm_handle *desc;
++
++ switch (op) {
++ case XENOPROF_init:
++ argsize = sizeof(xenoprof_init_t);
++ break;
++ case XENOPROF_set_active:
++ argsize = sizeof(domid_t);
++ break;
++ case XENOPROF_set_passive:
++ argsize = sizeof(xenoprof_passive_t);
++ break;
++ case XENOPROF_counter:
++ argsize = sizeof(xenoprof_counter_t);
++ break;
++ case XENOPROF_get_buffer:
++ argsize = sizeof(xenoprof_get_buffer_t);
++ break;
++
++ case XENOPROF_reset_active_list:
++ case XENOPROF_reset_passive_list:
++ case XENOPROF_reserve_counters:
++ case XENOPROF_setup_events:
++ case XENOPROF_enable_virq:
++ case XENOPROF_start:
++ case XENOPROF_stop:
++ case XENOPROF_disable_virq:
++ case XENOPROF_release_counters:
++ case XENOPROF_shutdown:
++ return xencomm_arch_hypercall_xenoprof_op(op, arg);
++
++ default:
++ printk("%s: op %d isn't supported\n", __func__, op);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_xenoprof_op(op, desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_xenoprof_op);
++
++int
++xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg,
++ unsigned long count)
++{
++ unsigned int argsize;
++ struct xencomm_handle *desc;
++
++ switch (cmd) {
++ case PFM_GET_FEATURES:
++ argsize = sizeof(pfarg_features_t);
++ break;
++ case PFM_CREATE_CONTEXT:
++ argsize = sizeof(pfarg_context_t);
++ break;
++ case PFM_LOAD_CONTEXT:
++ argsize = sizeof(pfarg_load_t);
++ break;
++ case PFM_WRITE_PMCS:
++ case PFM_WRITE_PMDS:
++ argsize = sizeof(pfarg_reg_t) * count;
++ break;
++
++ case PFM_DESTROY_CONTEXT:
++ case PFM_UNLOAD_CONTEXT:
++ case PFM_START:
++ case PFM_STOP:
++ return xencomm_arch_hypercall_perfmon_op(cmd, arg, count);
++
++ default:
++ printk("%s:%d cmd %ld isn't supported\n",
++ __func__, __LINE__, cmd);
++ BUG();
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_perfmon_op(cmd, desc, count);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_perfmon_op);
++
++long
++xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
++{
++ unsigned int argsize;
++ switch (cmd) {
++ case VCPUOP_register_runstate_memory_area: {
++ vcpu_register_runstate_memory_area_t *area =
++ (vcpu_register_runstate_memory_area_t *)arg;
++ argsize = sizeof(*area);
++ set_xen_guest_handle(area->addr.h,
++ (void *)xencomm_map_no_alloc(area->addr.v,
++ sizeof(area->addr.v)));
++ break;
++ }
++
++ default:
++ printk("%s: unknown vcpu op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
++ xencomm_map_no_alloc(arg, argsize));
++}
++
++long
++xencomm_hypercall_opt_feature(void *arg)
++{
++ return xencomm_arch_hypercall_opt_feature(
++ xencomm_map_no_alloc(arg,
++ sizeof(struct xen_ia64_opt_feature)));
++}
++
++int
++xencomm_hypercall_fpswa_revision(unsigned int *revision)
++{
++ struct xencomm_handle *desc;
++
++ desc = xencomm_map_no_alloc(revision, sizeof(*revision));
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_fpswa_revision(desc);
++}
++EXPORT_SYMBOL_GPL(xencomm_hypercall_fpswa_revision);
++
++int
++xencomm_hypercall_kexec_op(int cmd, void *arg)
++{
++ unsigned int argsize;
++ struct xencomm_handle *desc;
++
++ switch (cmd) {
++ case KEXEC_CMD_kexec_get_range:
++ argsize = sizeof(xen_kexec_range_t);
++ break;
++ case KEXEC_CMD_kexec_load:
++ case KEXEC_CMD_kexec_unload:
++ argsize = sizeof(xen_kexec_load_t);
++ break;
++ case KEXEC_CMD_kexec:
++ argsize = sizeof(xen_kexec_exec_t);
++ break;
++ default:
++ printk("%s:%d cmd %d isn't supported\n",
++ __func__, __LINE__, cmd);
++ BUG();
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc == NULL)
++ return -EINVAL;
++
++ return xencomm_arch_hypercall_kexec_op(cmd, desc);
++}
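
Every wrapper in this file follows the same shape: size the argument from
the command, wrap the kernel pointer in an inline or mini xencomm
descriptor, and hand the descriptor to the raw arch hypercall. A condensed
sketch of that shape for a hypothetical new hypercall (the EXAMPLEOP names
and the arch stub are invented for illustration):

	int
	xencomm_hypercall_example_op(int cmd, void *arg)
	{
		struct xencomm_handle *desc;
		unsigned int argsize;

		switch (cmd) {
		case EXAMPLEOP_query:				/* hypothetical */
			argsize = sizeof(example_query_t);	/* hypothetical */
			break;
		default:
			printk("%s: unknown example op %d\n", __func__, cmd);
			return -ENOSYS;
		}

		/* Inline descriptor, no allocation: 'arg' must therefore be
		 * contiguous in-kernel memory, per the file-head note. */
		desc = xencomm_map_no_alloc(arg, argsize);
		if (desc == NULL)
			return -EINVAL;

		return xencomm_arch_hypercall_example_op(cmd, desc);
	}
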
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xcom_privcmd.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/xcom_privcmd.c
+--- linux-2.6.18.8/arch/ia64/xen/xcom_privcmd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xcom_privcmd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,902 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ * Tristan Gingold <tristan.gingold@bull.net>
++ */
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#define __XEN__
++#include <xen/interface/domctl.h>
++#include <xen/interface/sysctl.h>
++#include <xen/interface/memory.h>
++#include <xen/interface/version.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/xsm/acm_ops.h>
++#include <xen/interface/hvm/params.h>
++#include <xen/interface/arch-ia64/debug_op.h>
++#include <xen/public/privcmd.h>
++#include <asm/hypercall.h>
++#include <asm/page.h>
++#include <asm/uaccess.h>
++#include <asm/xen/xencomm.h>
++
++#define ROUND_DIV(v,s) (((v) + (s) - 1) / (s))
++
++static int
++xencomm_privcmd_platform_op(privcmd_hypercall_t *hypercall)
++{
++ struct xen_platform_op kern_op;
++ struct xen_platform_op __user *user_op = (struct xen_platform_op __user *)hypercall->arg[0];
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ int ret = 0;
++
++ if (copy_from_user(&kern_op, user_op, sizeof(struct xen_platform_op)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != XENPF_INTERFACE_VERSION)
++ return -EACCES;
++
++ op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++ switch (kern_op.cmd) {
++ default:
++ printk("%s: unknown platform cmd %d\n", __func__, kern_op.cmd);
++ return -ENOSYS;
++ }
++
++ if (ret) {
++ /* error mapping the nested pointer */
++ return ret;
++ }
++
++ ret = xencomm_arch_hypercall_platform_op(op_desc);
++
++ /* FIXME: should we restore the handle? */
++ if (copy_to_user(user_op, &kern_op, sizeof(struct xen_platform_op)))
++ ret = -EFAULT;
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int
++xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
++{
++ xen_sysctl_t kern_op;
++ xen_sysctl_t __user *user_op;
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ struct xencomm_handle *desc1 = NULL;
++ int ret = 0;
++
++ user_op = (xen_sysctl_t __user *)hypercall->arg[0];
++
++ if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION)
++ return -EACCES;
++
++ op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++ switch (kern_op.cmd) {
++ case XEN_SYSCTL_readconsole:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.readconsole.buffer),
++ kern_op.u.readconsole.count);
++ if (xen_guest_handle(kern_op.u.readconsole.buffer) != NULL &&
++ kern_op.u.readconsole.count > 0 && desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.readconsole.buffer,
++ (void *)desc);
++ break;
++ case XEN_SYSCTL_tbuf_op:
++ case XEN_SYSCTL_sched_id:
++ case XEN_SYSCTL_availheap:
++ break;
++ case XEN_SYSCTL_perfc_op:
++ {
++ struct xencomm_handle *tmp_desc;
++ xen_sysctl_t tmp_op = {
++ .cmd = XEN_SYSCTL_perfc_op,
++ .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
++ .u.perfc_op = {
++ .cmd = XEN_SYSCTL_PERFCOP_query,
++ /* .desc.p = NULL, */
++ /* .val.p = NULL, */
++ },
++ };
++
++ if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
++ if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
++ return -EINVAL;
++ break;
++ }
++
++ /* query the buffer size for xencomm */
++ tmp_desc = xencomm_map_no_alloc(&tmp_op, sizeof(tmp_op));
++ ret = xencomm_arch_hypercall_sysctl(tmp_desc);
++ if (ret)
++ return ret;
++
++ desc = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.desc),
++ tmp_op.u.perfc_op.nr_counters *
++ sizeof(xen_sysctl_perfc_desc_t));
++ if (xen_guest_handle(kern_op.u.perfc_op.desc) != NULL &&
++ tmp_op.u.perfc_op.nr_counters > 0 && desc == NULL)
++ return -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);
++
++ desc1 = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.val),
++ tmp_op.u.perfc_op.nr_vals *
++ sizeof(xen_sysctl_perfc_val_t));
++ if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL &&
++ tmp_op.u.perfc_op.nr_vals > 0 && desc1 == NULL) {
++ xencomm_free(desc);
++ return -ENOMEM;
++ }
++
++ set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
++ break;
++ }
++ case XEN_SYSCTL_getdomaininfolist:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
++ kern_op.u.getdomaininfolist.max_domains *
++ sizeof(xen_domctl_getdomaininfo_t));
++ if (xen_guest_handle(kern_op.u.getdomaininfolist.buffer) !=
++ NULL && kern_op.u.getdomaininfolist.max_domains > 0 &&
++ desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
++ (void *)desc);
++ break;
++ case XEN_SYSCTL_debug_keys:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.debug_keys.keys),
++ kern_op.u.debug_keys.nr_keys);
++ if (xen_guest_handle(kern_op.u.debug_keys.keys) != NULL &&
++ kern_op.u.debug_keys.nr_keys > 0 && desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.debug_keys.keys,
++ (void *)desc);
++ break;
++
++ case XEN_SYSCTL_physinfo:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
++ kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t));
++ if (xen_guest_handle(kern_op.u.physinfo.cpu_to_node) != NULL &&
++ kern_op.u.physinfo.max_cpu_id > 0 && desc == NULL)
++ return -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
++ (void *)desc);
++ break;
++ default:
++ printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
++ return -ENOSYS;
++ }
++
++ if (ret) {
++ /* error mapping the nested pointer */
++ return ret;
++ }
++
++ ret = xencomm_arch_hypercall_sysctl(op_desc);
++
++ /* FIXME: should we restore the handles? */
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
++ ret = -EFAULT;
++
++ xencomm_free(desc);
++ xencomm_free(desc1);
++ return ret;
++}
++
++static int
++xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
++{
++ xen_domctl_t kern_op;
++ xen_domctl_t __user *user_op;
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ int ret = 0;
++
++ user_op = (xen_domctl_t __user *)hypercall->arg[0];
++
++ if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
++ return -EACCES;
++
++ op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++ switch (kern_op.cmd) {
++ case XEN_DOMCTL_createdomain:
++ case XEN_DOMCTL_destroydomain:
++ case XEN_DOMCTL_pausedomain:
++ case XEN_DOMCTL_unpausedomain:
++ case XEN_DOMCTL_resumedomain:
++ case XEN_DOMCTL_getdomaininfo:
++ break;
++ case XEN_DOMCTL_getmemlist:
++ {
++ unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;
++
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getmemlist.buffer),
++ nr_pages * sizeof(unsigned long));
++ if (xen_guest_handle(kern_op.u.getmemlist.buffer) != NULL &&
++ nr_pages > 0 && desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.getmemlist.buffer,
++ (void *)desc);
++ break;
++ }
++ case XEN_DOMCTL_getpageframeinfo:
++ break;
++ case XEN_DOMCTL_getpageframeinfo2:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getpageframeinfo2.array),
++ kern_op.u.getpageframeinfo2.num);
++ if (xen_guest_handle(kern_op.u.getpageframeinfo2.array) !=
++ NULL && kern_op.u.getpageframeinfo2.num > 0 &&
++ desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_shadow_op:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
++ ROUND_DIV(kern_op.u.shadow_op.pages, 8));
++ if (xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap) != NULL
++ && kern_op.u.shadow_op.pages > 0 && desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_max_mem:
++ break;
++ case XEN_DOMCTL_setvcpucontext:
++ case XEN_DOMCTL_getvcpucontext:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.vcpucontext.ctxt),
++ sizeof(vcpu_guest_context_t));
++ if (xen_guest_handle(kern_op.u.vcpucontext.ctxt) != NULL &&
++ desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.vcpucontext.ctxt, (void *)desc);
++ break;
++ case XEN_DOMCTL_getvcpuinfo:
++ break;
++ case XEN_DOMCTL_setvcpuaffinity:
++ case XEN_DOMCTL_getvcpuaffinity:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
++ ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8));
++ if (xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap) !=
++ NULL && kern_op.u.vcpuaffinity.cpumap.nr_cpus > 0 &&
++ desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_gethvmcontext:
++ case XEN_DOMCTL_sethvmcontext:
++ if (kern_op.u.hvmcontext.size > 0)
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.hvmcontext.buffer),
++ kern_op.u.hvmcontext.size);
++ if (xen_guest_handle(kern_op.u.hvmcontext.buffer) != NULL &&
++ kern_op.u.hvmcontext.size > 0 && desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.u.hvmcontext.buffer, (void*)desc);
++ break;
++ case XEN_DOMCTL_max_vcpus:
++ case XEN_DOMCTL_scheduler_op:
++ case XEN_DOMCTL_setdomainhandle:
++ case XEN_DOMCTL_setdebugging:
++ case XEN_DOMCTL_irq_permission:
++ case XEN_DOMCTL_iomem_permission:
++ case XEN_DOMCTL_ioport_permission:
++ case XEN_DOMCTL_hypercall_init:
++ case XEN_DOMCTL_arch_setup:
++ case XEN_DOMCTL_settimeoffset:
++ case XEN_DOMCTL_sendtrigger:
++ case XEN_DOMCTL_set_opt_feature:
++ case XEN_DOMCTL_assign_device:
++ case XEN_DOMCTL_subscribe:
++ break;
++ case XEN_DOMCTL_pin_mem_cacheattr:
++ return -ENOSYS;
++ default:
++ printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
++ return -ENOSYS;
++ }
++
++ if (ret) {
++ /* error mapping the nested pointer */
++ return ret;
++ }
++
++ ret = xencomm_arch_hypercall_domctl(op_desc);
++
++ /* FIXME: should we restore the handle? */
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
++ ret = -EFAULT;
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int
++xencomm_privcmd_xsm_op(privcmd_hypercall_t *hypercall)
++{
++ void __user *arg = (void __user *)hypercall->arg[0];
++ xen_acmctl_t kern_arg;
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ int ret;
++
++ if (copy_from_user(&kern_arg, arg, sizeof(kern_arg)))
++ return -EFAULT;
++ if (kern_arg.interface_version != ACM_INTERFACE_VERSION)
++ return -ENOSYS;
++
++ switch (kern_arg.cmd) {
++ case ACMOP_getssid: {
++ op_desc = xencomm_map_no_alloc(&kern_arg, sizeof(kern_arg));
++
++ desc = xencomm_map(
++ xen_guest_handle(kern_arg.u.getssid.ssidbuf),
++ kern_arg.u.getssid.ssidbuf_size);
++ if (xen_guest_handle(kern_arg.u.getssid.ssidbuf) != NULL &&
++ kern_arg.u.getssid.ssidbuf_size > 0 && desc == NULL)
++ return -ENOMEM;
++
++ set_xen_guest_handle(kern_arg.u.getssid.ssidbuf, (void *)desc);
++
++ ret = xencomm_arch_hypercall_xsm_op(op_desc);
++
++ xencomm_free(desc);
++
++ if (copy_to_user(arg, &kern_arg, sizeof(kern_arg)))
++ return -EFAULT;
++ return ret;
++ }
++ default:
++ printk("%s: unknown acm_op cmd %d\n", __func__, kern_arg.cmd);
++ return -ENOSYS;
++ }
++
++ return ret;
++}
++
++static int
++xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
++{
++ const unsigned long cmd = hypercall->arg[0];
++ int ret = 0;
++ xen_memory_reservation_t kern_op;
++ xen_memory_reservation_t __user *user_op;
++ struct xencomm_handle *desc = NULL;
++ struct xencomm_handle *desc_op;
++
++ user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
++ if (copy_from_user(&kern_op, user_op,
++ sizeof(xen_memory_reservation_t)))
++ return -EFAULT;
++ desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++ if (!xen_guest_handle(kern_op.extent_start)) {
++ ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++ if (ret < 0)
++ return ret;
++ } else {
++ xen_ulong_t nr_done = 0;
++ xen_ulong_t nr_extents = kern_op.nr_extents;
++ void *addr = xen_guest_handle(kern_op.extent_start);
++
++ /*
++ * Workaround:
++ * xencomm has a single-page size limit, caused by
++ * xencomm_alloc()/xencomm_free(), so we have to
++ * repeat the hypercall.
++ * This limitation can be removed.
++ */
++#define MEMORYOP_XENCOMM_LIMIT \
++ (((((PAGE_SIZE - sizeof(struct xencomm_desc)) / \
++ sizeof(uint64_t)) - 2) * PAGE_SIZE) / \
++ sizeof(*xen_guest_handle(kern_op.extent_start)))
++
++ /*
++ * Workaround:
++ * even if the above limitation is removed, a hypercall
++ * with a large number of extents may trigger the soft
++ * lockup warning. To avoid the warning, we limit the
++ * number of extents per hypercall and repeat the call.
++ * The following value was determined by evaluation:
++ * one hypercall should take less than a vcpu time
++ * slice, and with the current MEMORYOP_MAX_EXTENTS it
++ * takes around 5 msec. If the following limit causes
++ * issues, we should decrease this value.
++ *
++ * Another approach would be to start with a small value
++ * and increase it adaptively by measuring hypercall
++ * time, but that might be overkill.
++ */
++#define MEMORYOP_MAX_EXTENTS (MEMORYOP_XENCOMM_LIMIT / 512)
++
++ while (nr_extents > 0) {
++ xen_ulong_t nr_tmp = nr_extents;
++ if (nr_tmp > MEMORYOP_MAX_EXTENTS)
++ nr_tmp = MEMORYOP_MAX_EXTENTS;
++
++ kern_op.nr_extents = nr_tmp;
++ desc = xencomm_map
++ (addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
++ nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)));
++ if (addr != NULL && nr_tmp > 0 && desc == NULL)
++ return nr_done > 0 ? nr_done : -ENOMEM;
++
++ set_xen_guest_handle(kern_op.extent_start,
++ (void *)desc);
++
++ ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++ xencomm_free(desc);
++ if (ret < 0)
++ return nr_done > 0 ? nr_done : ret;
++
++ nr_done += ret;
++ nr_extents -= ret;
++ if (ret < nr_tmp)
++ break;
++
++ /*
++ * Prevent the soft lockup message:
++ * give the cpu to the soft-lockup watchdog thread.
++ */
++ if (nr_extents > 0)
++ schedule();
++ }
++ ret = nr_done;
++ set_xen_guest_handle(kern_op.extent_start, addr);
++ }
++
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
++ return -EFAULT;
++
++ return ret;
++}
++
++static int
++xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
++{
++ const unsigned long cmd = hypercall->arg[0];
++ int ret = 0;
++
++ switch (cmd) {
++ case XENMEM_increase_reservation:
++ case XENMEM_decrease_reservation:
++ case XENMEM_populate_physmap:
++ return xencomm_privcmd_memory_reservation_op(hypercall);
++ case XENMEM_maximum_gpfn:
++ {
++ domid_t kern_domid;
++ domid_t __user *user_domid;
++ struct xencomm_handle *desc;
++
++ user_domid = (domid_t __user *)hypercall->arg[1];
++ if (copy_from_user(&kern_domid, user_domid, sizeof(domid_t)))
++ return -EFAULT;
++ desc = xencomm_map_no_alloc(&kern_domid, sizeof(kern_domid));
++
++ ret = xencomm_arch_hypercall_memory_op(cmd, desc);
++
++ return ret;
++ }
++ case XENMEM_translate_gpfn_list:
++ {
++ xen_translate_gpfn_list_t kern_op;
++ xen_translate_gpfn_list_t __user *user_op;
++ struct xencomm_handle *desc_gpfn = NULL;
++ struct xencomm_handle *desc_mfn = NULL;
++ struct xencomm_handle *desc_op;
++ void *addr;
++
++ user_op = (xen_translate_gpfn_list_t __user *)
++ hypercall->arg[1];
++ if (copy_from_user(&kern_op, user_op,
++ sizeof(xen_translate_gpfn_list_t)))
++ return -EFAULT;
++ desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++
++ if (kern_op.nr_gpfns) {
++ /* gpfn_list. */
++ addr = xen_guest_handle(kern_op.gpfn_list);
++
++ desc_gpfn = xencomm_map(addr, kern_op.nr_gpfns *
++ sizeof(*xen_guest_handle
++ (kern_op.gpfn_list)));
++ if (addr != NULL && kern_op.nr_gpfns > 0 &&
++ desc_gpfn == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.gpfn_list,
++ (void *)desc_gpfn);
++
++ /* mfn_list. */
++ addr = xen_guest_handle(kern_op.mfn_list);
++
++ desc_mfn = xencomm_map(addr, kern_op.nr_gpfns *
++ sizeof(*xen_guest_handle
++ (kern_op.mfn_list)));
++ if (addr != NULL && kern_op.nr_gpfns > 0 &&
++ desc_mfn == NULL) {
++ xencomm_free(desc_gpfn);
++ return -ENOMEM;
++ }
++
++ set_xen_guest_handle(kern_op.mfn_list,
++ (void *)desc_mfn);
++ }
++
++ ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
++
++ xencomm_free(desc_gpfn);
++ xencomm_free(desc_mfn);
++
++ return ret;
++ }
++ default:
++ printk("%s: unknown memory op %lu\n", __func__, cmd);
++ ret = -ENOSYS;
++ }
++ return ret;
++}
++
++static int
++xencomm_privcmd_xen_version(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ void __user *arg = (void __user *)hypercall->arg[1];
++ struct xencomm_handle *desc;
++ size_t argsize;
++ int rc;
++
++ switch (cmd) {
++ case XENVER_version:
++ /* do not actually pass an argument */
++ return xencomm_arch_hypercall_xen_version(cmd, 0);
++ case XENVER_extraversion:
++ argsize = sizeof(xen_extraversion_t);
++ break;
++ case XENVER_compile_info:
++ argsize = sizeof(xen_compile_info_t);
++ break;
++ case XENVER_capabilities:
++ argsize = sizeof(xen_capabilities_info_t);
++ break;
++ case XENVER_changeset:
++ argsize = sizeof(xen_changeset_info_t);
++ break;
++ case XENVER_platform_parameters:
++ argsize = sizeof(xen_platform_parameters_t);
++ break;
++ case XENVER_pagesize:
++ argsize = (arg == NULL) ? 0 : sizeof(void *);
++ break;
++ case XENVER_get_features:
++ argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
++ break;
++
++ default:
++ printk("%s: unknown version op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map(arg, argsize);
++ if (arg != NULL && argsize > 0 && desc == NULL)
++ return -ENOMEM;
++
++ rc = xencomm_arch_hypercall_xen_version(cmd, desc);
++
++ xencomm_free(desc);
++
++ return rc;
++}
++
++static int
++xencomm_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++ int ret;
++
++ switch (cmd) {
++ case EVTCHNOP_alloc_unbound:
++ argsize = sizeof(evtchn_alloc_unbound_t);
++ break;
++
++ case EVTCHNOP_status:
++ argsize = sizeof(evtchn_status_t);
++ break;
++
++ default:
++ printk("%s: unknown EVTCHNOP %d\n", __func__, cmd);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map((void *)hypercall->arg[1], argsize);
++ if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++ return -ENOMEM;
++
++ ret = xencomm_arch_hypercall_event_channel_op(cmd, desc);
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int
++xencomm_privcmd_hvm_op_track_dirty_vram(privcmd_hypercall_t *hypercall)
++{
++#if 1
++ /*
++ * At the moment HVMOP_track_dirty_vram isn't implemented
++ * on xen/ia64, so it just returns -ENOSYS.
++ * Don't issue the hypercall only to get -ENOSYS back.
++ * When the hypercall is implemented, enable the code below.
++ */
++ return -ENOSYS;
++#else
++ int cmd = hypercall->arg[0];
++ struct xen_hvm_track_dirty_vram *user_op = (void*)hypercall->arg[1];
++ struct xen_hvm_track_dirty_vram kern_op;
++ struct xencomm_handle *desc;
++ struct xencomm_handle *bitmap_desc;
++ int ret;
++
++ BUG_ON(cmd != HVMOP_track_dirty_vram);
++ if (copy_from_user(&kern_op, user_op, sizeof(kern_op)))
++ return -EFAULT;
++ desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
++ bitmap_desc = xencomm_map(xen_guest_handle(kern_op.dirty_bitmap),
++ kern_op.nr * sizeof(uint8_t));
++ if (bitmap_desc == NULL)
++ return -ENOMEM;
++ set_xen_guest_handle(kern_op.dirty_bitmap, (void*)bitmap_desc);
++ ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
++ xencomm_free(bitmap_desc);
++
++ return ret;
++#endif
++}
++
++static int
++xencomm_privcmd_hvm_op(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++ int ret;
++
++ switch (cmd) {
++ case HVMOP_get_param:
++ case HVMOP_set_param:
++ argsize = sizeof(xen_hvm_param_t);
++ break;
++ case HVMOP_set_pci_intx_level:
++ argsize = sizeof(xen_hvm_set_pci_intx_level_t);
++ break;
++ case HVMOP_set_isa_irq_level:
++ argsize = sizeof(xen_hvm_set_isa_irq_level_t);
++ break;
++ case HVMOP_set_pci_link_route:
++ argsize = sizeof(xen_hvm_set_pci_link_route_t);
++ break;
++ case HVMOP_set_mem_type:
++ argsize = sizeof(xen_hvm_set_mem_type_t);
++ break;
++
++ case HVMOP_track_dirty_vram:
++ return xencomm_privcmd_hvm_op_track_dirty_vram(hypercall);
++
++ default:
++ printk("%s: unknown HVMOP %d\n", __func__, cmd);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map((void *)hypercall->arg[1], argsize);
++ if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++ return -ENOMEM;
++
++ ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int
++xencomm_privcmd_sched_op(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++ int ret;
++
++ switch (cmd) {
++ case SCHEDOP_remote_shutdown:
++ argsize = sizeof(sched_remote_shutdown_t);
++ break;
++ default:
++ printk("%s: unknown SCHEDOP %d\n", __func__, cmd);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map((void *)hypercall->arg[1], argsize);
++ if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++ return -ENOMEM;
++
++ ret = xencomm_arch_hypercall_sched_op(cmd, desc);
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int
++xencomm_privcmd_ia64_dom0vp_op(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ int ret;
++
++ switch (cmd) {
++ case IA64_DOM0VP_fpswa_revision: {
++ unsigned int revision;
++ unsigned int __user *revision_user =
++ (unsigned int __user *)hypercall->arg[1];
++ struct xencomm_handle *desc;
++ desc = xencomm_map(&revision, sizeof(revision));
++ if (desc == NULL)
++ return -ENOMEM;
++
++ ret = xencomm_arch_hypercall_fpswa_revision(desc);
++ xencomm_free(desc);
++ if (ret)
++ break;
++ if (copy_to_user(revision_user, &revision, sizeof(revision)))
++ ret = -EFAULT;
++ break;
++ }
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++ case IA64_DOM0VP_expose_foreign_p2m:
++ ret = xen_foreign_p2m_expose(hypercall);
++ break;
++#endif
++ default:
++ printk("%s: unknown IA64 DOM0VP op %d\n", __func__, cmd);
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
++
++static int
++xencomm_privcmd_ia64_debug_op(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ unsigned long domain = hypercall->arg[1];
++ struct xencomm_handle *desc;
++ int ret;
++
++ switch (cmd) {
++ case XEN_IA64_DEBUG_OP_SET_FLAGS:
++ case XEN_IA64_DEBUG_OP_GET_FLAGS:
++ break;
++ default:
++ printk("%s: unknown IA64 DEBUGOP %d\n", __func__, cmd);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map((void *)hypercall->arg[2],
++ sizeof(xen_ia64_debug_op_t));
++ if (desc == NULL)
++ return -ENOMEM;
++
++ ret = xencomm_arch_hypercall_ia64_debug_op(cmd, domain, desc);
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int
++xencomm_privcmd_ia64_physdev_op(privcmd_hypercall_t *hypercall)
++{
++ int cmd = hypercall->arg[0];
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++ int ret;
++
++ switch (cmd) {
++ case PHYSDEVOP_map_pirq:
++ argsize = sizeof(physdev_map_pirq_t);
++ break;
++ case PHYSDEVOP_unmap_pirq:
++ argsize = sizeof(physdev_unmap_pirq_t);
++ break;
++ default:
++ printk("%s: unknown PHYSDEVOP %d\n", __func__, cmd);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map((void *)hypercall->arg[1], argsize);
++ if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
++ return -ENOMEM;
++
++ ret = xencomm_arch_hypercall_physdev_op(cmd, desc);
++
++ xencomm_free(desc);
++ return ret;
++}
++
++int
++privcmd_hypercall(privcmd_hypercall_t *hypercall)
++{
++ switch (hypercall->op) {
++ case __HYPERVISOR_platform_op:
++ return xencomm_privcmd_platform_op(hypercall);
++ case __HYPERVISOR_domctl:
++ return xencomm_privcmd_domctl(hypercall);
++ case __HYPERVISOR_sysctl:
++ return xencomm_privcmd_sysctl(hypercall);
++ case __HYPERVISOR_xsm_op:
++ return xencomm_privcmd_xsm_op(hypercall);
++ case __HYPERVISOR_xen_version:
++ return xencomm_privcmd_xen_version(hypercall);
++ case __HYPERVISOR_memory_op:
++ return xencomm_privcmd_memory_op(hypercall);
++ case __HYPERVISOR_event_channel_op:
++ return xencomm_privcmd_event_channel_op(hypercall);
++ case __HYPERVISOR_hvm_op:
++ return xencomm_privcmd_hvm_op(hypercall);
++ case __HYPERVISOR_sched_op:
++ return xencomm_privcmd_sched_op(hypercall);
++ case __HYPERVISOR_ia64_dom0vp_op:
++ return xencomm_privcmd_ia64_dom0vp_op(hypercall);
++ case __HYPERVISOR_ia64_debug_op:
++ return xencomm_privcmd_ia64_debug_op(hypercall);
++ case __HYPERVISOR_physdev_op:
++ return xencomm_privcmd_ia64_physdev_op(hypercall);
++ default:
++ printk("%s: unknown hcall (%ld)\n", __func__, hypercall->op);
++ return -ENOSYS;
++ }
++}
++
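
privcmd_hypercall() above is the kernel end of IOCTL_PRIVCMD_HYPERCALL. A
hedged userspace sketch of the other end (error handling trimmed; header
paths and the /proc node are as shipped with the 2.6.18 Xen tree, and
XENVER_version needs no argument buffer, matching the kernel-side handling
above):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <xen/public/privcmd.h>		/* privcmd_hypercall_t */
	#include <xen/interface/xen.h>		/* __HYPERVISOR_xen_version */
	#include <xen/interface/version.h>	/* XENVER_version */

	int main(void)
	{
		privcmd_hypercall_t call = {
			.op  = __HYPERVISOR_xen_version,
			.arg = { XENVER_version, 0 },
		};
		int fd = open("/proc/xen/privcmd", O_RDWR);

		if (fd < 0)
			return 1;
		/* For XENVER_version the return value encodes
		 * (major << 16) | minor. */
		return ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call) < 0;
	}
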
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xencomm.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/xencomm.c
+--- linux-2.6.18.8/arch/ia64/xen/xencomm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xencomm.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,109 @@
++/*
++ * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <xen/interface/xen.h>
++#include <asm/page.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#include <asm/xen/xencomm.h>
++
++static unsigned long kernel_start_pa;
++
++void
++xencomm_initialize(void)
++{
++ kernel_start_pa = KERNEL_START - ia64_tpa(KERNEL_START);
++}
++
++/* Translate virtual address to physical address. */
++unsigned long
++xencomm_vtop(unsigned long vaddr)
++{
++#ifndef CONFIG_VMX_GUEST
++ struct page *page;
++ struct vm_area_struct *vma;
++#endif
++
++ if (vaddr == 0)
++ return 0;
++
++#ifdef __ia64__
++ if (REGION_NUMBER(vaddr) == 5) {
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep;
++
++ /* On ia64, TASK_SIZE refers to current. It is not initialized
++ during boot.
++ Furthermore the kernel is relocatable and __pa() doesn't
++ work on these addresses. */
++ if (vaddr >= KERNEL_START
++ && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) {
++ return vaddr - kernel_start_pa;
++ }
++
++ /* In kernel area -- virtually mapped. */
++ pgd = pgd_offset_k(vaddr);
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ return ~0UL;
++
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud) || pud_bad(*pud))
++ return ~0UL;
++
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ return ~0UL;
++
++ ptep = pte_offset_kernel(pmd, vaddr);
++ if (!ptep)
++ return ~0UL;
++
++ return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
++ }
++#endif
++
++ if (vaddr > TASK_SIZE) {
++ /* kernel address */
++ return __pa(vaddr);
++ }
++
++#ifdef CONFIG_VMX_GUEST
++ /* No privcmd within vmx guest. */
++ return ~0UL;
++#else
++ /* XXX double-check (lack of) locking */
++ vma = find_extend_vma(current->mm, vaddr);
++ if (!vma)
++ return ~0UL;
++
++ /* We assume the page is modified. */
++ page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
++ if (!page)
++ return ~0UL;
++
++ return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
++#endif
++}
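
The region-5 walk ends by composing the physical address from the PTE's
frame bits and the in-page offset of the virtual address. The same idiom
in isolation (a didactic sketch, not kernel code; the mask parameters
stand in for _PFN_MASK and PAGE_MASK):

	unsigned long compose_paddr(unsigned long pte, unsigned long vaddr,
				    unsigned long pfn_mask,
				    unsigned long page_mask)
	{
		/* Frame bits come from the PTE; the byte offset inside
		 * the page comes from the virtual address. */
		return (pte & pfn_mask) | (vaddr & ~page_mask);
	}
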
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xen_dma.c linux-2.6.18-xen-3.3.0/arch/ia64/xen/xen_dma.c
+--- linux-2.6.18.8/arch/ia64/xen/xen_dma.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xen_dma.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,190 @@
++/*
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ * Alex Williamson <alex.williamson@hp.com>
++ *
++ * Basic DMA mapping services for Xen guests.
++ * Based on arch/i386/kernel/pci-dma-xen.c.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/bitops.h>
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <asm/scatterlist.h>
++#include <xen/gnttab.h>
++#include <asm/gnttab_dma.h>
++
++#define IOMMU_BUG_ON(test) \
++do { \
++ if (unlikely(test)) { \
++ printk(KERN_ALERT "Fatal DMA error!\n"); \
++ BUG(); \
++ } \
++} while (0)
++
++static int check_pages_physically_contiguous(unsigned long pfn,
++ unsigned int offset,
++ size_t length)
++{
++ unsigned long next_bus;
++ int i;
++ int nr_pages;
++
++ next_bus = pfn_to_mfn_for_dma(pfn);
++ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
++
++ for (i = 1; i < nr_pages; i++) {
++ if (pfn_to_mfn_for_dma(++pfn) != ++next_bus)
++ return 0;
++ }
++ return 1;
++}
++
++int range_straddles_page_boundary(paddr_t p, size_t size)
++{
++ extern unsigned long *contiguous_bitmap;
++ unsigned long pfn = p >> PAGE_SHIFT;
++ unsigned int offset = p & ~PAGE_MASK;
++
++ if (!is_running_on_xen())
++ return 0;
++
++ if (offset + size <= PAGE_SIZE)
++ return 0;
++ if (test_bit(pfn, contiguous_bitmap))
++ return 0;
++ if (check_pages_physically_contiguous(pfn, offset, size))
++ return 0;
++ return 1;
++}
++
++/*
++ * This should be broken out of swiotlb and put in a common place
++ * when merged with upstream Linux.
++ */
++static inline int
++address_needs_mapping(struct device *dev, dma_addr_t addr)
++{
++ dma_addr_t mask = 0xffffffff;
++
++ /* If the device has a mask, use it, otherwise default to 32 bits */
++ if (dev && dev->dma_mask)
++ mask = *dev->dma_mask;
++ return (addr & ~mask) != 0;
++}
++
++int
++xen_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++ int direction)
++{
++ int i;
++
++ for (i = 0 ; i < nents ; i++) {
++ sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
++ sg[i].dma_length = sg[i].length;
++
++ IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
++ IOMMU_BUG_ON(range_straddles_page_boundary(
++ page_to_pseudophys(sg[i].page) + sg[i].offset,
++ sg[i].length));
++ }
++
++ return nents;
++}
++EXPORT_SYMBOL(xen_map_sg);
++
++void
++xen_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++ int direction)
++{
++ int i;
++ for (i = 0; i < nents; i++)
++ __gnttab_dma_unmap_page(sg[i].page);
++}
++EXPORT_SYMBOL(xen_unmap_sg);
++
++int
++xen_dma_mapping_error(dma_addr_t dma_addr)
++{
++ return 0;
++}
++EXPORT_SYMBOL(xen_dma_mapping_error);
++
++int
++xen_dma_supported(struct device *dev, u64 mask)
++{
++ return 1;
++}
++EXPORT_SYMBOL(xen_dma_supported);
++
++void *
++xen_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ unsigned long vaddr;
++ unsigned int order = get_order(size);
++
++ vaddr = __get_free_pages(gfp, order);
++
++ if (!vaddr)
++ return NULL;
++
++ if (xen_create_contiguous_region(vaddr, order,
++ fls64(dev->coherent_dma_mask))) {
++ free_pages(vaddr, order);
++ return NULL;
++ }
++
++ memset((void *)vaddr, 0, size);
++ *dma_handle = virt_to_bus((void *)vaddr);
++
++ return (void *)vaddr;
++}
++EXPORT_SYMBOL(xen_alloc_coherent);
++
++void
++xen_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle)
++{
++ unsigned int order = get_order(size);
++
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++}
++EXPORT_SYMBOL(xen_free_coherent);
++
++dma_addr_t
++xen_map_single(struct device *dev, void *ptr, size_t size,
++ int direction)
++{
++ dma_addr_t dma_addr = gnttab_dma_map_virt(ptr);
++
++ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++
++ return dma_addr;
++}
++EXPORT_SYMBOL(xen_map_single);
++
++void
++xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ int direction)
++{
++ gnttab_dma_unmap_page(dma_addr);
++}
++EXPORT_SYMBOL(xen_unmap_single);
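++
++/*
++ * Typical driver-side pairing of the two calls above (a minimal usage
++ * sketch; dev, buf and len are placeholders, and DMA_TO_DEVICE stands
++ * for whatever transfer direction applies):
++ *
++ *	dma_addr_t h = xen_map_single(dev, buf, len, DMA_TO_DEVICE);
++ *	// ... program the device with h and wait for completion ...
++ *	xen_unmap_single(dev, h, len, DMA_TO_DEVICE);
++ *
++ * Every successful map should be matched by an unmap so that the
++ * reference taken via gnttab_dma_map_virt() is dropped again.
++ */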
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xenentry.S linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenentry.S
+--- linux-2.6.18.8/arch/ia64/xen/xenentry.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenentry.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,931 @@
++/*
++ * ia64/xen/entry.S
++ *
++ * Alternate kernel routines for Xen. Heavily leveraged from
++ * ia64/kernel/entry.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ * Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++
++#include <asm/asmmacro.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/kregs.h>
++#include <asm/asm-offsets.h>
++#include <asm/pgtable.h>
++#include <asm/percpu.h>
++#include <asm/processor.h>
++#include <asm/thread_info.h>
++#include <asm/unistd.h>
++
++#ifdef CONFIG_XEN
++#include "xenminstate.h"
++#else
++#include "minstate.h"
++#endif
++
++/*
++ * prev_task <- ia64_switch_to(struct task_struct *next)
++ * With Ingo's new scheduler, interrupts are disabled when this routine gets
++ * called. The code starting at .map relies on this. The rest of the code
++ * doesn't care about the interrupt masking status.
++ */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_switch_to)
++ .prologue
++ alloc r16=ar.pfs,1,0,0,0
++ movl r22=running_on_xen;;
++ ld4 r22=[r22];;
++ cmp.eq p7,p0=r22,r0
++(p7) br.cond.sptk.many __ia64_switch_to;;
++#else
++GLOBAL_ENTRY(ia64_switch_to)
++ .prologue
++ alloc r16=ar.pfs,1,0,0,0
++#endif
++ DO_SAVE_SWITCH_STACK
++ .body
++
++ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
++ movl r25=init_task
++ mov r27=IA64_KR(CURRENT_STACK)
++ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
++ dep r20=0,in0,61,3 // physical address of "next"
++ ;;
++ st8 [r22]=sp // save kernel stack pointer of old task
++ shr.u r26=r20,IA64_GRANULE_SHIFT
++ cmp.eq p7,p6=r25,in0
++ ;;
++ /*
++ * If we've already mapped this task's page, we can skip doing it again.
++ */
++(p6) cmp.eq p7,p6=r26,r27
++(p6) br.cond.dpnt .map
++ ;;
++.done:
++ ld8 sp=[r21] // load kernel stack pointer of new task
++#ifdef CONFIG_XEN
++ // update "current" application register
++ mov r8=IA64_KR_CURRENT
++ mov r9=in0;;
++ XEN_HYPER_SET_KR
++#else
++ mov IA64_KR(CURRENT)=in0 // update "current" application register
++#endif
++ mov r8=r13 // return pointer to previously running task
++ mov r13=in0 // set "current" pointer
++ ;;
++ DO_LOAD_SWITCH_STACK
++
++#ifdef CONFIG_SMP
++ sync.i // ensure "fc"s done by this CPU are visible on other CPUs
++#endif
++ br.ret.sptk.many rp // boogie on out in new context
++
++.map:
++#ifdef CONFIG_XEN
++ movl r25=XSI_PSR_IC // clear psr.ic
++ ;;
++ st4 [r25]=r0
++ ;;
++#else
++ rsm psr.ic // interrupts (psr.i) are already disabled here
++#endif
++ movl r25=PAGE_KERNEL
++ ;;
++ srlz.d
++ or r23=r25,r20 // construct PA | page properties
++ mov r25=IA64_GRANULE_SHIFT<<2
++ ;;
++#ifdef CONFIG_XEN
++ movl r8=XSI_ITIR
++ ;;
++ st8 [r8]=r25
++ ;;
++ movl r8=XSI_IFA
++ ;;
++ st8 [r8]=in0 // VA of next task...
++ ;;
++ mov r25=IA64_TR_CURRENT_STACK
++ // remember last page we mapped...
++ mov r8=IA64_KR_CURRENT_STACK
++ mov r9=r26;;
++ XEN_HYPER_SET_KR;;
++#else
++ mov cr.itir=r25
++ mov cr.ifa=in0 // VA of next task...
++ ;;
++ mov r25=IA64_TR_CURRENT_STACK
++ mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
++#endif
++ ;;
++ itr.d dtr[r25]=r23 // wire in new mapping...
++#ifdef CONFIG_XEN
++ ;;
++ srlz.d
++ mov r9=1
++ movl r8=XSI_PSR_IC
++ ;;
++ st4 [r8]=r9
++ ;;
++#else
++ ssm psr.ic // reenable the psr.ic bit
++ ;;
++ srlz.d
++#endif
++ br.cond.sptk .done
++#ifdef CONFIG_XEN
++END(xen_switch_to)
++#else
++END(ia64_switch_to)
++#endif
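++
++/*
++ * The prologue above shows the dispatch pattern shared by every xen_*
++ * entry point in this file; in rough C terms (a sketch only):
++ *
++ *	if (!running_on_xen)
++ *		goto __ia64_switch_to;	// untouched native path
++ *
++ * On the Xen path, privileged cr/psr accesses become plain loads and
++ * stores to the XSI_* slots of the shared area, or XEN_HYPER_*
++ * hypercalls (e.g. XEN_HYPER_SET_KR above) where a store won't do.
++ */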
++
++ /*
++ * Invoke a system call, but do some tracing before and after the call.
++ * We MUST preserve the current register frame throughout this routine
++ * because some system calls (such as ia64_execve) directly
++ * manipulate ar.pfs.
++ */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_trace_syscall)
++ PT_REGS_UNWIND_INFO(0)
++ movl r16=running_on_xen;;
++ ld4 r16=[r16];;
++ cmp.eq p7,p0=r16,r0
++(p7) br.cond.sptk.many __ia64_trace_syscall;;
++#else
++GLOBAL_ENTRY(ia64_trace_syscall)
++ PT_REGS_UNWIND_INFO(0)
++#endif
++ /*
++ * We need to preserve the scratch registers f6-f11 in case the system
++ * call is sigreturn.
++ */
++ adds r16=PT(F6)+16,sp
++ adds r17=PT(F7)+16,sp
++ ;;
++ stf.spill [r16]=f6,32
++ stf.spill [r17]=f7,32
++ ;;
++ stf.spill [r16]=f8,32
++ stf.spill [r17]=f9,32
++ ;;
++ stf.spill [r16]=f10
++ stf.spill [r17]=f11
++ br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
++ adds r16=PT(F6)+16,sp
++ adds r17=PT(F7)+16,sp
++ ;;
++ ldf.fill f6=[r16],32
++ ldf.fill f7=[r17],32
++ ;;
++ ldf.fill f8=[r16],32
++ ldf.fill f9=[r17],32
++ ;;
++ ldf.fill f10=[r16]
++ ldf.fill f11=[r17]
++ // the syscall number may have changed, so re-load it and re-calculate the
++ // syscall entry-point:
++ adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #)
++ ;;
++ ld8 r15=[r15]
++ mov r3=NR_syscalls - 1
++ ;;
++ adds r15=-1024,r15
++ movl r16=sys_call_table
++ ;;
++ shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
++ cmp.leu p6,p7=r15,r3
++ ;;
++(p6) ld8 r20=[r20] // load address of syscall entry point
++(p7) movl r20=sys_ni_syscall
++ ;;
++ mov b6=r20
++ br.call.sptk.many rp=b6 // do the syscall
++.strace_check_retval:
++ cmp.lt p6,p0=r8,r0 // syscall failed?
++ adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
++ adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
++ mov r10=0
++(p6) br.cond.sptk strace_error // syscall failed ->
++ ;; // avoid RAW on r10
++.strace_save_retval:
++.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
++.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
++ br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
++.ret3:
++(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
++ br.cond.sptk .work_pending_syscall_end
++
++strace_error:
++ ld8 r3=[r2] // load pt_regs.r8
++ sub r9=0,r8 // negate return value to get errno value
++ ;;
++ cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
++ adds r3=16,r2 // r3=&pt_regs.r10
++ ;;
++(p6) mov r10=-1
++(p6) mov r8=r9
++ br.cond.sptk .strace_save_retval
++#ifdef CONFIG_XEN
++END(xen_trace_syscall)
++#else
++END(ia64_trace_syscall)
++#endif
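++
++/*
++ * Worked example for strace_error above (illustrative): if the traced
++ * syscall returned -ENOENT, r8 = -2 and r9 = 0 - r8 = 2; the error
++ * path then stores back r8 = 2 and r10 = -1, the ia64 convention in
++ * which r10 == -1 flags failure and r8 carries the positive errno.
++ */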
++
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_ret_from_clone)
++ PT_REGS_UNWIND_INFO(0)
++ movl r16=running_on_xen;;
++ ld4 r16=[r16];;
++ cmp.eq p7,p0=r16,r0
++(p7) br.cond.sptk.many __ia64_ret_from_clone;;
++#else
++GLOBAL_ENTRY(ia64_ret_from_clone)
++ PT_REGS_UNWIND_INFO(0)
++#endif
++{ /*
++ * Some versions of gas generate bad unwind info if the first instruction of a
++ * procedure doesn't go into the first slot of a bundle. This is a workaround.
++ */
++ nop.m 0
++ nop.i 0
++ /*
++ * We need to call schedule_tail() to complete the scheduling process.
++ * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the
++ * address of the previously executing task.
++ */
++ br.call.sptk.many rp=ia64_invoke_schedule_tail
++}
++.ret8:
++ adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
++ ;;
++ ld4 r2=[r2]
++ ;;
++ mov r8=0
++ and r2=_TIF_SYSCALL_TRACEAUDIT,r2
++ ;;
++ cmp.ne p6,p0=r2,r0
++(p6) br.cond.spnt .strace_check_retval
++ ;; // added stop bits to prevent r8 dependency
++#ifdef CONFIG_XEN
++ br.cond.sptk ia64_ret_from_syscall
++END(xen_ret_from_clone)
++#else
++END(ia64_ret_from_clone)
++#endif
++/*
++ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
++ * need to switch to bank 0 and doesn't restore the scratch registers.
++ * To avoid leaking kernel bits, the scratch registers are set to
++ * the following known-to-be-safe values:
++ *
++ * r1: restored (global pointer)
++ * r2: cleared
++ * r3: 1 (when returning to user-level)
++ * r8-r11: restored (syscall return value(s))
++ * r12: restored (user-level stack pointer)
++ * r13: restored (user-level thread pointer)
++ * r14: set to __kernel_syscall_via_epc
++ * r15: restored (syscall #)
++ * r16-r17: cleared
++ * r18: user-level b6
++ * r19: cleared
++ * r20: user-level ar.fpsr
++ * r21: user-level b0
++ * r22: cleared
++ * r23: user-level ar.bspstore
++ * r24: user-level ar.rnat
++ * r25: user-level ar.unat
++ * r26: user-level ar.pfs
++ * r27: user-level ar.rsc
++ * r28: user-level ip
++ * r29: user-level psr
++ * r30: user-level cfm
++ * r31: user-level pr
++ * f6-f11: cleared
++ * pr: restored (user-level pr)
++ * b0: restored (user-level rp)
++ * b6: restored
++ * b7: set to __kernel_syscall_via_epc
++ * ar.unat: restored (user-level ar.unat)
++ * ar.pfs: restored (user-level ar.pfs)
++ * ar.rsc: restored (user-level ar.rsc)
++ * ar.rnat: restored (user-level ar.rnat)
++ * ar.bspstore: restored (user-level ar.bspstore)
++ * ar.fpsr: restored (user-level ar.fpsr)
++ * ar.ccv: cleared
++ * ar.csd: cleared
++ * ar.ssd: cleared
++ */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_leave_syscall)
++ PT_REGS_UNWIND_INFO(0)
++ movl r22=running_on_xen;;
++ ld4 r22=[r22];;
++ cmp.eq p7,p0=r22,r0
++(p7) br.cond.sptk.many __ia64_leave_syscall;;
++#else
++ENTRY(ia64_leave_syscall)
++ PT_REGS_UNWIND_INFO(0)
++#endif
++ /*
++ * work.need_resched etc. mustn't get changed by this CPU before it returns to
++ * user- or fsys-mode, hence we disable interrupts early on.
++ *
++ * p6 controls whether current_thread_info()->flags needs to be checked for
++ * extra work. We always check for extra work when returning to user-level.
++ * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
++ * is 0. After extra work processing has been completed, execution
++ * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
++ * needs to be redone.
++ */
++#ifdef CONFIG_PREEMPT
++ rsm psr.i // disable interrupts
++ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
++(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
++ ;;
++ .pred.rel.mutex pUStk,pKStk
++(pKStk) ld4 r21=[r20] // r21 <- preempt_count
++(pUStk) mov r21=0 // r21 <- 0
++ ;;
++ cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
++#else /* !CONFIG_PREEMPT */
++#ifdef CONFIG_XEN
++ movl r2=XSI_PSR_I_ADDR
++ mov r18=1
++ ;;
++ ld8 r2=[r2]
++ ;;
++(pUStk) st1 [r2]=r18
++#else
++(pUStk) rsm psr.i
++#endif
++ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
++(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
++#endif
++.work_processed_syscall:
++ adds r2=PT(LOADRS)+16,r12
++ adds r3=PT(AR_BSPSTORE)+16,r12
++ adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
++ ;;
++(p6) ld4 r31=[r18] // load current_thread_info()->flags
++ ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
++ nop.i 0
++ ;;
++ mov r16=ar.bsp // M2 get existing backing store pointer
++ ld8 r18=[r2],PT(R9)-PT(B6) // load b6
++(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
++ ;;
++ ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
++(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
++(p6) br.cond.spnt .work_pending_syscall
++ ;;
++ // start restoring the state saved on the kernel stack (struct pt_regs):
++ ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
++ ld8 r11=[r3],PT(CR_IIP)-PT(R11)
++(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
++ ;;
++ invala // M0|1 invalidate ALAT
++#ifdef CONFIG_XEN
++ movl r28=XSI_PSR_I_ADDR
++ movl r29=XSI_PSR_IC
++ ;;
++ ld8 r28=[r28]
++ mov r30=1
++ ;;
++ st1 [r28]=r30
++ st4 [r29]=r0 // note: clears both vpsr.i and vpsr.ic!
++ ;;
++#else
++ rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection
++#endif
++ cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs
++
++ ld8 r29=[r2],16 // M0|1 load cr.ipsr
++ ld8 r28=[r3],16 // M0|1 load cr.iip
++ mov r22=r0 // A clear r22
++ ;;
++ ld8 r30=[r2],16 // M0|1 load cr.ifs
++ ld8 r25=[r3],16 // M0|1 load ar.unat
++(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
++ ;;
++ ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
++#ifdef CONFIG_XEN
++(pKStk) mov r21=r8
++(pKStk) XEN_HYPER_GET_PSR
++ ;;
++(pKStk) mov r22=r8
++(pKStk) mov r8=r21
++ ;;
++#else
++(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
++#endif
++ nop 0
++ ;;
++ ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
++ ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
++ mov f6=f0 // F clear f6
++ ;;
++ ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
++ ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
++ mov f7=f0 // F clear f7
++ ;;
++ ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
++ ld8.fill r1=[r3],16 // M0|1 load r1
++(pUStk) mov r17=1 // A
++ ;;
++(pUStk) st1 [r14]=r17 // M2|3
++ ld8.fill r13=[r3],16 // M0|1
++ mov f8=f0 // F clear f8
++ ;;
++ ld8.fill r12=[r2] // M0|1 restore r12 (sp)
++ ld8.fill r15=[r3] // M0|1 restore r15
++ mov b6=r18 // I0 restore b6
++
++ addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
++ mov f9=f0 // F clear f9
++(pKStk) br.cond.dpnt.many skip_rbs_switch // B
++
++ srlz.d // M0 ensure interruption collection is off (for cover)
++ shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
++#ifdef CONFIG_XEN
++ XEN_HYPER_COVER;
++#else
++ cover // B add current frame into dirty partition & set cr.ifs
++#endif
++ ;;
++(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8
++ mov r19=ar.bsp // M2 get new backing store pointer
++ mov f10=f0 // F clear f10
++
++ nop.m 0
++ movl r14=__kernel_syscall_via_epc // X
++ ;;
++ mov.m ar.csd=r0 // M2 clear ar.csd
++ mov.m ar.ccv=r0 // M2 clear ar.ccv
++ mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)
++
++ mov.m ar.ssd=r0 // M2 clear ar.ssd
++ mov f11=f0 // F clear f11
++ br.cond.sptk.many rbs_switch // B
++#ifdef CONFIG_XEN
++END(xen_leave_syscall)
++#else
++END(ia64_leave_syscall)
++#endif
++
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_leave_kernel)
++ PT_REGS_UNWIND_INFO(0)
++ movl r22=running_on_xen;;
++ ld4 r22=[r22];;
++ cmp.eq p7,p0=r22,r0
++(p7) br.cond.sptk.many __ia64_leave_kernel;;
++#else
++GLOBAL_ENTRY(ia64_leave_kernel)
++ PT_REGS_UNWIND_INFO(0)
++#endif
++ /*
++ * work.need_resched etc. mustn't get changed by this CPU before it returns to
++ * user- or fsys-mode, hence we disable interrupts early on.
++ *
++ * p6 controls whether current_thread_info()->flags needs to be checked for
++ * extra work. We always check for extra work when returning to user-level.
++ * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
++ * is 0. After extra work processing has been completed, execution
++ * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
++ * needs to be redone.
++ */
++#ifdef CONFIG_PREEMPT
++ rsm psr.i // disable interrupts
++ cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
++(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
++ ;;
++ .pred.rel.mutex pUStk,pKStk
++(pKStk) ld4 r21=[r20] // r21 <- preempt_count
++(pUStk) mov r21=0 // r21 <- 0
++ ;;
++ cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
++#else
++#ifdef CONFIG_XEN
++(pUStk) movl r17=XSI_PSR_I_ADDR
++(pUStk) mov r31=1
++ ;;
++(pUStk) ld8 r17=[r17]
++ ;;
++(pUStk) st1 [r17]=r31
++ ;;
++#else
++(pUStk) rsm psr.i
++#endif
++ cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
++(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
++#endif
++.work_processed_kernel:
++ adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
++ ;;
++(p6) ld4 r31=[r17] // load current_thread_info()->flags
++ adds r21=PT(PR)+16,r12
++ ;;
++
++ lfetch [r21],PT(CR_IPSR)-PT(PR)
++ adds r2=PT(B6)+16,r12
++ adds r3=PT(R16)+16,r12
++ ;;
++ lfetch [r21]
++ ld8 r28=[r2],8 // load b6
++ adds r29=PT(R24)+16,r12
++
++ ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
++ adds r30=PT(AR_CCV)+16,r12
++(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
++ ;;
++ ld8.fill r24=[r29]
++ ld8 r15=[r30] // load ar.ccv
++(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
++ ;;
++ ld8 r29=[r2],16 // load b7
++ ld8 r30=[r3],16 // load ar.csd
++(p6) br.cond.spnt .work_pending
++ ;;
++ ld8 r31=[r2],16 // load ar.ssd
++ ld8.fill r8=[r3],16
++ ;;
++ ld8.fill r9=[r2],16
++ ld8.fill r10=[r3],PT(R17)-PT(R10)
++ ;;
++ ld8.fill r11=[r2],PT(R18)-PT(R11)
++ ld8.fill r17=[r3],16
++ ;;
++ ld8.fill r18=[r2],16
++ ld8.fill r19=[r3],16
++ ;;
++ ld8.fill r20=[r2],16
++ ld8.fill r21=[r3],16
++ mov ar.csd=r30
++ mov ar.ssd=r31
++ ;;
++#ifdef CONFIG_XEN
++ movl r23=XSI_PSR_I_ADDR
++ movl r22=XSI_PSR_IC
++ ;;
++ ld8 r23=[r23]
++ mov r25=1
++ ;;
++ st1 [r23]=r25
++ st4 [r22]=r0 // note: clears both vpsr.i and vpsr.ic!
++ ;;
++#else
++ rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
++#endif
++ invala // invalidate ALAT
++ ;;
++ ld8.fill r22=[r2],24
++ ld8.fill r23=[r3],24
++ mov b6=r28
++ ;;
++ ld8.fill r25=[r2],16
++ ld8.fill r26=[r3],16
++ mov b7=r29
++ ;;
++ ld8.fill r27=[r2],16
++ ld8.fill r28=[r3],16
++ ;;
++ ld8.fill r29=[r2],16
++ ld8.fill r30=[r3],24
++ ;;
++ ld8.fill r31=[r2],PT(F9)-PT(R31)
++ adds r3=PT(F10)-PT(F6),r3
++ ;;
++ ldf.fill f9=[r2],PT(F6)-PT(F9)
++ ldf.fill f10=[r3],PT(F8)-PT(F10)
++ ;;
++ ldf.fill f6=[r2],PT(F7)-PT(F6)
++ ;;
++ ldf.fill f7=[r2],PT(F11)-PT(F7)
++ ldf.fill f8=[r3],32
++ ;;
++ srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
++ mov ar.ccv=r15
++ ;;
++ ldf.fill f11=[r2]
++#ifdef CONFIG_XEN
++ ;;
++ // r16-r31 all now hold bank1 values
++ mov r15=ar.unat
++ movl r2=XSI_BANK1_R16
++ movl r3=XSI_BANK1_R16+8
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r16,16
++.mem.offset 8,0; st8.spill [r3]=r17,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r18,16
++.mem.offset 8,0; st8.spill [r3]=r19,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r20,16
++.mem.offset 8,0; st8.spill [r3]=r21,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r22,16
++.mem.offset 8,0; st8.spill [r3]=r23,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r24,16
++.mem.offset 8,0; st8.spill [r3]=r25,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r26,16
++.mem.offset 8,0; st8.spill [r3]=r27,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r28,16
++.mem.offset 8,0; st8.spill [r3]=r29,16
++ ;;
++.mem.offset 0,0; st8.spill [r2]=r30,16
++.mem.offset 8,0; st8.spill [r3]=r31,16
++ ;;
++ mov r3=ar.unat
++ movl r2=XSI_B1NAT
++ ;;
++ st8 [r2]=r3
++ mov ar.unat=r15
++ movl r2=XSI_BANKNUM;;
++ st4 [r2]=r0;
++#else
++ bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
++#endif
++ ;;
++(pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
++ adds r16=PT(CR_IPSR)+16,r12
++ adds r17=PT(CR_IIP)+16,r12
++
++#ifdef CONFIG_XEN
++(pKStk) mov r29=r8
++(pKStk) XEN_HYPER_GET_PSR
++ ;;
++(pKStk) mov r22=r8
++(pKStk) mov r8=r29
++ ;;
++#else
++(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
++#endif
++ nop.i 0
++ nop.i 0
++ ;;
++ ld8 r29=[r16],16 // load cr.ipsr
++ ld8 r28=[r17],16 // load cr.iip
++ ;;
++ ld8 r30=[r16],16 // load cr.ifs
++ ld8 r25=[r17],16 // load ar.unat
++ ;;
++ ld8 r26=[r16],16 // load ar.pfs
++ ld8 r27=[r17],16 // load ar.rsc
++ cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
++ ;;
++ ld8 r24=[r16],16 // load ar.rnat (may be garbage)
++ ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
++ ;;
++ ld8 r31=[r16],16 // load predicates
++ ld8 r21=[r17],16 // load b0
++ ;;
++ ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
++ ld8.fill r1=[r17],16 // load r1
++ ;;
++ ld8.fill r12=[r16],16
++ ld8.fill r13=[r17],16
++(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
++ ;;
++ ld8 r20=[r16],16 // ar.fpsr
++ ld8.fill r15=[r17],16
++ ;;
++ ld8.fill r14=[r16],16
++ ld8.fill r2=[r17]
++(pUStk) mov r17=1
++ ;;
++ ld8.fill r3=[r16]
++(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
++ shr.u r18=r19,16 // get byte size of existing "dirty" partition
++ ;;
++ mov r16=ar.bsp // get existing backing store pointer
++ addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
++ ;;
++ ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
++(pKStk) br.cond.dpnt skip_rbs_switch
++
++ /*
++ * Restore user backing store.
++ *
++ * NOTE: alloc, loadrs, and cover can't be predicated.
++ */
++(pNonSys) br.cond.dpnt dont_preserve_current_frame
++
++#ifdef CONFIG_XEN
++ XEN_HYPER_COVER;
++#else
++ cover // add current frame into dirty partition and set cr.ifs
++#endif
++ ;;
++ mov r19=ar.bsp // get new backing store pointer
++rbs_switch:
++ sub r16=r16,r18 // krbs = old bsp - size of dirty partition
++ cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
++ ;;
++ sub r19=r19,r16 // calculate total byte size of dirty partition
++ add r18=64,r18 // don't force in0-in7 into memory...
++ ;;
++ shl r19=r19,16 // shift size of dirty partition into loadrs position
++ ;;
++dont_preserve_current_frame:
++ /*
++ * To prevent leaking bits between the kernel and user-space,
++ * we must clear the stacked registers in the "invalid" partition here.
++ * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
++ * 5 registers/cycle on McKinley).
++ */
++# define pRecurse p6
++# define pReturn p7
++#ifdef CONFIG_ITANIUM
++# define Nregs 10
++#else
++# define Nregs 14
++#endif
++ alloc loc0=ar.pfs,2,Nregs-2,2,0
++ shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
++ sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
++ ;;
++ mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
++ shladd in0=loc1,3,r17
++ mov in1=0
++ ;;
++ TEXT_ALIGN(32)
++rse_clear_invalid:
++#ifdef CONFIG_ITANIUM
++ // cycle 0
++ { .mii
++ alloc loc0=ar.pfs,2,Nregs-2,2,0
++ cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
++ add out0=-Nregs*8,in0
++}{ .mfb
++ add out1=1,in1 // increment recursion count
++ nop.f 0
++ nop.b 0 // can't do br.call here because of alloc (WAW on CFM)
++ ;;
++}{ .mfi // cycle 1
++ mov loc1=0
++ nop.f 0
++ mov loc2=0
++}{ .mib
++ mov loc3=0
++ mov loc4=0
++(pRecurse) br.call.sptk.many b0=rse_clear_invalid
++
++}{ .mfi // cycle 2
++ mov loc5=0
++ nop.f 0
++ cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
++}{ .mib
++ mov loc6=0
++ mov loc7=0
++(pReturn) br.ret.sptk.many b0
++}
++#else /* !CONFIG_ITANIUM */
++ alloc loc0=ar.pfs,2,Nregs-2,2,0
++ cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
++ add out0=-Nregs*8,in0
++ add out1=1,in1 // increment recursion count
++ mov loc1=0
++ mov loc2=0
++ ;;
++ mov loc3=0
++ mov loc4=0
++ mov loc5=0
++ mov loc6=0
++ mov loc7=0
++(pRecurse) br.call.dptk.few b0=rse_clear_invalid
++ ;;
++ mov loc8=0
++ mov loc9=0
++ cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
++ mov loc10=0
++ mov loc11=0
++(pReturn) br.ret.dptk.many b0
++#endif /* !CONFIG_ITANIUM */
++# undef pRecurse
++# undef pReturn
++ ;;
++ alloc r17=ar.pfs,0,0,0,0 // drop current register frame
++ ;;
++ loadrs
++ ;;
++skip_rbs_switch:
++ mov ar.unat=r25 // M2
++(pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22
++(pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
++ ;;
++(pUStk) mov ar.bspstore=r23 // M2
++(pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
++(pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
++ ;;
++#ifdef CONFIG_XEN
++ movl r25=XSI_IPSR
++ ;;
++ st8 [r25]=r29,XSI_IFS_OFS-XSI_IPSR_OFS
++ ;;
++#else
++ mov cr.ipsr=r29 // M2
++#endif
++ mov ar.pfs=r26 // I0
++(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
++
++#ifdef CONFIG_XEN
++(p9) st8 [r25]=r30
++ ;;
++ adds r25=XSI_IIP_OFS-XSI_IFS_OFS,r25
++ ;;
++#else
++(p9) mov cr.ifs=r30 // M2
++#endif
++ mov b0=r21 // I0
++(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
++
++ mov ar.fpsr=r20 // M2
++#ifdef CONFIG_XEN
++ st8 [r25]=r28
++#else
++ mov cr.iip=r28 // M2
++#endif
++ nop 0
++ ;;
++(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
++ nop 0
++(pLvSys)mov r2=r0
++
++ mov ar.rsc=r27 // M2
++ mov pr=r31,-1 // I0
++#ifdef CONFIG_XEN
++ ;;
++ XEN_HYPER_RFI;
++#else
++ rfi // B
++#endif
++
++ /*
++ * On entry:
++ * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
++ * r31 = current->thread_info->flags
++ * On exit:
++ * p6 = TRUE if work-pending-check needs to be redone
++ */
++.work_pending_syscall:
++ add r2=-8,r2
++ add r3=-8,r3
++ ;;
++ st8 [r2]=r8
++ st8 [r3]=r10
++.work_pending:
++ tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
++(p6) br.cond.sptk.few .notify
++#ifdef CONFIG_PREEMPT
++(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
++ ;;
++(pKStk) st4 [r20]=r21
++ ssm psr.i // enable interrupts
++#endif
++ br.call.spnt.many rp=schedule
++.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
++#ifdef CONFIG_XEN
++ movl r2=XSI_PSR_I_ADDR
++ mov r20=1
++ ;;
++ ld8 r2=[r2]
++ ;;
++ st1 [r2]=r20
++#else
++ rsm psr.i // disable interrupts
++#endif
++ ;;
++#ifdef CONFIG_PREEMPT
++(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
++ ;;
++(pKStk) st4 [r20]=r0 // preempt_count() <- 0
++#endif
++(pLvSys)br.cond.sptk.few .work_pending_syscall_end
++ br.cond.sptk.many .work_processed_kernel // re-check
++
++.notify:
++(pUStk) br.call.spnt.many rp=notify_resume_user
++.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
++(pLvSys)br.cond.sptk.few .work_pending_syscall_end
++ br.cond.sptk.many .work_processed_kernel // don't re-check
++
++.work_pending_syscall_end:
++ adds r2=PT(R8)+16,r12
++ adds r3=PT(R10)+16,r12
++ ;;
++ ld8 r8=[r2]
++ ld8 r10=[r3]
++ br.cond.sptk.many .work_processed_syscall // re-check
++
++#ifdef CONFIG_XEN
++END(xen_leave_kernel)
++#else
++END(ia64_leave_kernel)
++#endif
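++
++/*
++ * Unlike native ia64_leave_kernel, which flips register banks with
++ * bsw.0, the Xen path above spills bank-1 r16-r31 and their NaT bits
++ * by hand into the XSI_BANK1_R16/XSI_B1NAT slots of the shared area,
++ * then writes 0 to XSI_BANKNUM so the hypervisor treats the guest as
++ * being in bank 0.
++ */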
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xenivt.S linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenivt.S
+--- linux-2.6.18.8/arch/ia64/xen/xenivt.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenivt.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2178 @@
++/*
++ * arch/ia64/xen/ivt.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ * Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++/*
++ * This file defines the interruption vector table used by the CPU.
++ * It does not include one entry per possible cause of interruption.
++ *
++ * The first 20 entries of the table contain 64 bundles each while the
++ * remaining 48 entries contain only 16 bundles each.
++ *
++ * The 64 bundles are used to allow inlining the whole handler for critical
++ * interruptions like TLB misses.
++ *
++ * For each entry, the comment is as follows:
++ *
++ * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
++ * entry offset ----/ / / / /
++ * entry number ---------/ / / /
++ * size of the entry -------------/ / /
++ * vector name -------------------------------------/ /
++ * interruptions triggering this vector ----------------------/
++ *
++ * The table is 32KB in size and must be aligned on 32KB boundary.
++ * (The CPU ignores the 15 lower bits of the address)
++ *
++ * Table is based upon EAS2.6 (Oct 1999)
++ */
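++
++/*
++ * Size check (each ia64 instruction bundle is 16 bytes):
++ *   20 entries * 64 bundles * 16 bytes = 20480 bytes
++ * + 48 entries * 16 bundles * 16 bytes = 12288 bytes
++ *                                      = 32768 bytes = 32KB,
++ * which is why the table must be aligned on a 32KB boundary.
++ */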
++
++#include <asm/asmmacro.h>
++#include <asm/break.h>
++#include <asm/ia32.h>
++#include <asm/kregs.h>
++#include <asm/asm-offsets.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/ptrace.h>
++#include <asm/system.h>
++#include <asm/thread_info.h>
++#include <asm/unistd.h>
++#include <asm/errno.h>
++
++#ifdef CONFIG_XEN
++#define ia64_ivt xen_ivt
++#endif
++
++#if 1
++# define PSR_DEFAULT_BITS psr.ac
++#else
++# define PSR_DEFAULT_BITS 0
++#endif
++
++#if 0
++ /*
++ * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
++ * needed for something else before enabling this...
++ */
++# define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
++#else
++# define DBG_FAULT(i)
++#endif
++
++#define MINSTATE_VIRT /* needed by minstate.h */
++#include "xenminstate.h"
++
++#define FAULT(n) \
++ mov r31=pr; \
++ mov r19=n;; /* prepare to save predicates */ \
++ br.sptk.many dispatch_to_fault_handler
++
++ .section .text.ivt,"ax"
++
++ .align 32768 // align on 32KB boundary
++ .global ia64_ivt
++ia64_ivt:
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
++ENTRY(vhpt_miss)
++ DBG_FAULT(0)
++ /*
++ * The VHPT vector is invoked when the TLB entry for the virtual page table
++ * is missing. This happens only as a result of a previous
++ * (the "original") TLB miss, which may either be caused by an instruction
++ * fetch or a data access (or non-access).
++ *
++ * What we do here is normal TLB miss handling for the _original_ miss,
++ * followed by inserting the TLB entry for the virtual page table page
++ * that the VHPT walker was attempting to access. The latter gets
++ * inserted as long as the page table entries above the pte level have valid
++ * mappings for the faulting address. The TLB entry for the original
++ * miss gets inserted only if the pte entry indicates that the page is
++ * present.
++ *
++ * do_page_fault gets invoked in the following cases:
++ * - the faulting virtual address uses unimplemented address bits
++ * - the faulting virtual address has no valid page table mapping
++ */
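++ /*
++ * In rough C terms, the walk below is (a sketch; pud_offset exists
++ * only under CONFIG_PGTABLE_4):
++ *
++ *	pgd = pgd_offset(mm, ifa);	// region 5 uses swapper_pg_dir
++ *	pmd = pmd_offset(pud_offset(pgd, ifa), ifa);
++ *	pte = pte_offset(pmd, ifa);
++ *	if (pte_present(*pte))
++ *		itc(*pte);		// itc.i or itc.d per cr.isr bit 32
++ *	else
++ *		page_fault();
++ */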
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++#ifdef CONFIG_HUGETLB_PAGE
++ movl r18=PAGE_SHIFT
++ movl r25=XSI_ITIR
++ ;;
++ ld8 r25=[r25]
++#endif
++ ;;
++#else
++ mov r16=cr.ifa // get address that caused the TLB miss
++#ifdef CONFIG_HUGETLB_PAGE
++ movl r18=PAGE_SHIFT
++ mov r25=cr.itir
++#endif
++#endif
++ ;;
++#ifdef CONFIG_XEN
++ XEN_HYPER_RSM_PSR_DT;
++#else
++ rsm psr.dt // use physical addressing for data
++#endif
++ mov r31=pr // save the predicate registers
++ mov r19=IA64_KR(PT_BASE) // get page table base address
++ shl r21=r16,3 // shift bit 60 into sign bit
++ shr.u r17=r16,61 // get the region number into r17
++ ;;
++ shr.u r22=r21,3
++#ifdef CONFIG_HUGETLB_PAGE
++ extr.u r26=r25,2,6
++ ;;
++ cmp.ne p8,p0=r18,r26
++ sub r27=r26,r18
++ ;;
++(p8) dep r25=r18,r25,2,6
++(p8) shr r22=r22,r27
++#endif
++ ;;
++ cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
++ shr.u r18=r22,PGDIR_SHIFT // get bottom portion of pgd index bit
++ ;;
++(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
++
++ srlz.d
++ LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
++
++ .pred.rel "mutex", p6, p7
++(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
++(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
++ ;;
++(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
++(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
++ cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
++#ifdef CONFIG_PGTABLE_4
++ shr.u r28=r22,PUD_SHIFT // shift pud index into position
++#else
++ shr.u r18=r22,PMD_SHIFT // shift pmd index into position
++#endif
++ ;;
++ ld8 r17=[r17] // get *pgd (may be 0)
++ ;;
++(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
++#ifdef CONFIG_PGTABLE_4
++ dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr)
++ ;;
++ shr.u r18=r22,PMD_SHIFT // shift pmd index into position
++(p7) ld8 r29=[r28] // get *pud (may be 0)
++ ;;
++(p7) cmp.eq.or.andcm p6,p7=r29,r0 // was pud_present(*pud) == NULL?
++ dep r17=r18,r29,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
++#else
++ dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pgd,addr)
++#endif
++ ;;
++(p7) ld8 r20=[r17] // get *pmd (may be 0)
++ shr.u r19=r22,PAGE_SHIFT // shift pte index into position
++ ;;
++(p7) cmp.eq.or.andcm p6,p7=r20,r0 // was pmd_present(*pmd) == NULL?
++ dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr)
++ ;;
++(p7) ld8 r18=[r21] // read *pte
++#ifdef CONFIG_XEN
++ movl r19=XSI_ISR
++ ;;
++ ld8 r19=[r19]
++#else
++ mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss
++#endif
++ ;;
++(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
++#ifdef CONFIG_XEN
++ movl r22=XSI_IHA
++ ;;
++ ld8 r22=[r22]
++#else
++ mov r22=cr.iha // get the VHPT address that caused the TLB miss
++#endif
++ ;; // avoid RAW on p7
++(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
++ dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
++ ;;
++#ifdef CONFIG_XEN
++ mov r24=r8
++ mov r8=r18
++ ;;
++(p10) XEN_HYPER_ITC_I
++ ;;
++(p11) XEN_HYPER_ITC_D
++ ;;
++ mov r8=r24
++ ;;
++#else
++(p10) itc.i r18 // insert the instruction TLB entry
++(p11) itc.d r18 // insert the data TLB entry
++#endif
++(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
++#ifdef CONFIG_XEN
++ movl r24=XSI_IFA
++ ;;
++ st8 [r24]=r22
++ ;;
++#else
++ mov cr.ifa=r22
++#endif
++
++#ifdef CONFIG_HUGETLB_PAGE
++(p8) mov cr.itir=r25 // change to default page-size for VHPT
++#endif
++
++ /*
++ * Now compute and insert the TLB entry for the virtual page table. We never
++ * execute in a page table page so there is no need to set the exception deferral
++ * bit.
++ */
++ adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
++ ;;
++#ifdef CONFIG_XEN
++(p7) mov r25=r8
++(p7) mov r8=r24
++ ;;
++(p7) XEN_HYPER_ITC_D
++ ;;
++(p7) mov r8=r25
++ ;;
++#else
++(p7) itc.d r24
++#endif
++ ;;
++#ifdef CONFIG_SMP
++ /*
++ * Tell the assembler's dependency-violation checker that the above "itc" instructions
++ * cannot possibly affect the following loads:
++ */
++ dv_serialize_data
++
++ /*
++ * Re-check the pagetable entries. If they changed, we may have received a ptc.g
++ * between reading the pagetable and the "itc". If so, flush the entry we
++ * inserted and retry. At this point, we have:
++ *
++ * r28 = equivalent of pud_offset(pgd, ifa)
++ * r17 = equivalent of pmd_offset(pud, ifa)
++ * r21 = equivalent of pte_offset(pmd, ifa)
++ *
++ * r29 = *pud
++ * r20 = *pmd
++ * r18 = *pte
++ */
++ ld8 r25=[r21] // read *pte again
++ ld8 r26=[r17] // read *pmd again
++#ifdef CONFIG_PGTABLE_4
++ ld8 r19=[r28] // read *pud again
++#endif
++ cmp.ne p6,p7=r0,r0
++ ;;
++ cmp.ne.or.andcm p6,p7=r26,r20 // did *pmd change
++#ifdef CONFIG_PGTABLE_4
++ cmp.ne.or.andcm p6,p7=r19,r29 // did *pud change
++#endif
++ mov r27=PAGE_SHIFT<<2
++ ;;
++(p6) ptc.l r22,r27 // purge PTE page translation
++(p7) cmp.ne.or.andcm p6,p7=r25,r18 // did *pte change
++ ;;
++(p6) ptc.l r16,r27 // purge translation
++#endif
++
++ mov pr=r31,-1 // restore predicate registers
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI
++ dv_serialize_data
++#else
++ rfi
++#endif
++END(vhpt_miss)
++
++ .org ia64_ivt+0x400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
++ENTRY(itlb_miss)
++ DBG_FAULT(1)
++ /*
++ * The ITLB handler accesses the PTE via the virtually mapped linear
++ * page table. If a nested TLB miss occurs, we switch into physical
++ * mode, walk the page table, and then re-execute the PTE read and
++ * go on normally after that.
++ */
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++#else
++ mov r16=cr.ifa // get virtual address
++#endif
++ mov r29=b0 // save b0
++ mov r31=pr // save predicates
++.itlb_fault:
++#ifdef CONFIG_XEN
++ movl r17=XSI_IHA
++ ;;
++ ld8 r17=[r17] // get virtual address of L3 PTE
++#else
++ mov r17=cr.iha // get virtual address of PTE
++#endif
++ movl r30=1f // load nested fault continuation point
++ ;;
++1: ld8 r18=[r17] // read *pte
++ ;;
++ mov b0=r29
++ tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
++(p6) br.cond.spnt page_fault
++ ;;
++#ifdef CONFIG_XEN
++ mov r19=r8
++ mov r8=r18
++ ;;
++ XEN_HYPER_ITC_I
++ ;;
++ mov r8=r19
++#else
++ itc.i r18
++#endif
++ ;;
++#ifdef CONFIG_SMP
++ /*
++ * Tell the assembler's dependency-violation checker that the above "itc" instructions
++ * cannot possibly affect the following loads:
++ */
++ dv_serialize_data
++
++ ld8 r19=[r17] // read *pte again and see if same
++ mov r20=PAGE_SHIFT<<2 // setup page size for purge
++ ;;
++ cmp.ne p7,p0=r18,r19
++ ;;
++(p7) ptc.l r16,r20
++#endif
++ mov pr=r31,-1
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI
++ dv_serialize_data
++#else
++ rfi
++#endif
++END(itlb_miss)
++
++ .org ia64_ivt+0x0800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
++ENTRY(dtlb_miss)
++ DBG_FAULT(2)
++ /*
++ * The DTLB handler accesses the PTE via the virtually mapped linear
++ * page table. If a nested TLB miss occurs, we switch into physical
++ * mode, walk the page table, and then re-execute the PTE read and
++ * go on normally after that.
++ */
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++#else
++ mov r16=cr.ifa // get virtual address
++#endif
++ mov r29=b0 // save b0
++ mov r31=pr // save predicates
++dtlb_fault:
++#ifdef CONFIG_XEN
++ movl r17=XSI_IHA
++ ;;
++ ld8 r17=[r17] // get virtual address of L3 PTE
++#else
++ mov r17=cr.iha // get virtual address of PTE
++#endif
++ movl r30=1f // load nested fault continuation point
++ ;;
++1: ld8 r18=[r17] // read *pte
++ ;;
++ mov b0=r29
++ tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
++(p6) br.cond.spnt page_fault
++ ;;
++#ifdef CONFIG_XEN
++ mov r19=r8
++ mov r8=r18
++ ;;
++ XEN_HYPER_ITC_D
++ ;;
++ mov r8=r19
++ ;;
++#else
++ itc.d r18
++#endif
++ ;;
++#ifdef CONFIG_SMP
++ /*
++ * Tell the assembler's dependency-violation checker that the above "itc" instructions
++ * cannot possibly affect the following loads:
++ */
++ dv_serialize_data
++
++ ld8 r19=[r17] // read *pte again and see if same
++ mov r20=PAGE_SHIFT<<2 // setup page size for purge
++ ;;
++ cmp.ne p7,p0=r18,r19
++ ;;
++(p7) ptc.l r16,r20
++#endif
++ mov pr=r31,-1
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI
++ dv_serialize_data
++#else
++ rfi
++#endif
++END(dtlb_miss)
++
++ .org ia64_ivt+0x0c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
++ENTRY(alt_itlb_miss)
++ DBG_FAULT(3)
++#ifdef CONFIG_XEN
++ movl r31=XSI_IPSR
++ ;;
++ ld8 r21=[r31],XSI_IFA_OFS-XSI_IPSR_OFS // get ipsr, point to ifa
++ movl r17=PAGE_KERNEL
++ ;;
++ ld8 r16=[r31] // get ifa
++#else
++ mov r16=cr.ifa // get address that caused the TLB miss
++ movl r17=PAGE_KERNEL
++ mov r21=cr.ipsr
++#endif
++ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++ mov r31=pr
++ ;;
++#ifdef CONFIG_DISABLE_VHPT
++ shr.u r22=r16,61 // get the region number into r22
++ ;;
++ cmp.gt p8,p0=6,r22 // user mode
++ ;;
++#ifndef CONFIG_XEN
++(p8) thash r17=r16
++ ;;
++(p8) mov cr.iha=r17
++#endif
++(p8) mov r29=b0 // save b0
++(p8) br.cond.dptk .itlb_fault
++#endif
++ extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
++ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
++ shr.u r18=r16,57 // move address bit 61 to bit 4
++ ;;
++ andcm r18=0x10,r18 // bit 4=~address-bit(61)
++ cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
++ or r19=r17,r19 // insert PTE control bits into r19
++ ;;
++ or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
++(p8) br.cond.spnt page_fault
++ ;;
++#ifdef CONFIG_XEN
++ mov r18=r8
++ mov r8=r19
++ ;;
++ XEN_HYPER_ITC_I
++ ;;
++ mov r8=r18
++ ;;
++ mov pr=r31,-1
++ ;;
++ XEN_HYPER_RFI;
++#else
++ itc.i r19 // insert the TLB entry
++ mov pr=r31,-1
++ rfi
++#endif
++END(alt_itlb_miss)
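++
++ /*
++ * The mov r18=r8 / mov r8=r19 / XEN_HYPER_ITC_I / mov r8=r18 sequence
++ * above recurs around every XEN_HYPER_ITC_* and XEN_HYPER_THASH use in
++ * this file: the hypercall stubs take their operand and return their
++ * result in r8, so the live value of r8 has to be staged out first and
++ * restored afterwards.
++ */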
++
++ .org ia64_ivt+0x1000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
++ENTRY(alt_dtlb_miss)
++ DBG_FAULT(4)
++#ifdef CONFIG_XEN
++ movl r31=XSI_IPSR
++ ;;
++ ld8 r21=[r31],XSI_ISR_OFS-XSI_IPSR_OFS // get ipsr, point to isr
++ movl r17=PAGE_KERNEL
++ ;;
++ ld8 r20=[r31],XSI_IFA_OFS-XSI_ISR_OFS // get isr, point to ifa
++ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++ ;;
++ ld8 r16=[r31] // get ifa
++#else
++ mov r16=cr.ifa // get address that caused the TLB miss
++ movl r17=PAGE_KERNEL
++ mov r20=cr.isr
++ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++ mov r21=cr.ipsr
++#endif
++ mov r31=pr
++ ;;
++#ifdef CONFIG_DISABLE_VHPT
++ shr.u r22=r16,61 // get the region number into r22
++ ;;
++ cmp.gt p8,p0=6,r22 // access to region 0-5
++ ;;
++#ifndef CONFIG_XEN
++(p8) thash r17=r16
++ ;;
++(p8) mov cr.iha=r17
++#endif
++(p8) mov r29=b0 // save b0
++(p8) br.cond.dptk dtlb_fault
++#endif
++ extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
++ and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
++ tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
++ shr.u r18=r16,57 // move address bit 61 to bit 4
++ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
++ tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
++ ;;
++ andcm r18=0x10,r18 // bit 4=~address-bit(61)
++ cmp.ne p8,p0=r0,r23
++(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
++(p8) br.cond.spnt page_fault
++
++ dep r21=-1,r21,IA64_PSR_ED_BIT,1
++ or r19=r19,r17 // insert PTE control bits into r19
++ ;;
++ or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
++(p6) mov cr.ipsr=r21
++ ;;
++#ifdef CONFIG_XEN
++(p7) mov r18=r8
++(p7) mov r8=r19
++ ;;
++(p7) XEN_HYPER_ITC_D
++ ;;
++(p7) mov r8=r18
++ ;;
++ mov pr=r31,-1
++ ;;
++ XEN_HYPER_RFI;
++#else
++(p7) itc.d r19 // insert the TLB entry
++ mov pr=r31,-1
++ rfi
++#endif
++END(alt_dtlb_miss)
++
++ .org ia64_ivt+0x1400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
++ENTRY(nested_dtlb_miss)
++ /*
++ * In the absence of kernel bugs, we get here when the virtually mapped linear
++ * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
++ * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
++ * table is missing, a nested TLB miss fault is triggered and control is
++ * transferred to this point. When this happens, we look up the pte for the
++ * faulting address by walking the page table in physical mode and return to the
++ * continuation point passed in register r30 (or call page_fault if the address is
++ * not mapped).
++ *
++ * Input: r16: faulting address
++ * r29: saved b0
++ * r30: continuation address
++ * r31: saved pr
++ *
++ * Output: r17: physical address of PTE of faulting address
++ * r29: saved b0
++ * r30: continuation address
++ * r31: saved pr
++ *
++ * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared)
++ */
++#ifdef CONFIG_XEN
++ XEN_HYPER_RSM_PSR_DT;
++#else
++ rsm psr.dt // switch to using physical data addressing
++#endif
++ mov r19=IA64_KR(PT_BASE) // get the page table base address
++ shl r21=r16,3 // shift bit 60 into sign bit
++#ifdef CONFIG_XEN
++ movl r18=XSI_ITIR
++ ;;
++ ld8 r18=[r18]
++#else
++ mov r18=cr.itir
++#endif
++ ;;
++ shr.u r17=r16,61 // get the region number into r17
++ extr.u r18=r18,2,6 // get the faulting page size
++ ;;
++ cmp.eq p6,p7=5,r17 // is faulting address in region 5?
++ add r22=-PAGE_SHIFT,r18 // adjustment for hugetlb address
++ add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
++ ;;
++ shr.u r22=r16,r22
++ shr.u r18=r16,r18
++(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
++
++ srlz.d
++ LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
++
++ .pred.rel "mutex", p6, p7
++(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
++(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
++ ;;
++(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
++(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
++ cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
++#ifdef CONFIG_PGTABLE_4
++ shr.u r18=r22,PUD_SHIFT // shift pud index into position
++#else
++ shr.u r18=r22,PMD_SHIFT // shift pmd index into position
++#endif
++ ;;
++ ld8 r17=[r17] // get *pgd (may be 0)
++ ;;
++(p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
++ dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=p[u|m]d_offset(pgd,addr)
++ ;;
++#ifdef CONFIG_PGTABLE_4
++(p7) ld8 r17=[r17] // get *pud (may be 0)
++ shr.u r18=r22,PMD_SHIFT // shift pmd index into position
++ ;;
++(p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pud_present(*pud) == NULL?
++ dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
++ ;;
++#endif
++(p7) ld8 r17=[r17] // get *pmd (may be 0)
++ shr.u r19=r22,PAGE_SHIFT // shift pte index into position
++ ;;
++(p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pmd_present(*pmd) == NULL?
++ dep r17=r19,r17,3,(PAGE_SHIFT-3) // r17=pte_offset(pmd,addr);
++(p6) br.cond.spnt page_fault
++ mov b0=r30
++ br.sptk.many b0 // return to continuation point
++END(nested_dtlb_miss)
++
++ .org ia64_ivt+0x1800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
++ENTRY(ikey_miss)
++ DBG_FAULT(6)
++ FAULT(6)
++END(ikey_miss)
++
++ //-----------------------------------------------------------------------------------
++ // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
++ENTRY(page_fault)
++#ifdef CONFIG_XEN
++ XEN_HYPER_SSM_PSR_DT
++#else
++ ssm psr.dt
++ ;;
++ srlz.i
++#endif
++ ;;
++ SAVE_MIN_WITH_COVER
++ alloc r15=ar.pfs,0,0,3,0
++#ifdef CONFIG_XEN
++ movl r3=XSI_ISR
++ ;;
++ ld8 out1=[r3],XSI_IFA_OFS-XSI_ISR_OFS // get vcr.isr, point to ifa
++ ;;
++ ld8 out0=[r3] // get vcr.ifa
++ mov r14=1
++ ;;
++ add r3=XSI_PSR_IC_OFS-XSI_IFA_OFS, r3 // point to vpsr.ic
++ ;;
++ st4 [r3]=r14 // vpsr.ic = 1
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ sum PSR_DEFAULT_BITS
++#else
++ mov out0=cr.ifa
++ mov out1=cr.isr
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ ssm psr.ic | PSR_DEFAULT_BITS
++#endif
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++#ifdef CONFIG_XEN
++
++#define MASK_TO_PEND_OFS (-1)
++
++(p15) movl r14=XSI_PSR_I_ADDR
++ ;;
++(p15) ld8 r14=[r14]
++ ;;
++(p15) st1 [r14]=r0,MASK_TO_PEND_OFS // if (p15) vpsr.i = 1
++ ;; // if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
++(p15) ld1 r14=[r14] // if (vcpu->vcpu_info->evtchn_upcall_pending)
++ ;;
++(p15) cmp.ne p15,p0=r14,r0
++ ;;
++(p15) XEN_HYPER_SSM_I
++#else
++(p15) ssm psr.i // restore psr.i
++#endif
++ movl r14=ia64_leave_kernel
++ ;;
++ SAVE_REST
++ mov rp=r14
++ ;;
++ adds out2=16,r12 // out2 = pointer to pt_regs
++ br.call.sptk.many b6=ia64_do_page_fault // ignore return address
++END(page_fault)
++
++ .org ia64_ivt+0x1c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
++ENTRY(dkey_miss)
++ DBG_FAULT(7)
++ FAULT(7)
++END(dkey_miss)
++
++ .org ia64_ivt+0x2000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
++ENTRY(dirty_bit)
++ DBG_FAULT(8)
++ /*
++ * What we do here is to simply turn on the dirty bit in the PTE. We need to
++ * update both the page-table and the TLB entry. To efficiently access the PTE,
++ * we address it through the virtual page table. Most likely, the TLB entry for
++ * the relevant virtual page table page is still present in the TLB so we can
++ * normally do this without additional TLB misses. In case the necessary virtual
++ * page table TLB entry isn't present, we take a nested TLB miss hit where we look
++ * up the physical address of the L3 PTE and then continue at label 1 below.
++ */
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++ ;;
++#else
++ mov r16=cr.ifa // get the address that caused the fault
++#endif
++ movl r30=1f // load continuation point in case of nested fault
++ ;;
++#ifdef CONFIG_XEN
++ mov r18=r8;
++ mov r8=r16;
++ XEN_HYPER_THASH;;
++ mov r17=r8;
++ mov r8=r18;;
++#else
++ thash r17=r16 // compute virtual address of L3 PTE
++#endif
++ mov r29=b0 // save b0 in case of nested fault
++ mov r31=pr // save pr
++#ifdef CONFIG_SMP
++ mov r28=ar.ccv // save ar.ccv
++ ;;
++1: ld8 r18=[r17]
++ ;; // avoid RAW on r18
++ mov ar.ccv=r18 // set compare value for cmpxchg
++ or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
++ tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
++ ;;
++(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only update if page is present
++ mov r24=PAGE_SHIFT<<2
++ ;;
++(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
++ ;;
++#ifdef CONFIG_XEN
++(p6) mov r18=r8
++(p6) mov r8=r25
++ ;;
++(p6) XEN_HYPER_ITC_D
++ ;;
++(p6) mov r8=r18
++#else
++(p6) itc.d r25 // install updated PTE
++#endif
++ ;;
++ /*
++ * Tell the assembler's dependency-violation checker that the above "itc" instructions
++ * cannot possibly affect the following loads:
++ */
++ dv_serialize_data
++
++ ld8 r18=[r17] // read PTE again
++ ;;
++ cmp.eq p6,p7=r18,r25 // is it same as the newly installed
++ ;;
++(p7) ptc.l r16,r24
++ mov b0=r29 // restore b0
++ mov ar.ccv=r28
++#else
++ ;;
++1: ld8 r18=[r17]
++ ;; // avoid RAW on r18
++ or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
++ mov b0=r29 // restore b0
++ ;;
++ st8 [r17]=r18 // store back updated PTE
++ itc.d r18 // install updated PTE
++#endif
++ mov pr=r31,-1 // restore pr
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI
++ dv_serialize_data
++#else
++ rfi
++#endif
++END(dirty_bit)
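++
++/*
++ * Rough C equivalent of the SMP path in dirty_bit above (sketch only;
++ * itc_d and ptc_l stand in for the itc.d and ptc.l instructions):
++ *
++ *	old = *pte;
++ *	new = old | _PAGE_D | _PAGE_A;
++ *	if ((old & _PAGE_P) && cmpxchg(pte, old, new) == old)
++ *		itc_d(new);			// install updated PTE
++ *	if (*pte != new)
++ *		ptc_l(ifa, page_size);		// purge if changed under us
++ */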
++
++ .org ia64_ivt+0x2400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
++ENTRY(iaccess_bit)
++ DBG_FAULT(9)
++ // Like Entry 8, except for instruction access
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++ ;;
++#else
++ mov r16=cr.ifa // get the address that caused the fault
++#endif
++ movl r30=1f // load continuation point in case of nested fault
++ mov r31=pr // save predicates
++#ifdef CONFIG_ITANIUM
++ /*
++ * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
++ */
++ mov r17=cr.ipsr
++ ;;
++ mov r18=cr.iip
++ tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
++ ;;
++(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
++#endif /* CONFIG_ITANIUM */
++ ;;
++#ifdef CONFIG_XEN
++ mov r18=r8;
++ mov r8=r16;
++ XEN_HYPER_THASH;;
++ mov r17=r8;
++ mov r8=r18;;
++#else
++ thash r17=r16 // compute virtual address of L3 PTE
++#endif
++ mov r29=b0 // save b0 in case of nested fault
++#ifdef CONFIG_SMP
++ mov r28=ar.ccv // save ar.ccv
++ ;;
++1: ld8 r18=[r17]
++ ;;
++ mov ar.ccv=r18 // set compare value for cmpxchg
++ or r25=_PAGE_A,r18 // set the accessed bit
++ tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
++ ;;
++(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page present
++ mov r24=PAGE_SHIFT<<2
++ ;;
++(p6) cmp.eq p6,p7=r26,r18 // Only if page present
++ ;;
++#ifdef CONFIG_XEN
++ mov r26=r8
++ mov r8=r25
++ ;;
++(p6) XEN_HYPER_ITC_I
++ ;;
++ mov r8=r26
++ ;;
++#else
++(p6) itc.i r25 // install updated PTE
++#endif
++ ;;
++ /*
++ * Tell the assembler's dependency-violation checker that the above "itc" instructions
++ * cannot possibly affect the following loads:
++ */
++ dv_serialize_data
++
++ ld8 r18=[r17] // read PTE again
++ ;;
++ cmp.eq p6,p7=r18,r25 // is it same as the newly installed
++ ;;
++(p7) ptc.l r16,r24
++ mov b0=r29 // restore b0
++ mov ar.ccv=r28
++#else /* !CONFIG_SMP */
++ ;;
++1: ld8 r18=[r17]
++ ;;
++ or r18=_PAGE_A,r18 // set the accessed bit
++ mov b0=r29 // restore b0
++ ;;
++ st8 [r17]=r18 // store back updated PTE
++ itc.i r18 // install updated PTE
++#endif /* !CONFIG_SMP */
++ mov pr=r31,-1
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI
++ dv_serialize_data
++#else
++ rfi
++#endif
++END(iaccess_bit)
++
++ .org ia64_ivt+0x2800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
++ENTRY(daccess_bit)
++ DBG_FAULT(10)
++ // Like Entry 8, except for data access
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++ ;;
++#else
++ mov r16=cr.ifa // get the address that caused the fault
++#endif
++ movl r30=1f // load continuation point in case of nested fault
++ ;;
++#ifdef CONFIG_XEN
++ mov r18=r8
++ mov r8=r16
++ XEN_HYPER_THASH
++ ;;
++ mov r17=r8
++ mov r8=r18
++ ;;
++#else
++ thash r17=r16 // compute virtual address of L3 PTE
++#endif
++ mov r31=pr
++ mov r29=b0 // save b0 in case of nested fault
++#ifdef CONFIG_SMP
++ mov r28=ar.ccv // save ar.ccv
++ ;;
++1: ld8 r18=[r17]
++ ;; // avoid RAW on r18
++ mov ar.ccv=r18 // set compare value for cmpxchg
++ or r25=_PAGE_A,r18 // set the accessed bit
++ tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
++ ;;
++(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page is present
++ mov r24=PAGE_SHIFT<<2
++ ;;
++(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
++ ;;
++#ifdef CONFIG_XEN
++ mov r26=r8
++ mov r8=r25
++ ;;
++(p6) XEN_HYPER_ITC_D
++ ;;
++ mov r8=r26
++ ;;
++#else
++(p6) itc.d r25 // install updated PTE
++#endif
++ /*
++ * Tell the assembler's dependency-violation checker that the above "itc" instructions
++ * cannot possibly affect the following loads:
++ */
++ dv_serialize_data
++ ;;
++ ld8 r18=[r17] // read PTE again
++ ;;
++ cmp.eq p6,p7=r18,r25 // is it same as the newly installed
++ ;;
++(p7) ptc.l r16,r24
++ mov ar.ccv=r28
++#else
++ ;;
++1: ld8 r18=[r17]
++ ;; // avoid RAW on r18
++ or r18=_PAGE_A,r18 // set the accessed bit
++ ;;
++ st8 [r17]=r18 // store back updated PTE
++ itc.d r18 // install updated PTE
++#endif
++ mov b0=r29 // restore b0
++ mov pr=r31,-1
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI
++ dv_serialize_data
++#else
++ rfi
++#endif
++END(daccess_bit)
++
++ .org ia64_ivt+0x2c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
++ENTRY(break_fault)
++ /*
++ * The streamlined system call entry/exit paths only save/restore the initial part
++ * of pt_regs. This implies that the callers of system-calls must adhere to the
++ * normal procedure calling conventions.
++ *
++ * Registers to be saved & restored:
++ * CR registers: cr.ipsr, cr.iip, cr.ifs
++ * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
++ * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
++ * Registers to be restored only:
++ * r8-r11: output value from the system call.
++ *
++ * During system call exit, scratch registers (including r15) are modified/cleared
++ * to prevent leaking bits from kernel to user level.
++ */
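++ /*
++ * ia64 syscall numbers are biased by 1024, so the path below subtracts
++ * 1024 from r15 before indexing sys_call_table (8 bytes per entry);
++ * e.g. r15 = 1034 selects sys_call_table[10] (an illustrative number).
++ */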
++ DBG_FAULT(11)
++ mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
++#ifdef CONFIG_XEN
++ movl r22=XSI_IPSR
++ ;;
++ ld8 r29=[r22],XSI_IIM_OFS-XSI_IPSR_OFS // get ipsr, point to iip
++#else
++ mov r29=cr.ipsr // M2 (12 cyc)
++#endif
++ mov r31=pr // I0 (2 cyc)
++
++#ifdef CONFIG_XEN
++ ;;
++ ld8 r17=[r22],XSI_IIP_OFS-XSI_IIM_OFS
++#else
++ mov r17=cr.iim // M2 (2 cyc)
++#endif
++ mov.m r27=ar.rsc // M2 (12 cyc)
++ mov r18=__IA64_BREAK_SYSCALL // A
++
++ mov.m ar.rsc=0 // M2
++ mov.m r21=ar.fpsr // M2 (12 cyc)
++ mov r19=b6 // I0 (2 cyc)
++ ;;
++ mov.m r23=ar.bspstore // M2 (12 cyc)
++ mov.m r24=ar.rnat // M2 (5 cyc)
++ mov.i r26=ar.pfs // I0 (2 cyc)
++
++ invala // M0|1
++ nop.m 0 // M
++ mov r20=r1 // A save r1
++
++ nop.m 0
++ movl r30=sys_call_table // X
++
++#ifdef CONFIG_XEN
++ ld8 r28=[r22]
++#else
++ mov r28=cr.iip // M2 (2 cyc)
++#endif
++ cmp.eq p0,p7=r18,r17 // I0 is this a system call?
++(p7) br.cond.spnt non_syscall // B no ->
++ //
++ // From this point on, we are definitely on the syscall-path
++ // and we can use (non-banked) scratch registers.
++ //
++///////////////////////////////////////////////////////////////////////
++ mov r1=r16 // A move task-pointer to "addl"-addressable reg
++ mov r2=r16 // A setup r2 for ia64_syscall_setup
++ add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
++
++ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
++ adds r15=-1024,r15 // A subtract 1024 from syscall number
++ mov r3=NR_syscalls - 1
++ ;;
++ ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
++ ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
++ extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
++
++ shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
++ addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
++ cmp.leu p6,p7=r15,r3 // A syscall number in range?
++ ;;
++
++ lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
++(p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
++ tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
++
++ mov.m ar.bspstore=r22 // M2 switch to kernel RBS
++ cmp.eq p8,p9=2,r8 // A ipsr.ei==2?
++ ;;
++
++(p8) mov r8=0 // A clear ei to 0
++(p7) movl r30=sys_ni_syscall // X
++
++(p8) adds r28=16,r28 // A switch cr.iip to next bundle
++(p9) adds r8=1,r8 // A increment ei to next slot
++ nop.i 0
++ ;;
++
++ mov.m r25=ar.unat // M2 (5 cyc)
++ dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
++ adds r15=1024,r15 // A restore original syscall number
++ //
++ // If any of the above loads miss in L1D, we'll stall here until
++ // the data arrives.
++ //
++///////////////////////////////////////////////////////////////////////
++ st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
++ mov b6=r30 // I0 setup syscall handler branch reg early
++ cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
++
++ and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
++ mov r18=ar.bsp // M2 (12 cyc)
++(pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS
++ ;;
++.back_from_break_fixup:
++(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
++ cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
++ br.call.sptk.many b7=ia64_syscall_setup // B
++1:
++ mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
++ nop 0
++#ifdef CONFIG_XEN
++ mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
++#else
++ bsw.1 // B (6 cyc) regs are saved, switch to bank 1
++#endif
++ ;;
++
++#ifdef CONFIG_XEN
++ movl r16=XSI_PSR_IC
++ mov r3=1
++ ;;
++ st4 [r16]=r3,XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS // vpsr.ic = 1
++#else
++ ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
++#endif
++ movl r3=ia64_ret_from_syscall // X
++ ;;
++
++ srlz.i // M0 ensure interruption collection is on
++ mov rp=r3 // I0 set the real return addr
++(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
++
++#ifdef CONFIG_XEN
++(p15) ld8 r16=[r16] // vpsr.i
++ ;;
++(p15) st1 [r16]=r0,MASK_TO_PEND_OFS // if (p15) vpsr.i = 1
++ ;; // if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
++(p15) ld1 r2=[r16] // if (vcpu->vcpu_info->evtchn_upcall_pending)
++ ;;
++(p15) cmp.ne.unc p6,p0=r2,r0
++ ;;
++(p6) XEN_HYPER_SSM_I // do a real ssm psr.i
++#else
++(p15) ssm psr.i // M2 restore psr.i
++#endif
++(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
++ br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamajig
++ // NOT REACHED
++///////////////////////////////////////////////////////////////////////
++ // On entry, we optimistically assumed that we're coming from user-space.
++ // For the rare cases where a system-call is done from within the kernel,
++ // we fix things up at this point:
++.break_fixup:
++ add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure
++ mov ar.rnat=r24 // M2 restore kernel's AR.RNAT
++ ;;
++ mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE
++ br.cond.sptk .back_from_break_fixup
++END(break_fault)
++
++ .org ia64_ivt+0x3000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
++ENTRY(interrupt)
++ DBG_FAULT(12)
++ mov r31=pr // prepare to save predicates
++ ;;
++ SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
++#ifdef CONFIG_XEN
++ movl r3=XSI_PSR_IC
++ mov r14=1
++ ;;
++ st4 [r3]=r14
++#else
++ ssm psr.ic | PSR_DEFAULT_BITS
++#endif
++ ;;
++ adds r3=8,r2 // set up second base pointer for SAVE_REST
++ srlz.i // ensure everybody knows psr.ic is back on
++ ;;
++ SAVE_REST
++ ;;
++ alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
++#ifdef CONFIG_XEN
++ ;;
++ XEN_HYPER_GET_IVR
++ ;;
++ mov out0=r8 // pass cr.ivr as first arg
++#else
++ mov out0=cr.ivr // pass cr.ivr as first arg
++#endif
++ add out1=16,sp // pass pointer to pt_regs as second arg
++ ;;
++ srlz.d // make sure we see the effect of cr.ivr
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.call.sptk.many b6=ia64_handle_irq
++END(interrupt)
++
++ .org ia64_ivt+0x3400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3400 Entry 13 (size 64 bundles) Reserved
++ DBG_FAULT(13)
++ FAULT(13)
++
++ .org ia64_ivt+0x3800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3800 Entry 14 (size 64 bundles) Reserved
++ DBG_FAULT(14)
++ FAULT(14)
++
++ /*
++ * There is no particular reason for this code to be here, other than that
++ * there happens to be space here that would go unused otherwise. If this
++ * fault ever gets "unreserved", simply move the following code to a more
++ * suitable spot...
++ *
++ * ia64_syscall_setup() is a separate subroutine so that it can
++ * allocate stacked registers and thus safely demine any
++ * potential NaT values from the input registers.
++ *
++ * On entry:
++ * - executing on bank 0 or bank 1 register set (doesn't matter)
++ * - r1: stack pointer
++ * - r2: current task pointer
++ * - r3: preserved
++ * - r11: original contents (saved ar.pfs to be saved)
++ * - r12: original contents (sp to be saved)
++ * - r13: original contents (tp to be saved)
++ * - r15: original contents (syscall # to be saved)
++ * - r18: saved bsp (after switching to kernel stack)
++ * - r19: saved b6
++ * - r20: saved r1 (gp)
++ * - r21: saved ar.fpsr
++ * - r22: kernel's register backing store base (krbs_base)
++ * - r23: saved ar.bspstore
++ * - r24: saved ar.rnat
++ * - r25: saved ar.unat
++ * - r26: saved ar.pfs
++ * - r27: saved ar.rsc
++ * - r28: saved cr.iip
++ * - r29: saved cr.ipsr
++ * - r31: saved pr
++ * - b0: original contents (to be saved)
++ * On exit:
++ * - p10: TRUE if syscall is invoked with more than 8 out
++ * registers or r15's Nat is true
++ * - r1: kernel's gp
++ * - r3: preserved (same as on entry)
++ * - r8: -EINVAL if p10 is true
++ * - r12: points to kernel stack
++ * - r13: points to current task
++ * - r14: preserved (same as on entry)
++ * - p13: preserved
++ * - p15: TRUE if interrupts need to be re-enabled
++ * - ar.fpsr: set to kernel settings
++ * - b6: preserved (same as on entry)
++ */
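++ /*
++ * (The p10 output described above is computed below as, roughly:
++ * sol + 8 < sof in the caller's ar.pfs, or r15 NaT -- i.e., the
++ * syscall was invoked with more than 8 output registers.)
++ */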
++#ifndef CONFIG_XEN
++GLOBAL_ENTRY(ia64_syscall_setup)
++#if PT(B6) != 0
++# error This code assumes that b6 is the first field in pt_regs.
++#endif
++ st8 [r1]=r19 // save b6
++ add r16=PT(CR_IPSR),r1 // initialize first base pointer
++ add r17=PT(R11),r1 // initialize second base pointer
++ ;;
++ alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
++ st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
++ tnat.nz p8,p0=in0
++
++ st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
++ tnat.nz p9,p0=in1
++(pKStk) mov r18=r0 // make sure r18 isn't NaT
++ ;;
++
++ st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
++ st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
++ mov r28=b0 // save b0 (2 cyc)
++ ;;
++
++ st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
++ dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
++(p8) mov in0=-1
++ ;;
++
++ st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
++ extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
++ and r8=0x7f,r19 // A // get sof of ar.pfs
++
++ st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
++ tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
++(p9) mov in1=-1
++ ;;
++
++(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
++ tnat.nz p10,p0=in2
++ add r11=8,r11
++ ;;
++(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
++(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
++ tnat.nz p11,p0=in3
++ ;;
++(p10) mov in2=-1
++ tnat.nz p12,p0=in4 // [I0]
++(p11) mov in3=-1
++ ;;
++(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
++(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
++ shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
++ ;;
++ st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
++ st8 [r17]=r28,PT(R1)-PT(B0) // save b0
++ tnat.nz p13,p0=in5 // [I0]
++ ;;
++ st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
++ st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
++(p12) mov in4=-1
++ ;;
++
++.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
++.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
++(p13) mov in5=-1
++ ;;
++ st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
++ tnat.nz p13,p0=in6
++ cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
++ ;;
++ mov r8=1
++(p9) tnat.nz p10,p0=r15
++ adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
++
++ st8.spill [r17]=r15 // save r15
++ tnat.nz p8,p0=in7
++ nop.i 0
++
++ mov r13=r2 // establish `current'
++ movl r1=__gp // establish kernel global pointer
++ ;;
++ st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
++(p13) mov in6=-1
++(p8) mov in7=-1
++
++ cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
++ movl r17=FPSR_DEFAULT
++ ;;
++ mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
++(p10) mov r8=-EINVAL
++ br.ret.sptk.many b7
++END(ia64_syscall_setup)
++#endif
++
++ .org ia64_ivt+0x3c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3c00 Entry 15 (size 64 bundles) Reserved
++ DBG_FAULT(15)
++ FAULT(15)
++
++ /*
++ * Squatting in this space ...
++ *
++ * This special case dispatcher for illegal operation faults allows preserved
++ * registers to be modified through a callback function (asm only) that is handed
++ * back from the fault handler in r8. Up to three arguments can be passed to the
++ * callback function by returning an aggregate with the callback as its first
++ * element, followed by the arguments.
++ */
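++ /*
++ * In rough pseudo-C, the value handed back in r8-r11 is
++ *
++ *	struct { unsigned long cb, arg1, arg2, arg3; };
++ *
++ * A zero callback (r8 == 0) means "no fixup", in which case we branch
++ * straight to ia64_leave_kernel below.
++ */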
++ENTRY(dispatch_illegal_op_fault)
++ .prologue
++ .body
++ SAVE_MIN_WITH_COVER
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer for SAVE_REST
++ ;;
++ alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
++ mov out0=ar.ec
++ ;;
++ SAVE_REST
++ PT_REGS_UNWIND_INFO(0)
++ ;;
++ br.call.sptk.many rp=ia64_illegal_op_fault
++.ret0: ;;
++ alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
++ mov out0=r9
++ mov out1=r10
++ mov out2=r11
++ movl r15=ia64_leave_kernel
++ ;;
++ mov rp=r15
++ mov b6=r8
++ ;;
++ cmp.ne p6,p0=0,r8
++(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
++ br.sptk.many ia64_leave_kernel
++END(dispatch_illegal_op_fault)
++
++ .org ia64_ivt+0x4000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4000 Entry 16 (size 64 bundles) Reserved
++ DBG_FAULT(16)
++ FAULT(16)
++
++ .org ia64_ivt+0x4400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4400 Entry 17 (size 64 bundles) Reserved
++ DBG_FAULT(17)
++ FAULT(17)
++
++ENTRY(non_syscall)
++ mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
++ ;;
++ SAVE_MIN_WITH_COVER
++
++ // There is no particular reason for this code to be here, other than that
++ // there happens to be space here that would go unused otherwise. If this
++ // fault ever gets "unreserved", simply move the following code to a more
++ // suitable spot...
++
++ alloc r14=ar.pfs,0,0,2,0
++ mov out0=cr.iim
++ add out1=16,sp
++ adds r3=8,r2 // set up second base pointer for SAVE_REST
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ movl r15=ia64_leave_kernel
++ ;;
++ SAVE_REST
++ mov rp=r15
++ ;;
++ br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
++END(non_syscall)
++
++ .org ia64_ivt+0x4800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4800 Entry 18 (size 64 bundles) Reserved
++ DBG_FAULT(18)
++ FAULT(18)
++
++ /*
++ * There is no particular reason for this code to be here, other than that
++ * there happens to be space here that would go unused otherwise. If this
++ * fault ever gets "unreserved", simply move the following code to a more
++ * suitable spot...
++ */
++
++ENTRY(dispatch_unaligned_handler)
++ SAVE_MIN_WITH_COVER
++ ;;
++ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
++ mov out0=cr.ifa
++ adds out1=16,sp
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_unaligned
++END(dispatch_unaligned_handler)
++
++ .org ia64_ivt+0x4c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4c00 Entry 19 (size 64 bundles) Reserved
++ DBG_FAULT(19)
++ FAULT(19)
++
++ /*
++ * There is no particular reason for this code to be here, other than that
++ * there happens to be space here that would go unused otherwise. If this
++ * fault ever gets "unreserved", simply move the following code to a more
++ * suitable spot...
++ */
++
++ENTRY(dispatch_to_fault_handler)
++ /*
++ * Input:
++ * psr.ic: off
++ * r19: fault vector number (e.g., 24 for General Exception)
++ * r31: contains saved predicates (pr)
++ */
++ SAVE_MIN_WITH_COVER_R19
++ alloc r14=ar.pfs,0,0,5,0
++ mov out0=r15
++#ifdef CONFIG_XEN
++ movl out1=XSI_ISR
++ ;;
++ adds out2=XSI_IFA-XSI_ISR,out1
++ adds out3=XSI_IIM-XSI_ISR,out1
++ adds out4=XSI_ITIR-XSI_ISR,out1
++ ;;
++ ld8 out1=[out1]
++ ld8 out2=[out2]
++ ld8 out3=[out3]
++ ld8 out4=[out4]
++ ;;
++#else
++ mov out1=cr.isr
++ mov out2=cr.ifa
++ mov out3=cr.iim
++ mov out4=cr.itir
++ ;;
++#endif
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer for SAVE_REST
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.call.sptk.many b6=ia64_fault
++END(dispatch_to_fault_handler)
++
++//
++// --- End of long entries, Beginning of short entries
++//
++
++ .org ia64_ivt+0x5000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
++ENTRY(page_not_present)
++ DBG_FAULT(20)
++ mov r16=cr.ifa
++ rsm psr.dt
++ /*
++ * The Linux page fault handler doesn't expect non-present pages to be in
++ * the TLB. Flush the existing entry now, so we meet that expectation.
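++ *
++ * (ptc.l takes log2 of the purge size in bits {7:2} of its second
++ * operand, which is what PAGE_SHIFT<<2 below encodes.)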
++ */
++ mov r17=PAGE_SHIFT<<2
++ ;;
++ ptc.l r16,r17
++ ;;
++ mov r31=pr
++ srlz.d
++ br.sptk.many page_fault
++END(page_not_present)
++
++ .org ia64_ivt+0x5100
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
++ENTRY(key_permission)
++ DBG_FAULT(21)
++ mov r16=cr.ifa
++ rsm psr.dt
++ mov r31=pr
++ ;;
++ srlz.d
++ br.sptk.many page_fault
++END(key_permission)
++
++ .org ia64_ivt+0x5200
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
++ENTRY(iaccess_rights)
++ DBG_FAULT(22)
++ mov r16=cr.ifa
++ rsm psr.dt
++ mov r31=pr
++ ;;
++ srlz.d
++ br.sptk.many page_fault
++END(iaccess_rights)
++
++ .org ia64_ivt+0x5300
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
++ENTRY(daccess_rights)
++ DBG_FAULT(23)
++#ifdef CONFIG_XEN
++ movl r16=XSI_IFA
++ ;;
++ ld8 r16=[r16]
++ ;;
++ XEN_HYPER_RSM_PSR_DT
++#else
++ mov r16=cr.ifa
++ rsm psr.dt
++#endif
++ mov r31=pr
++ ;;
++ srlz.d
++ br.sptk.many page_fault
++END(daccess_rights)
++
++ .org ia64_ivt+0x5400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
++ENTRY(general_exception)
++ DBG_FAULT(24)
++ mov r16=cr.isr
++ mov r31=pr
++ ;;
++ cmp4.eq p6,p0=0,r16
++(p6) br.sptk.many dispatch_illegal_op_fault
++ ;;
++ mov r19=24 // fault number
++ br.sptk.many dispatch_to_fault_handler
++END(general_exception)
++
++ .org ia64_ivt+0x5500
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
++ENTRY(disabled_fp_reg)
++ DBG_FAULT(25)
++ rsm psr.dfh // ensure we can access fph
++ ;;
++ srlz.d
++ mov r31=pr
++ mov r19=25
++ br.sptk.many dispatch_to_fault_handler
++END(disabled_fp_reg)
++
++ .org ia64_ivt+0x5600
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
++ENTRY(nat_consumption)
++ DBG_FAULT(26)
++
++ mov r16=cr.ipsr
++ mov r17=cr.isr
++ mov r31=pr // save PR
++ ;;
++ and r18=0xf,r17 // r18 = cr.isr.code{3:0}
++ tbit.z p6,p0=r17,IA64_ISR_NA_BIT
++ ;;
++ cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
++ dep r16=-1,r16,IA64_PSR_ED_BIT,1
++(p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
++ ;;
++ mov cr.ipsr=r16 // set cr.ipsr.ed
++ mov pr=r31,-1
++ ;;
++ rfi
++
++1: mov pr=r31,-1
++ ;;
++ FAULT(26)
++END(nat_consumption)
++
++ .org ia64_ivt+0x5700
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
++ENTRY(speculation_vector)
++ DBG_FAULT(27)
++ /*
++ * A [f]chk.[as] instruction needs to take the branch to the recovery code but
++ * this part of the architecture is not implemented in hardware on some CPUs, such
++ * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
++ * the relative target (not yet sign extended). So after sign extending it we
++ * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
++ * i.e., the slot to restart into.
++ *
++ * cr.iim contains zero_ext(imm21)
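++ *
++ * Net effect of the two shifts below: shl 43 parks the sign bit of
++ * imm21 at bit 63, and the arithmetic shr 39 leaves
++ * sign_ext(imm21) << 4, i.e. the offset scaled to 16-byte bundles
++ * (e.g. imm21 = 0x1fffff, i.e. -1, yields an offset of -16).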
++ */
++ mov r18=cr.iim
++ ;;
++ mov r17=cr.iip
++ shl r18=r18,43 // put sign bit in position (43=64-21)
++ ;;
++
++ mov r16=cr.ipsr
++ shr r18=r18,39 // sign extend (39=43-4)
++ ;;
++
++ add r17=r17,r18 // now add the offset
++ ;;
++ mov cr.iip=r17
++ dep r16=0,r16,41,2 // clear EI
++ ;;
++
++ mov cr.ipsr=r16
++ ;;
++
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI;
++#else
++ rfi // and go back
++#endif
++END(speculation_vector)
++
++ .org ia64_ivt+0x5800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5800 Entry 28 (size 16 bundles) Reserved
++ DBG_FAULT(28)
++ FAULT(28)
++
++ .org ia64_ivt+0x5900
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
++ENTRY(debug_vector)
++ DBG_FAULT(29)
++ FAULT(29)
++END(debug_vector)
++
++ .org ia64_ivt+0x5a00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
++ENTRY(unaligned_access)
++ DBG_FAULT(30)
++ mov r31=pr // prepare to save predicates
++ ;;
++ br.sptk.many dispatch_unaligned_handler
++END(unaligned_access)
++
++ .org ia64_ivt+0x5b00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
++ENTRY(unsupported_data_reference)
++ DBG_FAULT(31)
++ FAULT(31)
++END(unsupported_data_reference)
++
++ .org ia64_ivt+0x5c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
++ENTRY(floating_point_fault)
++ DBG_FAULT(32)
++ FAULT(32)
++END(floating_point_fault)
++
++ .org ia64_ivt+0x5d00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
++ENTRY(floating_point_trap)
++ DBG_FAULT(33)
++ FAULT(33)
++END(floating_point_trap)
++
++ .org ia64_ivt+0x5e00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
++ENTRY(lower_privilege_trap)
++ DBG_FAULT(34)
++ FAULT(34)
++END(lower_privilege_trap)
++
++ .org ia64_ivt+0x5f00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
++ENTRY(taken_branch_trap)
++ DBG_FAULT(35)
++ FAULT(35)
++END(taken_branch_trap)
++
++ .org ia64_ivt+0x6000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
++ENTRY(single_step_trap)
++ DBG_FAULT(36)
++ FAULT(36)
++END(single_step_trap)
++
++ .org ia64_ivt+0x6100
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6100 Entry 37 (size 16 bundles) Reserved
++ DBG_FAULT(37)
++ FAULT(37)
++
++ .org ia64_ivt+0x6200
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6200 Entry 38 (size 16 bundles) Reserved
++ DBG_FAULT(38)
++ FAULT(38)
++
++ .org ia64_ivt+0x6300
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6300 Entry 39 (size 16 bundles) Reserved
++ DBG_FAULT(39)
++ FAULT(39)
++
++ .org ia64_ivt+0x6400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6400 Entry 40 (size 16 bundles) Reserved
++ DBG_FAULT(40)
++ FAULT(40)
++
++ .org ia64_ivt+0x6500
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6500 Entry 41 (size 16 bundles) Reserved
++ DBG_FAULT(41)
++ FAULT(41)
++
++ .org ia64_ivt+0x6600
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6600 Entry 42 (size 16 bundles) Reserved
++ DBG_FAULT(42)
++ FAULT(42)
++
++ .org ia64_ivt+0x6700
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6700 Entry 43 (size 16 bundles) Reserved
++ DBG_FAULT(43)
++ FAULT(43)
++
++ .org ia64_ivt+0x6800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6800 Entry 44 (size 16 bundles) Reserved
++ DBG_FAULT(44)
++ FAULT(44)
++
++ .org ia64_ivt+0x6900
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
++ENTRY(ia32_exception)
++ DBG_FAULT(45)
++ FAULT(45)
++END(ia32_exception)
++
++ .org ia64_ivt+0x6a00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
++ENTRY(ia32_intercept)
++ DBG_FAULT(46)
++#ifdef CONFIG_IA32_SUPPORT
++ mov r31=pr
++ mov r16=cr.isr
++ ;;
++ extr.u r17=r16,16,8 // get ISR.code
++ mov r18=ar.eflag
++ mov r19=cr.iim // old eflag value
++ ;;
++ cmp.ne p6,p0=2,r17
++(p6) br.cond.spnt 1f // not a system flag fault
++ xor r16=r18,r19
++ ;;
++ extr.u r17=r16,18,1 // get the eflags.ac bit
++ ;;
++ cmp.eq p6,p0=0,r17
++(p6) br.cond.spnt 1f // eflags.ac bit didn't change
++ ;;
++ mov pr=r31,-1 // restore predicate registers
++#ifdef CONFIG_XEN
++ XEN_HYPER_RFI;
++#else
++ rfi
++#endif
++
++1:
++#endif // CONFIG_IA32_SUPPORT
++ FAULT(46)
++END(ia32_intercept)
++
++ .org ia64_ivt+0x6b00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
++ENTRY(ia32_interrupt)
++ DBG_FAULT(47)
++#ifdef CONFIG_IA32_SUPPORT
++ mov r31=pr
++ br.sptk.many dispatch_to_ia32_handler
++#else
++ FAULT(47)
++#endif
++END(ia32_interrupt)
++
++ .org ia64_ivt+0x6c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6c00 Entry 48 (size 16 bundles) Reserved
++ DBG_FAULT(48)
++ FAULT(48)
++
++ .org ia64_ivt+0x6d00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6d00 Entry 49 (size 16 bundles) Reserved
++ DBG_FAULT(49)
++ FAULT(49)
++
++ .org ia64_ivt+0x6e00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6e00 Entry 50 (size 16 bundles) Reserved
++ DBG_FAULT(50)
++ FAULT(50)
++
++ .org ia64_ivt+0x6f00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6f00 Entry 51 (size 16 bundles) Reserved
++ DBG_FAULT(51)
++ FAULT(51)
++
++ .org ia64_ivt+0x7000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7000 Entry 52 (size 16 bundles) Reserved
++ DBG_FAULT(52)
++ FAULT(52)
++
++ .org ia64_ivt+0x7100
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7100 Entry 53 (size 16 bundles) Reserved
++ DBG_FAULT(53)
++ FAULT(53)
++
++ .org ia64_ivt+0x7200
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7200 Entry 54 (size 16 bundles) Reserved
++ DBG_FAULT(54)
++ FAULT(54)
++
++ .org ia64_ivt+0x7300
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7300 Entry 55 (size 16 bundles) Reserved
++ DBG_FAULT(55)
++ FAULT(55)
++
++ .org ia64_ivt+0x7400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7400 Entry 56 (size 16 bundles) Reserved
++ DBG_FAULT(56)
++ FAULT(56)
++
++ .org ia64_ivt+0x7500
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7500 Entry 57 (size 16 bundles) Reserved
++ DBG_FAULT(57)
++ FAULT(57)
++
++ .org ia64_ivt+0x7600
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7600 Entry 58 (size 16 bundles) Reserved
++ DBG_FAULT(58)
++ FAULT(58)
++
++ .org ia64_ivt+0x7700
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7700 Entry 59 (size 16 bundles) Reserved
++ DBG_FAULT(59)
++ FAULT(59)
++
++ .org ia64_ivt+0x7800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7800 Entry 60 (size 16 bundles) Reserved
++ DBG_FAULT(60)
++ FAULT(60)
++
++ .org ia64_ivt+0x7900
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7900 Entry 61 (size 16 bundles) Reserved
++ DBG_FAULT(61)
++ FAULT(61)
++
++ .org ia64_ivt+0x7a00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7a00 Entry 62 (size 16 bundles) Reserved
++ DBG_FAULT(62)
++ FAULT(62)
++
++ .org ia64_ivt+0x7b00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7b00 Entry 63 (size 16 bundles) Reserved
++ DBG_FAULT(63)
++ FAULT(63)
++
++ .org ia64_ivt+0x7c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7c00 Entry 64 (size 16 bundles) Reserved
++ DBG_FAULT(64)
++ FAULT(64)
++
++ .org ia64_ivt+0x7d00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7d00 Entry 65 (size 16 bundles) Reserved
++ DBG_FAULT(65)
++ FAULT(65)
++
++ .org ia64_ivt+0x7e00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7e00 Entry 66 (size 16 bundles) Reserved
++ DBG_FAULT(66)
++ FAULT(66)
++
++ .org ia64_ivt+0x7f00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7f00 Entry 67 (size 16 bundles) Reserved
++ DBG_FAULT(67)
++ FAULT(67)
++
++#ifdef CONFIG_IA32_SUPPORT
++
++ /*
++ * There is no particular reason for this code to be here, other than that
++ * there happens to be space here that would go unused otherwise. If this
++ * fault ever gets "unreserved", simply move the following code to a more
++ * suitable spot...
++ */
++
++ // IA32 interrupt entry point
++
++ENTRY(dispatch_to_ia32_handler)
++ SAVE_MIN
++ ;;
++ mov r14=cr.isr
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i
++ adds r3=8,r2 // Base pointer for SAVE_REST
++ ;;
++ SAVE_REST
++ ;;
++ mov r15=0x80
++ shr r14=r14,16 // Get interrupt number
++ ;;
++ cmp.ne p6,p0=r14,r15
++(p6) br.call.dpnt.many b6=non_ia32_syscall
++
++ adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
++ adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
++ ;;
++ cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
++ ld8 r8=[r14] // get r8
++ ;;
++ st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
++ ;;
++ alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
++ ;;
++ ld4 r8=[r14],8 // r8 == eax (syscall number)
++ mov r15=IA32_NR_syscalls
++ ;;
++ cmp.ltu.unc p6,p7=r8,r15
++ ld4 out1=[r14],8 // r9 == ecx
++ ;;
++ ld4 out2=[r14],8 // r10 == edx
++ ;;
++ ld4 out0=[r14] // r11 == ebx
++ adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
++ ;;
++ ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
++ ;;
++ ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
++ adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
++ ;;
++ ld4 out4=[r14] // r15 == edi
++ movl r16=ia32_syscall_table
++ ;;
++(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
++ ld4 r2=[r2] // r2 = current_thread_info()->flags
++ ;;
++ ld8 r16=[r16]
++ and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
++ ;;
++ mov b6=r16
++ movl r15=ia32_ret_from_syscall
++ cmp.eq p8,p0=r2,r0
++ ;;
++ mov rp=r15
++(p8) br.call.sptk.many b6=b6
++ br.cond.sptk ia32_trace_syscall
++
++non_ia32_syscall:
++ alloc r15=ar.pfs,0,0,2,0
++ mov out0=r14 // interrupt #
++ add out1=16,sp // pointer to pt_regs
++ ;; // avoid WAW on CFM
++ br.call.sptk.many rp=ia32_bad_interrupt
++.ret1: movl r15=ia64_leave_kernel
++ ;;
++ mov rp=r15
++ br.ret.sptk.many rp
++END(dispatch_to_ia32_handler)
++#endif /* CONFIG_IA32_SUPPORT */
++
++#ifdef CONFIG_XEN
++ .section .text,"ax"
++GLOBAL_ENTRY(xen_event_callback)
++ mov r31=pr // prepare to save predicates
++ ;;
++ SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
++ ;;
++ movl r3=XSI_PSR_IC
++ mov r14=1
++ ;;
++ st4 [r3]=r14
++ ;;
++ adds r3=8,r2 // set up second base pointer for SAVE_REST
++ srlz.i // ensure everybody knows psr.ic is back on
++ ;;
++ SAVE_REST
++ ;;
++1:
++ alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
++ add out0=16,sp // pass pointer to pt_regs as first arg
++ ;;
++ br.call.sptk.many b0=evtchn_do_upcall
++ ;;
++ movl r20=XSI_PSR_I_ADDR
++ ;;
++ ld8 r20=[r20]
++ ;;
++ adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending
++ ;;
++ ld1 r20=[r20]
++ ;;
++ cmp.ne p6,p0=r20,r0 // if there are pending events,
++ (p6) br.spnt.few 1b // call evtchn_do_upcall again.
++ br.sptk.many ia64_leave_kernel
++END(xen_event_callback)
++
++
++ /*
++ * There is no particular reason for this code to be here, other than that
++ * there happens to be space here that would go unused otherwise. If this
++ * fault ever gets "unreserved", simply moved the following code to a more
++ * suitable spot...
++ */
++
++GLOBAL_ENTRY(xen_bsw1)
++ /* FIXME: THIS CODE IS NOT NaT SAFE! */
++ mov r14=ar.unat
++ movl r30=XSI_B1NAT
++ ;;
++ ld8 r30=[r30];;
++ mov ar.unat=r30
++ movl r30=XSI_BANKNUM;
++ mov r31=1;;
++ st4 [r30]=r31;
++ movl r30=XSI_BANK1_R16;
++ movl r31=XSI_BANK1_R16+8;;
++ ld8.fill r16=[r30],16; ld8.fill r17=[r31],16;;
++ ld8.fill r18=[r30],16; ld8.fill r19=[r31],16;;
++ ld8.fill r20=[r30],16; ld8.fill r21=[r31],16;;
++ ld8.fill r22=[r30],16; ld8.fill r23=[r31],16;;
++ ld8.fill r24=[r30],16; ld8.fill r25=[r31],16;;
++ ld8.fill r26=[r30],16; ld8.fill r27=[r31],16;;
++ ld8.fill r28=[r30],16; ld8.fill r29=[r31],16;;
++ ld8.fill r30=[r30]; ld8.fill r31=[r31];;
++ mov ar.unat=r14
++ br.ret.sptk.many b0
++END(xen_bsw1)
++
++
++#endif
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xenminstate.h linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenminstate.h
+--- linux-2.6.18.8/arch/ia64/xen/xenminstate.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenminstate.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,358 @@
++#include <asm/cache.h>
++
++#ifdef CONFIG_XEN
++#include "../kernel/entry.h"
++#else
++#include "entry.h"
++#endif
++
++/*
++ * For ivt.s we want to access the stack virtually so we don't have to disable translation
++ * on interrupts.
++ *
++ * On entry:
++ * r1: pointer to current task (ar.k6)
++ */
++#define MINSTATE_START_SAVE_MIN_VIRT \
++(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
++ ;; \
++(pUStk) mov.m r24=ar.rnat; \
++(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
++(pKStk) mov r1=sp; /* get sp */ \
++ ;; \
++(pUStk) lfetch.fault.excl.nt1 [r22]; \
++(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
++(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
++ ;; \
++(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
++(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
++ ;; \
++(pUStk) mov r18=ar.bsp; \
++(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
++
++#define MINSTATE_END_SAVE_MIN_VIRT \
++ bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
++ ;;
++
++/*
++ * For mca_asm.S we want to access the stack physically since the state is saved before we
++ * go virtual and don't want to destroy the iip or ipsr.
++ */
++#define MINSTATE_START_SAVE_MIN_PHYS \
++(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
++(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
++(pKStk) ld8 r3 = [r3];; \
++(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
++(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
++(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
++(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
++ ;; \
++(pUStk) mov r24=ar.rnat; \
++(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
++(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
++(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
++ ;; \
++(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
++(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
++ ;; \
++(pUStk) mov r18=ar.bsp; \
++(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
++
++#define MINSTATE_END_SAVE_MIN_PHYS \
++ dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \
++ ;;
++
++#ifdef MINSTATE_VIRT
++# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT)
++# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT
++# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT
++#endif
++
++#ifdef MINSTATE_PHYS
++# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg
++# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS
++# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS
++#endif
++
++/*
++ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
++ * the minimum state necessary to allow us to turn psr.ic back
++ * on.
++ *
++ * Assumed state upon entry:
++ * psr.ic: off
++ * r31: contains saved predicates (pr)
++ *
++ * Upon exit, the state is as follows:
++ * psr.ic: off
++ * r2 = points to &pt_regs.r16
++ * r8 = contents of ar.ccv
++ * r9 = contents of ar.csd
++ * r10 = contents of ar.ssd
++ * r11 = FPSR_DEFAULT
++ * r12 = kernel sp (kernel virtual address)
++ * r13 = points to current task_struct (kernel virtual address)
++ * p15 = TRUE if psr.i is set in cr.ipsr
++ * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
++ * preserved
++ * CONFIG_XEN note: p6/p7 are not preserved
++ *
++ * Note that psr.ic is NOT turned on by this macro. This is so that
++ * we can pass interruption state as arguments to a handler.
++ */
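++/*
++ * Typical use from the IVT handlers (cf. ivt.S), schematically:
++ *
++ *	SAVE_MIN_WITH_COVER		// uses r31; defines r2 and r3
++ *	ssm psr.ic | PSR_DEFAULT_BITS
++ *	;;
++ *	srlz.i				// ensure psr.ic is back on
++ *	;;
++ *	SAVE_REST
++ */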
++#ifdef CONFIG_XEN
++#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
++ MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
++ mov r27=ar.rsc; /* M */ \
++ mov r20=r1; /* A */ \
++ mov r25=ar.unat; /* M */ \
++ /* mov r29=cr.ipsr; /* M */ \
++ movl r29=XSI_IPSR;; \
++ ld8 r29=[r29];; \
++ mov r26=ar.pfs; /* I */ \
++ /* mov r28=cr.iip; /* M */ \
++ movl r28=XSI_IIP;; \
++ ld8 r28=[r28];; \
++ mov r21=ar.fpsr; /* M */ \
++ COVER; /* B;; (or nothing) */ \
++ ;; \
++ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
++ ;; \
++ ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
++ st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
++ adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
++ /* switch from user to kernel RBS: */ \
++ ;; \
++ invala; /* M */ \
++ /* SAVE_IFS; /* see xen special handling below */ \
++ cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
++ ;; \
++ MINSTATE_START_SAVE_MIN \
++ adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
++ adds r16=PT(CR_IPSR),r1; \
++ ;; \
++ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
++ st8 [r16]=r29; /* save cr.ipsr */ \
++ ;; \
++ lfetch.fault.excl.nt1 [r17]; \
++ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
++ mov r29=b0 \
++ ;; \
++ adds r16=PT(R8),r1; /* initialize first base pointer */ \
++ adds r17=PT(R9),r1; /* initialize second base pointer */ \
++(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r8,16; \
++.mem.offset 8,0; st8.spill [r17]=r9,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r10,24; \
++.mem.offset 8,0; st8.spill [r17]=r11,24; \
++ ;; \
++ /* xen special handling for possibly lazy cover */ \
++ movl r8=XSI_PRECOVER_IFS; \
++ ;; \
++ ld8 r30=[r8]; \
++ ;; \
++ st8 [r16]=r28,16; /* save cr.iip */ \
++ st8 [r17]=r30,16; /* save cr.ifs */ \
++(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
++ mov r8=ar.ccv; \
++ mov r9=ar.csd; \
++ mov r10=ar.ssd; \
++ movl r11=FPSR_DEFAULT; /* L-unit */ \
++ ;; \
++ st8 [r16]=r25,16; /* save ar.unat */ \
++ st8 [r17]=r26,16; /* save ar.pfs */ \
++ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
++ ;; \
++ st8 [r16]=r27,16; /* save ar.rsc */ \
++(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
++(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
++ ;; /* avoid RAW on r16 & r17 */ \
++(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
++ st8 [r17]=r31,16; /* save predicates */ \
++(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
++ ;; \
++ st8 [r16]=r29,16; /* save b0 */ \
++ st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
++ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
++.mem.offset 8,0; st8.spill [r17]=r12,16; \
++ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r13,16; \
++.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
++ mov r13=IA64_KR(CURRENT); /* establish `current' */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r15,16; \
++.mem.offset 8,0; st8.spill [r17]=r14,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r2,16; \
++.mem.offset 8,0; st8.spill [r17]=r3,16; \
++ ;; \
++ EXTRA; \
++ mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2; \
++ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
++ ;; \
++ movl r1=__gp; /* establish kernel global pointer */ \
++ ;; \
++ /* MINSTATE_END_SAVE_MIN */
++#else
++#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
++ MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
++ mov r27=ar.rsc; /* M */ \
++ mov r20=r1; /* A */ \
++ mov r25=ar.unat; /* M */ \
++ mov r29=cr.ipsr; /* M */ \
++ mov r26=ar.pfs; /* I */ \
++ mov r28=cr.iip; /* M */ \
++ mov r21=ar.fpsr; /* M */ \
++ COVER; /* B;; (or nothing) */ \
++ ;; \
++ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
++ ;; \
++ ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
++ st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
++ adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
++ /* switch from user to kernel RBS: */ \
++ ;; \
++ invala; /* M */ \
++ SAVE_IFS; \
++ cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
++ ;; \
++ MINSTATE_START_SAVE_MIN \
++ adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
++ adds r16=PT(CR_IPSR),r1; \
++ ;; \
++ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
++ st8 [r16]=r29; /* save cr.ipsr */ \
++ ;; \
++ lfetch.fault.excl.nt1 [r17]; \
++ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
++ mov r29=b0 \
++ ;; \
++ adds r16=PT(R8),r1; /* initialize first base pointer */ \
++ adds r17=PT(R9),r1; /* initialize second base pointer */ \
++(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r8,16; \
++.mem.offset 8,0; st8.spill [r17]=r9,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r10,24; \
++.mem.offset 8,0; st8.spill [r17]=r11,24; \
++ ;; \
++ st8 [r16]=r28,16; /* save cr.iip */ \
++ st8 [r17]=r30,16; /* save cr.ifs */ \
++(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
++ mov r8=ar.ccv; \
++ mov r9=ar.csd; \
++ mov r10=ar.ssd; \
++ movl r11=FPSR_DEFAULT; /* L-unit */ \
++ ;; \
++ st8 [r16]=r25,16; /* save ar.unat */ \
++ st8 [r17]=r26,16; /* save ar.pfs */ \
++ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
++ ;; \
++ st8 [r16]=r27,16; /* save ar.rsc */ \
++(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
++(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
++ ;; /* avoid RAW on r16 & r17 */ \
++(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
++ st8 [r17]=r31,16; /* save predicates */ \
++(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
++ ;; \
++ st8 [r16]=r29,16; /* save b0 */ \
++ st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
++ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
++.mem.offset 8,0; st8.spill [r17]=r12,16; \
++ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r13,16; \
++.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
++ mov r13=IA64_KR(CURRENT); /* establish `current' */ \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r15,16; \
++.mem.offset 8,0; st8.spill [r17]=r14,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r16]=r2,16; \
++.mem.offset 8,0; st8.spill [r17]=r3,16; \
++ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
++ ;; \
++ EXTRA; \
++ movl r1=__gp; /* establish kernel global pointer */ \
++ ;; \
++ MINSTATE_END_SAVE_MIN
++#endif
++
++/*
++ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
++ *
++ * Assumed state upon entry:
++ * psr.ic: on
++ * r2: points to &pt_regs.r16
++ * r3: points to &pt_regs.r17
++ * r8: contents of ar.ccv
++ * r9: contents of ar.csd
++ * r10: contents of ar.ssd
++ * r11: FPSR_DEFAULT
++ *
++ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
++ */
++#define SAVE_REST \
++.mem.offset 0,0; st8.spill [r2]=r16,16; \
++.mem.offset 8,0; st8.spill [r3]=r17,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r18,16; \
++.mem.offset 8,0; st8.spill [r3]=r19,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r20,16; \
++.mem.offset 8,0; st8.spill [r3]=r21,16; \
++ mov r18=b6; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r22,16; \
++.mem.offset 8,0; st8.spill [r3]=r23,16; \
++ mov r19=b7; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r24,16; \
++.mem.offset 8,0; st8.spill [r3]=r25,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r26,16; \
++.mem.offset 8,0; st8.spill [r3]=r27,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r28,16; \
++.mem.offset 8,0; st8.spill [r3]=r29,16; \
++ ;; \
++.mem.offset 0,0; st8.spill [r2]=r30,16; \
++.mem.offset 8,0; st8.spill [r3]=r31,32; \
++ ;; \
++ mov ar.fpsr=r11; /* M-unit */ \
++ st8 [r2]=r8,8; /* ar.ccv */ \
++ adds r24=PT(B6)-PT(F7),r3; \
++ ;; \
++ stf.spill [r2]=f6,32; \
++ stf.spill [r3]=f7,32; \
++ ;; \
++ stf.spill [r2]=f8,32; \
++ stf.spill [r3]=f9,32; \
++ ;; \
++ stf.spill [r2]=f10; \
++ stf.spill [r3]=f11; \
++ adds r25=PT(B7)-PT(F11),r3; \
++ ;; \
++ st8 [r24]=r18,16; /* b6 */ \
++ st8 [r25]=r19,16; /* b7 */ \
++ ;; \
++ st8 [r24]=r9; /* ar.csd */ \
++ st8 [r25]=r10; /* ar.ssd */ \
++ ;;
++
++#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
++#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
++#ifdef CONFIG_XEN
++#define SAVE_MIN break 0;; /* FIXME: non-cover version only for ia32 support? */
++#else
++#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
++#endif
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xenpal.S linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenpal.S
+--- linux-2.6.18.8/arch/ia64/xen/xenpal.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xenpal.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,85 @@
++/*
++ * ia64/xen/xenpal.S
++ *
++ * Alternate PAL routines for Xen. Heavily leveraged from
++ * ia64/kernel/pal.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ * Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++
++#include <asm/asmmacro.h>
++#include <asm/processor.h>
++
++GLOBAL_ENTRY(xen_pal_call_static)
++ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
++ alloc loc1 = ar.pfs,5,5,0,0
++#ifdef CONFIG_XEN
++ movl r22=running_on_xen;;
++ ld4 r22=[r22];;
++ cmp.eq p7,p0=r22,r0
++(p7) br.cond.spnt.many __ia64_pal_call_static;;
++#endif
++ movl loc2 = pal_entry_point
++1: {
++ mov r28 = in0
++ mov r29 = in1
++ mov r8 = ip
++ }
++ ;;
++ ld8 loc2 = [loc2] // loc2 <- entry point
++ tbit.nz p6,p7 = in4, 0
++ adds r8 = 1f-1b,r8
++ mov loc4=ar.rsc // save RSE configuration
++ ;;
++ mov ar.rsc=0 // put RSE in enforced lazy, LE mode
++#ifdef CONFIG_XEN
++ mov r9 = r8
++ XEN_HYPER_GET_PSR
++ ;;
++ mov loc3 = r8
++ mov r8 = r9
++ ;;
++#else
++ mov loc3 = psr
++#endif
++ mov loc0 = rp
++ .body
++ mov r30 = in2
++
++#ifdef CONFIG_XEN
++ // this is low priority for paravirtualization, but is called
++ // from the idle loop so it confuses privop counting
++ movl r31=XSI_PSR_I_ADDR
++ ;;
++ ld8 r31=[r31]
++ mov r22=1
++ ;;
++ st1 [r31]=r22
++ ;;
++(p6) movl r31=XSI_PSR_IC
++ ;;
++(p6) st4.rel [r31]=r0
++ ;;
++ mov r31 = in3
++ mov b7 = loc2
++ ;;
++#else
++(p6) rsm psr.i | psr.ic
++ mov r31 = in3
++ mov b7 = loc2
++
++(p7) rsm psr.i
++ ;;
++(p6) srlz.i
++#endif
++ mov rp = r8
++ br.cond.sptk.many b7
++1: mov psr.l = loc3
++ mov ar.rsc = loc4 // restore RSE configuration
++ mov ar.pfs = loc1
++ mov rp = loc0
++ ;;
++ srlz.d // serialize restoration of psr.l
++ br.ret.sptk.many b0
++END(xen_pal_call_static)
+diff -rpuN linux-2.6.18.8/arch/ia64/xen/xensetup.S linux-2.6.18-xen-3.3.0/arch/ia64/xen/xensetup.S
+--- linux-2.6.18.8/arch/ia64/xen/xensetup.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/ia64/xen/xensetup.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,40 @@
++/*
++ * Support routines for Xen
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++
++#include <asm/processor.h>
++#include <asm/asmmacro.h>
++
++ .section .data.read_mostly
++ .align 8
++ .global running_on_xen
++running_on_xen:
++ data4 0
++ .previous
++
++#define isBP p3 // are we the Bootstrap Processor?
++
++ .text
++GLOBAL_ENTRY(early_xen_setup)
++ mov r8=ar.rsc // Initialized in head.S
++(isBP) movl r9=running_on_xen;;
++ extr.u r8=r8,2,2;; // Extract pl field
++ cmp.eq p7,p0=r8,r0 // p7: !running on xen
++ mov r8=1 // booleanize.
++(p7) br.ret.sptk.many rp;;
++(isBP) st4 [r9]=r8
++ movl r10=xen_ivt;;
++
++ mov cr.iva=r10
++
++ /* Set xsi base. */
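++ /* "break 0x1000" is the Xen/ia64 hypercall convention: r2 holds the
++    hypercall number and, for this call, r28 the argument. */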
++#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600
++(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
++(isBP) movl r28=XSI_BASE;;
++(isBP) break 0x1000;;
++
++ br.ret.sptk.many rp
++ ;;
++END(early_xen_setup)
+diff -rpuN linux-2.6.18.8/arch/powerpc/boot/Makefile linux-2.6.18-xen-3.3.0/arch/powerpc/boot/Makefile
+--- linux-2.6.18.8/arch/powerpc/boot/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/boot/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -36,8 +36,11 @@ zliblinuxheader := zlib.h zconf.h zutil.
+ $(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
+ #$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
+
++xen_guest-y = xen_guest.S
++
+ src-boot := crt0.S string.S prom.c stdio.c main.c div64.S
+ src-boot += $(zlib)
++src-boot += $(xen_guest-$(CONFIG_XEN))
+ src-boot := $(addprefix $(obj)/, $(src-boot))
+ obj-boot := $(addsuffix .o, $(basename $(src-boot)))
+
+diff -rpuN linux-2.6.18.8/arch/powerpc/boot/xen_guest.S linux-2.6.18-xen-3.3.0/arch/powerpc/boot/xen_guest.S
+--- linux-2.6.18.8/arch/powerpc/boot/xen_guest.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/boot/xen_guest.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,7 @@
++ .section __xen_guest
++ .ascii "GUEST_OS=linux"
++ .ascii ",GUEST_VER=xen-3.0"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x0"
++ .ascii ",LOADER=generic"
++ .byte 0
+diff -rpuN linux-2.6.18.8/arch/powerpc/configs/xen_maple_defconfig linux-2.6.18-xen-3.3.0/arch/powerpc/configs/xen_maple_defconfig
+--- linux-2.6.18.8/arch/powerpc/configs/xen_maple_defconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/configs/xen_maple_defconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1342 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.17
++# Mon Jan 15 23:48:47 2007
++#
++CONFIG_PPC64=y
++CONFIG_64BIT=y
++CONFIG_PPC_MERGE=y
++CONFIG_MMU=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_IRQ_PER_CPU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_PPC=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_PPC_OF=y
++CONFIG_PPC_UDBG_16550=y
++CONFIG_GENERIC_TBSYNC=y
++# CONFIG_DEFAULT_UIMAGE is not set
++
++#
++# Processor support
++#
++CONFIG_POWER4_ONLY=y
++CONFIG_POWER4=y
++CONFIG_PPC_FPU=y
++CONFIG_ALTIVEC=y
++CONFIG_PPC_STD_MMU=y
++CONFIG_VIRT_CPU_ACCOUNTING=y
++CONFIG_SMP=y
++CONFIG_NR_CPUS=32
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION="-Xen"
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_RT_MUTEXES=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++# CONFIG_IOSCHED_AS is not set
++CONFIG_IOSCHED_DEADLINE=y
++# CONFIG_IOSCHED_CFQ is not set
++# CONFIG_DEFAULT_AS is not set
++CONFIG_DEFAULT_DEADLINE=y
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="deadline"
++
++#
++# Platform support
++#
++CONFIG_PPC_MULTIPLATFORM=y
++# CONFIG_PPC_ISERIES is not set
++# CONFIG_EMBEDDED6xx is not set
++# CONFIG_APUS is not set
++CONFIG_PPC_PSERIES=y
++# CONFIG_PPC_PMAC is not set
++CONFIG_PPC_MAPLE=y
++# CONFIG_PPC_CELL is not set
++# CONFIG_PPC_CELL_NATIVE is not set
++# CONFIG_PPC_IBM_CELL_BLADE is not set
++# CONFIG_UDBG_RTAS_CONSOLE is not set
++CONFIG_PPC_XEN=y
++CONFIG_XICS=y
++CONFIG_U3_DART=y
++CONFIG_MPIC=y
++CONFIG_PPC_RTAS=y
++CONFIG_RTAS_ERROR_LOGGING=y
++CONFIG_RTAS_PROC=y
++CONFIG_RTAS_FLASH=y
++# CONFIG_MMIO_NVRAM is not set
++CONFIG_MPIC_BROKEN_U3=y
++CONFIG_IBMVIO=y
++# CONFIG_IBMEBUS is not set
++# CONFIG_PPC_MPC106 is not set
++CONFIG_PPC_970_NAP=y
++# CONFIG_CPU_FREQ is not set
++# CONFIG_WANT_EARLY_SERIAL is not set
++
++#
++# Kernel options
++#
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++# CONFIG_PREEMPT_BKL is not set
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_FORCE_MAX_ZONEORDER=13
++CONFIG_IOMMU_VMERGE=y
++# CONFIG_HOTPLUG_CPU is not set
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++CONFIG_IRQ_ALL_CPUS=y
++# CONFIG_PPC_SPLPAR is not set
++CONFIG_EEH=y
++CONFIG_SCANLOG=y
++CONFIG_LPARCFG=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=4
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_DEFAULT=y
++CONFIG_SELECT_MEMORY_MODEL=y
++# CONFIG_FLATMEM_MANUAL is not set
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++CONFIG_SPARSEMEM_MANUAL=y
++CONFIG_SPARSEMEM=y
++CONFIG_NEED_MULTIPLE_NODES=y
++CONFIG_HAVE_MEMORY_PRESENT=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPARSEMEM_EXTREME=y
++CONFIG_MEMORY_HOTPLUG=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_MIGRATION=y
++CONFIG_RESOURCES_64BIT=y
++CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
++CONFIG_ARCH_MEMORY_PROBE=y
++# CONFIG_PPC_64K_PAGES is not set
++# CONFIG_SCHED_SMT is not set
++CONFIG_PROC_DEVICETREE=y
++# CONFIG_CMDLINE_BOOL is not set
++# CONFIG_PM is not set
++CONFIG_SECCOMP=y
++CONFIG_ISA_DMA_API=y
++
++#
++# Bus options
++#
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_PPC_I8259=y
++# CONFIG_PPC_INDIRECT_PCI is not set
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++CONFIG_KERNEL_START=0xc000000000000000
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++# CONFIG_IP_ROUTE_VERBOSE is not set
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_NET_IPIP=y
++# CONFIG_NET_IPGRE is not set
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++CONFIG_INET_TUNNEL=y
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++CONFIG_IPV6=y
++CONFIG_IPV6_PRIVACY=y
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_INET6_AH=y
++CONFIG_INET6_ESP=y
++CONFIG_INET6_IPCOMP=y
++CONFIG_INET6_XFRM_TUNNEL=y
++CONFIG_INET6_TUNNEL=y
++CONFIG_INET6_XFRM_MODE_TRANSPORT=y
++CONFIG_INET6_XFRM_MODE_TUNNEL=y
++CONFIG_IPV6_TUNNEL=y
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_NETLINK=y
++CONFIG_NETFILTER_NETLINK_QUEUE=y
++CONFIG_NETFILTER_NETLINK_LOG=y
++CONFIG_NETFILTER_XTABLES=y
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
++CONFIG_NETFILTER_XT_TARGET_MARK=y
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
++CONFIG_NETFILTER_XT_MATCH_COMMENT=y
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
++CONFIG_NETFILTER_XT_MATCH_DCCP=y
++CONFIG_NETFILTER_XT_MATCH_ESP=y
++CONFIG_NETFILTER_XT_MATCH_HELPER=y
++CONFIG_NETFILTER_XT_MATCH_LENGTH=y
++CONFIG_NETFILTER_XT_MATCH_LIMIT=y
++CONFIG_NETFILTER_XT_MATCH_MAC=y
++CONFIG_NETFILTER_XT_MATCH_MARK=y
++CONFIG_NETFILTER_XT_MATCH_POLICY=y
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
++CONFIG_NETFILTER_XT_MATCH_QUOTA=y
++CONFIG_NETFILTER_XT_MATCH_REALM=y
++CONFIG_NETFILTER_XT_MATCH_SCTP=y
++CONFIG_NETFILTER_XT_MATCH_STATE=y
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
++CONFIG_NETFILTER_XT_MATCH_STRING=y
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=y
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CONNTRACK_EVENTS=y
++CONFIG_IP_NF_CONNTRACK_NETLINK=y
++CONFIG_IP_NF_CT_PROTO_SCTP=y
++CONFIG_IP_NF_FTP=y
++CONFIG_IP_NF_IRC=y
++# CONFIG_IP_NF_NETBIOS_NS is not set
++CONFIG_IP_NF_TFTP=y
++CONFIG_IP_NF_AMANDA=y
++CONFIG_IP_NF_PPTP=y
++# CONFIG_IP_NF_H323 is not set
++# CONFIG_IP_NF_SIP is not set
++# CONFIG_IP_NF_QUEUE is not set
++CONFIG_IP_NF_IPTABLES=y
++CONFIG_IP_NF_MATCH_IPRANGE=y
++CONFIG_IP_NF_MATCH_TOS=y
++CONFIG_IP_NF_MATCH_RECENT=y
++CONFIG_IP_NF_MATCH_ECN=y
++CONFIG_IP_NF_MATCH_DSCP=y
++CONFIG_IP_NF_MATCH_AH=y
++CONFIG_IP_NF_MATCH_TTL=y
++CONFIG_IP_NF_MATCH_OWNER=y
++CONFIG_IP_NF_MATCH_ADDRTYPE=y
++CONFIG_IP_NF_MATCH_HASHLIMIT=y
++CONFIG_IP_NF_FILTER=y
++CONFIG_IP_NF_TARGET_REJECT=y
++CONFIG_IP_NF_TARGET_LOG=y
++CONFIG_IP_NF_TARGET_ULOG=y
++CONFIG_IP_NF_TARGET_TCPMSS=y
++CONFIG_IP_NF_NAT=y
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=y
++CONFIG_IP_NF_TARGET_REDIRECT=y
++CONFIG_IP_NF_TARGET_NETMAP=y
++CONFIG_IP_NF_TARGET_SAME=y
++CONFIG_IP_NF_NAT_SNMP_BASIC=y
++CONFIG_IP_NF_NAT_IRC=y
++CONFIG_IP_NF_NAT_FTP=y
++CONFIG_IP_NF_NAT_TFTP=y
++CONFIG_IP_NF_NAT_AMANDA=y
++CONFIG_IP_NF_NAT_PPTP=y
++CONFIG_IP_NF_MANGLE=y
++CONFIG_IP_NF_TARGET_TOS=y
++CONFIG_IP_NF_TARGET_ECN=y
++CONFIG_IP_NF_TARGET_DSCP=y
++CONFIG_IP_NF_TARGET_TTL=y
++CONFIG_IP_NF_TARGET_CLUSTERIP=y
++CONFIG_IP_NF_RAW=y
++CONFIG_IP_NF_ARPTABLES=y
++CONFIG_IP_NF_ARPFILTER=y
++CONFIG_IP_NF_ARP_MANGLE=y
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP6_NF_QUEUE is not set
++CONFIG_IP6_NF_IPTABLES=y
++CONFIG_IP6_NF_MATCH_RT=y
++CONFIG_IP6_NF_MATCH_OPTS=y
++CONFIG_IP6_NF_MATCH_FRAG=y
++CONFIG_IP6_NF_MATCH_HL=y
++CONFIG_IP6_NF_MATCH_OWNER=y
++CONFIG_IP6_NF_MATCH_IPV6HEADER=y
++CONFIG_IP6_NF_MATCH_AH=y
++CONFIG_IP6_NF_MATCH_EUI64=y
++CONFIG_IP6_NF_FILTER=y
++CONFIG_IP6_NF_TARGET_LOG=y
++CONFIG_IP6_NF_TARGET_REJECT=y
++CONFIG_IP6_NF_MANGLE=y
++CONFIG_IP6_NF_TARGET_HL=y
++CONFIG_IP6_NF_RAW=y
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=y
++CONFIG_BRIDGE_EBT_BROUTE=y
++CONFIG_BRIDGE_EBT_T_FILTER=y
++CONFIG_BRIDGE_EBT_T_NAT=y
++CONFIG_BRIDGE_EBT_802_3=y
++CONFIG_BRIDGE_EBT_AMONG=y
++CONFIG_BRIDGE_EBT_ARP=y
++CONFIG_BRIDGE_EBT_IP=y
++CONFIG_BRIDGE_EBT_LIMIT=y
++CONFIG_BRIDGE_EBT_MARK=y
++CONFIG_BRIDGE_EBT_PKTTYPE=y
++CONFIG_BRIDGE_EBT_STP=y
++CONFIG_BRIDGE_EBT_VLAN=y
++CONFIG_BRIDGE_EBT_ARPREPLY=y
++CONFIG_BRIDGE_EBT_DNAT=y
++CONFIG_BRIDGE_EBT_MARK_T=y
++CONFIG_BRIDGE_EBT_REDIRECT=y
++CONFIG_BRIDGE_EBT_SNAT=y
++CONFIG_BRIDGE_EBT_LOG=y
++CONFIG_BRIDGE_EBT_ULOG=y
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++CONFIG_VLAN_8021Q=y
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++CONFIG_NET_CLS_ROUTE=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++# CONFIG_SYS_HYPERVISOR is not set
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=10240
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++CONFIG_IDE_TASK_IOCTL=y
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_SL82C105 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=y
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++# CONFIG_BLK_DEV_PIIX is not set
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++# CONFIG_SCSI_PROC_FS is not set
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=y
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++CONFIG_SCSI_ISCSI_ATTRS=y
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_IBMVSCSI is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++CONFIG_SCSI_IPR=y
++# CONFIG_SCSI_IPR_TRACE is not set
++# CONFIG_SCSI_IPR_DUMP is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++# CONFIG_FUSION is not set
++# CONFIG_FUSION_SPI is not set
++# CONFIG_FUSION_FC is not set
++# CONFIG_FUSION_SAS is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Macintosh device drivers
++#
++# CONFIG_WINDFARM is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++# CONFIG_NET_TULIP is not set
++# CONFIG_HP100 is not set
++CONFIG_IBMVETH=y
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++CONFIG_AMD8111_ETH=y
++# CONFIG_AMD8111E_NAPI is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++# CONFIG_E100 is not set
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++CONFIG_SKY2=y
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++CONFIG_BNX2=y
++# CONFIG_MV643XX_ETH is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++# CONFIG_SERIO is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_ICOM is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_HVC_DRIVER=y
++CONFIG_HVC_CONSOLE=y
++# CONFIG_HVC_RTAS is not set
++# CONFIG_HVCS is not set
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++CONFIG_HW_RANDOM=y
++CONFIG_GEN_RTC=y
++# CONFIG_GEN_RTC_X is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_AGP is not set
++# CONFIG_DRM is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++# CONFIG_I2C_ALGOPCF is not set
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++CONFIG_I2C_AMD8111=y
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++CONFIG_VIDEO_V4L2=y
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++# CONFIG_VGA_CONSOLE is not set
++CONFIG_DUMMY_CONSOLE=y
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++# CONFIG_USB is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=y
++CONFIG_INFINIBAND_USER_MAD=y
++CONFIG_INFINIBAND_USER_ACCESS=y
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=y
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=y
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
++CONFIG_INFINIBAND_SRP=y
++# CONFIG_INFINIBAND_ISER is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++CONFIG_EXT2_FS_XIP=y
++CONFIG_FS_XIP=y
++CONFIG_EXT3_FS=y
++# CONFIG_EXT3_FS_XATTR is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++# CONFIG_JOLIET is not set
++# CONFIG_ZISOFS is not set
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_ACL_SUPPORT=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++# CONFIG_EFI_PARTITION is not set
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf-8"
++# CONFIG_NLS_CODEPAGE_437 is not set
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++# CONFIG_NLS_ISO8859_1 is not set
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=y
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=y
++CONFIG_TEXTSEARCH_BM=y
++CONFIG_TEXTSEARCH_FSM=y
++CONFIG_PLIST=y
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++# CONFIG_UNUSED_SYMBOLS is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=17
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++CONFIG_DEBUG_SLAB=y
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_SPINLOCK_SLEEP=y
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_INFO=y
++CONFIG_DEBUG_FS=y
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_DEBUG_STACKOVERFLOW=y
++CONFIG_DEBUG_STACK_USAGE=y
++CONFIG_DEBUGGER=y
++CONFIG_XMON=y
++CONFIG_XMON_DEFAULT=y
++# CONFIG_IRQSTACKS is not set
++CONFIG_BOOTX_TEXT=y
++# CONFIG_PPC_EARLY_DEBUG is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030202
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++CONFIG_XEN_NETDEV_LOOPBACK=y
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++# CONFIG_XEN_COMPAT_030002_AND_LATER is not set
++CONFIG_XEN_COMPAT_LATEST_ONLY=y
++# CONFIG_XEN_COMPAT_030002 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_DEVMEM=y
++CONFIG_XEN_SKBUFF=y
++CONFIG_XEN_REBOOT=y
++CONFIG_XEN_XENCOMM=y
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=y
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++CONFIG_CRYPTO_DEFLATE=y
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
+diff -rpuN linux-2.6.18.8/arch/powerpc/Kconfig linux-2.6.18-xen-3.3.0/arch/powerpc/Kconfig
+--- linux-2.6.18.8/arch/powerpc/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -439,6 +439,17 @@ config UDBG_RTAS_CONSOLE
+ bool
+ default n
+
++config PPC_XEN
++ bool "Enable Xen compatible kernel"
++ depends on PPC_MULTIPLATFORM && PPC64 && PPC_MAPLE && PPC_PSERIES && SMP
++ select XEN
++ select XEN_PRIVILEGED_GUEST
++ select XEN_UNPRIVILEGED_GUEST
++ select XEN_XENCOMM
++
++ help
++ This option will compile a kernel compatible with the Xen hypervisor.
++
+ config XICS
+ depends on PPC_PSERIES
+ bool
+@@ -1080,6 +1091,8 @@ source "arch/powerpc/Kconfig.debug"
+
+ source "security/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ config KEYS_COMPAT
+ bool
+ depends on COMPAT && KEYS
+diff -rpuN linux-2.6.18.8/arch/powerpc/Kconfig.debug linux-2.6.18-xen-3.3.0/arch/powerpc/Kconfig.debug
+--- linux-2.6.18.8/arch/powerpc/Kconfig.debug 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/Kconfig.debug 2008-08-21 11:36:07.000000000 +0200
+@@ -160,6 +160,20 @@ config PPC_EARLY_DEBUG_ISERIES
+ Select this to enable early debugging for legacy iSeries. You need
+ to hit "Ctrl-x Ctrl-x" to see the messages on the console.
+
++config PPC_EARLY_DEBUG_XEN_DOM0
++ bool "Xen Dom0 Console"
++ depends on PPC_XEN
++ help
++ Select this to enable early debugging for Xen Dom0. Setting
++ this will result in a kernel that may not work as a DomU.
++
++config PPC_EARLY_DEBUG_XEN_DOMU
++ bool "Xen DomU Console"
++ depends on PPC_XEN && XEN_UNPRIVILEGED_GUEST
++ help
++ Select this to enable early debugging for Xen DomU. Setting
++ this will result in a kernel that may not work as a Dom0.
++
+ endchoice
+
+ endmenu
+diff -rpuN linux-2.6.18.8/arch/powerpc/kernel/cpu_setup_power4.S linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/cpu_setup_power4.S
+--- linux-2.6.18.8/arch/powerpc/kernel/cpu_setup_power4.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/cpu_setup_power4.S 2008-08-21 11:36:07.000000000 +0200
+@@ -73,6 +73,13 @@ _GLOBAL(__970_cpu_preinit)
+ blr
+
+ _GLOBAL(__setup_cpu_ppc970)
++ /*
++ * Do nothing if not running in HV mode
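++ * (the mfmsr/rldicl pair below extracts MSR[HV]; under Xen the
++ * kernel runs without hypervisor privilege, so HV is clear and
++ * beqlr returns early)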
++ */
++ mfmsr r0
++ rldicl. r0,r0,4,63
++ beqlr
++
+ mfspr r0,SPRN_HID0
+ li r11,5 /* clear DOZE and SLEEP */
+ rldimi r0,r11,52,8 /* set NAP and DPM */
+diff -rpuN linux-2.6.18.8/arch/powerpc/kernel/prom_init.c linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/prom_init.c
+--- linux-2.6.18.8/arch/powerpc/kernel/prom_init.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/prom_init.c 2008-08-21 11:36:07.000000000 +0200
+@@ -188,6 +188,7 @@ static unsigned long __initdata prom_tce
+ #define PLATFORM_LPAR 0x0001
+ #define PLATFORM_POWERMAC 0x0400
+ #define PLATFORM_GENERIC 0x0500
++#define PLATFORM_GENERIC_XEN (PLATFORM_GENERIC | PLATFORM_LPAR)
+
+ static int __initdata of_platform;
+
+@@ -1529,6 +1530,14 @@ static int __init prom_find_machine_type
+ phandle rtas;
+ int x;
+ #endif
++#ifdef CONFIG_PPC_XEN
++ phandle xen;
++
++ xen = call_prom("finddevice", 1, 1, ADDR("/xen"));
++ if (PHANDLE_VALID(xen)) {
++ return PLATFORM_GENERIC_XEN;
++ }
++#endif
+
+ /* Look for a PowerMac */
+ len = prom_getprop(_prom->root, "compatible",
+@@ -2262,6 +2271,31 @@ unsigned long __init prom_init(unsigned
+ if (RELOC(of_platform) == PLATFORM_PSERIES)
+ prom_initialize_tce_table();
+ #endif
++#ifdef CONFIG_PPC_XEN
++ if (RELOC(of_platform) & PLATFORM_LPAR) {
++ phandle xen;
++
++ prom_debug("XXX:checking for Xen OF package\n");
++
++ xen = call_prom("finddevice", 1, 1, ADDR("/xen"));
++ if (PHANDLE_VALID(xen)) {
++ u64 res[2];
++ int l;
++ ulong base;
++
++ l = prom_getprop(xen, "reserved", res, sizeof (res));
++ if (l != sizeof(res)) {
++ prom_panic("Xen reserved property does not exist\n");
++ }
++
++ base = alloc_down(res[1], PAGE_SIZE, 0);
++ if (base != res[0]) {
++ prom_panic("XSI != alloc_down()\n");
++ }
++ reserve_mem(res[0], res[1]);
++ }
++ }
++#endif
+
+ /*
+ * On non-powermacs, try to instantiate RTAS and puts all CPUs
+diff -rpuN linux-2.6.18.8/arch/powerpc/kernel/setup-common.c linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/setup-common.c
+--- linux-2.6.18.8/arch/powerpc/kernel/setup-common.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/setup-common.c 2008-08-21 11:36:07.000000000 +0200
+@@ -387,6 +387,12 @@ void __init smp_setup_cpu_maps(void)
+ }
+ }
+
++ if (machine_is(xen)) {
++ /* something more intelligent, perhaps? */
++ for (cpu = 0; cpu < NR_CPUS; cpu++)
++ cpu_set(cpu, cpu_possible_map);
++ }
++
+ #ifdef CONFIG_PPC64
+ /*
+ * On pSeries LPAR, we need to know how many cpus
+diff -rpuN linux-2.6.18.8/arch/powerpc/kernel/udbg.c linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/udbg.c
+--- linux-2.6.18.8/arch/powerpc/kernel/udbg.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/udbg.c 2008-08-21 11:36:07.000000000 +0200
+@@ -45,6 +45,9 @@ void __init udbg_early_init(void)
+ #elif defined(CONFIG_PPC_EARLY_DEBUG_ISERIES)
+ /* For iSeries - hit Ctrl-x Ctrl-x to see the output */
+ udbg_init_iseries();
++#elif defined(CONFIG_PPC_EARLY_DEBUG_XEN_DOM0) || \
++ defined(CONFIG_PPC_EARLY_DEBUG_XEN_DOMU)
++ udbg_init_xen();
+ #endif
+ }
+
+diff -rpuN linux-2.6.18.8/arch/powerpc/kernel/vdso.c linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/vdso.c
+--- linux-2.6.18.8/arch/powerpc/kernel/vdso.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/kernel/vdso.c 2008-08-21 11:36:07.000000000 +0200
+@@ -282,6 +282,13 @@ int arch_setup_additional_pages(struct l
+ * pages though
+ */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
++ /*
++ * Make sure the vDSO gets into every core dump.
++ * Dumping its contents makes post-mortem fully interpretable later
++ * without matching up the same kernel and hardware config to see
++ * what PC values meant.
++ */
++ vma->vm_flags |= VM_ALWAYSDUMP;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
+ vma->vm_ops = &vdso_vmops;
+diff -rpuN linux-2.6.18.8/arch/powerpc/Makefile linux-2.6.18-xen-3.3.0/arch/powerpc/Makefile
+--- linux-2.6.18.8/arch/powerpc/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -65,6 +65,7 @@ CPPFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARC
+ AFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH)
+ CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
+ CFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -ffixed-r2 -mmultiple
++CFLAGS-$(CONFIG_PPC_XEN) += -Iinclude/asm-$(ARCH)/xen
+ CPPFLAGS += $(CPPFLAGS-y)
+ AFLAGS += $(AFLAGS-y)
+ CFLAGS += -msoft-float -pipe $(CFLAGS-y)
+diff -rpuN linux-2.6.18.8/arch/powerpc/mm/slb_low.S linux-2.6.18-xen-3.3.0/arch/powerpc/mm/slb_low.S
+--- linux-2.6.18.8/arch/powerpc/mm/slb_low.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/mm/slb_low.S 2008-08-21 11:36:07.000000000 +0200
+@@ -51,6 +51,23 @@ _GLOBAL(slb_allocate_realmode)
+ */
+ bne cr7,1f
+
++#ifdef CONFIG_PPC_XEN
++_GLOBAL(slb_miss_kernel_load_xen_nop)
++ b 3f
++ /* Need to check if it is in the part of our XEN Foreign Map */
++ rldicl r9,r3,30,63 /* get Xen region */
++ cmpldi cr7,r9,1 /* cmp this bit set to 1 */
++ bne cr7,3f
++ /* Xen linear mapping encoding bits: the "li" instruction below
++ * could be patched (like the other pages of the linear map) if
++ * we ever wish to map anything other than 4K pages in this
++ * region; right now zero is fine.
++ */
++_GLOBAL(slb_miss_kernel_load_xen_linear)
++ li r11,0
++ b slb_finish_load
++3:
++#endif
+ /* Linear mapping encoding bits, the "li" instruction below will
+ * be patched by the kernel at boot
+ */
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/Makefile linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/Makefile
+--- linux-2.6.18.8/arch/powerpc/platforms/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -12,6 +12,8 @@ obj-$(CONFIG_PPC_85xx) += 85xx/
+ obj-$(CONFIG_PPC_86xx) += 86xx/
+ obj-$(CONFIG_PPC_PSERIES) += pseries/
+ obj-$(CONFIG_PPC_ISERIES) += iseries/
++# must occur before xen hosting platforms
++obj-$(CONFIG_PPC_XEN) += xen/
+ obj-$(CONFIG_PPC_MAPLE) += maple/
+ obj-$(CONFIG_PPC_CELL) += cell/
+ obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/pseries/iommu.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/pseries/iommu.c
+--- linux-2.6.18.8/arch/powerpc/platforms/pseries/iommu.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/pseries/iommu.c 2008-08-21 11:36:07.000000000 +0200
+@@ -531,6 +531,17 @@ static void iommu_dev_setup_pSeriesLP(st
+ * already allocated.
+ */
+ dn = pci_device_to_OF_node(dev);
++ if (dn == NULL) {
++#ifdef CONFIG_PPC_XEN
++ /* this becomes possible for Xen Dom0 */
++ DBG("%s, dev %p (%s) has no OF devtree entry\n", __func__,
++ dev, pci_name(dev));
++ return;
++#else
++ panic("%s, dev %p (%s) has no OF devtree entry\n", __func__,
++ dev, pci_name(dev));
++#endif
++ }
+
+ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
+ pdn = pdn->parent) {
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/balloon.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/balloon.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/balloon.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/balloon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,82 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <asm/hypervisor.h>
++#include "setup.h"
++
++/*
++ * FIXME: Port balloon driver, if ever
++ */
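++/* Until then, the entry points below are thin stubs: the pagevec
++ * helpers are backed by the foreign-page allocator in gnttab.c
++ * rather than by a real balloon. */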
++
++struct page **alloc_empty_pages_and_pagevec(int nr_pages)
++{
++ struct page *page, **pagevec;
++ int i;
++
++ pagevec = kmalloc(sizeof(*pagevec) * nr_pages, GFP_KERNEL);
++ if (pagevec == NULL)
++ return NULL;
++
++ for (i = 0; i < nr_pages; i++) {
++ page = alloc_foreign_page();
++ BUG_ON(page == NULL);
++ pagevec[i] = page;
++ /* There is no real page backing us yet so it cannot
++ * be scrubbed */
++ }
++
++ return pagevec;
++}
++
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++{
++ int i;
++
++ if (pagevec == NULL)
++ return;
++
++ for (i = 0; i < nr_pages; i++) {
++ free_foreign_page(pagevec[i]);
++ }
++
++ kfree(pagevec);
++}
++
++void balloon_dealloc_empty_page_range(
++ struct page *page, unsigned long nr_pages)
++{
++ __free_pages(page, get_order(nr_pages * PAGE_SIZE));
++}
++
++void balloon_update_driver_allowance(long delta)
++{
++}
++
++void balloon_release_driver_page(struct page *page)
++{
++ BUG();
++}
++
++EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
++EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(balloon_release_driver_page);
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/gnttab.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/gnttab.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/gnttab.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/gnttab.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,468 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/vmalloc.h>
++#include <linux/memory_hotplug.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/grant_table.h>
++#include <asm/pgtable.h>
++#include <asm/sections.h>
++#include <asm/io.h>
++#include <asm/machdep.h>
++#include <asm/prom.h>
++#include <asm/cacheflush.h>
++#include "setup.h"
++#include "../pseries/plpar_wrappers.h"
++
++#undef DEBUG
++
++#ifdef DEBUG
++#define DBG(fmt...) printk(KERN_EMERG fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++#define NR_GRANT_FRAMES 4
++
++struct address_space xen_foreign_dummy_mapping;
++
++static ulong foreign_map_pfn;
++static ulong foreign_map_pgs;
++static unsigned long *foreign_map_bitmap;
++
++
++/* hijack _mapcount */
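++/* (_mapcount is otherwise unused for these foreign pages: it starts
++ * at -1, so gnt_map()'s -1 -> 0 transition marks the first grant
++ * mapping and gnt_unmap()'s 0 -> -1 transition marks the last) */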
++static inline int gnt_mapcount(struct page *page)
++{
++ return atomic_read(&(page)->_mapcount) + 1;
++}
++
++static inline int gnt_map(struct page *page)
++{
++ /* return true is transition from -1 to 0 */
++ return atomic_inc_and_test(&page->_mapcount);
++}
++
++static inline int gnt_unmap(struct page *page)
++{
++ int val;
++
++ val = atomic_dec_return(&page->_mapcount);
++ if (val < -1) {
++ atomic_inc(&page->_mapcount);
++ printk(KERN_EMERG "%s: %d\n", __func__, val);
++ }
++
++ return (val == -1);
++}
++
++
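++/* Bolt a 4K kernel HPTE for the given physical address so the page
++ * becomes reachable through the linear map at __va(paddr). */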
++static long map_to_linear(ulong paddr)
++{
++ unsigned long vaddr;
++ int psize;
++ unsigned long mode;
++ int slot;
++ uint shift;
++ unsigned long tmp_mode;
++
++ psize = MMU_PAGE_4K;
++ shift = mmu_psize_defs[psize].shift;
++ mode = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
++ vaddr = (ulong)__va(paddr);
++
++ {
++ unsigned long vpn, hash, hpteg;
++ unsigned long vsid = get_kernel_vsid(vaddr);
++ unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
++
++ vpn = va >> shift;
++ tmp_mode = mode;
++
++ /* Make non-kernel text non-executable */
++ if (!in_kernel_text(vaddr))
++ tmp_mode = mode | HPTE_R_N;
++
++ hash = hpt_hash(va, shift);
++ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
++
++ BUG_ON(!ppc_md.hpte_insert);
++ slot = ppc_md.hpte_insert(hpteg, va, paddr,
++ tmp_mode, HPTE_V_BOLTED, psize);
++ if (slot < 0)
++ printk(KERN_EMERG
++ "%s: no more bolted entries "
++ "HTAB[0x%lx]: 0x%lx\n",
++ __func__, hpteg, paddr);
++ }
++ return slot;
++}
++
++static unsigned long get_hpte_vsid(ulong slot)
++{
++ unsigned long dword0;
++ unsigned long lpar_rc;
++ unsigned long dummy_word1;
++ unsigned long flags;
++
++ /* Read 1 pte at a time */
++ /* Do not need RPN to logical page translation */
++ /* No cross CEC PFT access */
++ flags = 0;
++
++ lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
++
++ BUG_ON(lpar_rc != H_SUCCESS);
++
++ return dword0;
++}
++
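++/* Search both the primary and the secondary (j == 1) hash group for
++ * a valid HPTE matching this VA; a match in the secondary group is
++ * flagged by negating the slot number. */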
++static long find_hpte_slot(unsigned long va, int psize)
++{
++ unsigned long hash;
++ unsigned long i, j;
++ long slot;
++ unsigned long want_v, hpte_v;
++
++ hash = hpt_hash(va, mmu_psize_defs[psize].shift);
++ want_v = hpte_encode_v(va, psize);
++
++ for (j = 0; j < 2; j++) {
++ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++ for (i = 0; i < HPTES_PER_GROUP; i++) {
++ hpte_v = get_hpte_vsid(slot);
++
++ if (HPTE_V_COMPARE(hpte_v, want_v)
++ && (hpte_v & HPTE_V_VALID)
++ && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
++ /* HPTE matches */
++ if (j)
++ slot = -slot;
++ return slot;
++ }
++ ++slot;
++ }
++ hash = ~hash;
++ }
++
++ return -1;
++}
++
++static long find_map_slot(ulong ea)
++{
++ int psize = MMU_PAGE_4K;
++ ulong vsid;
++ ulong va;
++
++ vsid = get_kernel_vsid(ea);
++ va = (vsid << 28) | (ea & 0x0fffffff);
++
++ return find_hpte_slot(va, psize);
++}
++
++
++static void gnttab_pre_unmap_grant_ref(
++ struct gnttab_unmap_grant_ref *unmap, int count)
++{
++ long slot;
++ int i;
++ ulong ea;
++ unsigned long dummy1, dummy2;
++ ulong flags;
++
++ /* paranoia */
++ local_irq_save(flags);
++
++ for (i = 0 ; i < count; i++) {
++ struct page *page;
++
++ ea = (ulong)__va(unmap[i].host_addr);
++ page = virt_to_page(ea);
++
++ if (!gnt_unmap(page)) {
++ DBG("%s[0x%x]: skip: 0x%lx, mapcount 0x%x\n",
++ __func__, i, ea, gnt_mapcount(page));
++ continue;
++ }
++ slot = find_map_slot(ea);
++ if (slot < 0) {
++ printk(KERN_EMERG "%s: PTE not found: 0x%lx\n",
++ __func__, ea);
++ continue;
++ }
++
++ DBG("%s[0x%x]: 0x%lx: mapcount: 0x%x\n",
++ __func__, i, ea, gnt_mapcount(page));
++ plpar_pte_remove(0, slot, 0, &dummy1, &dummy2);
++ }
++ local_irq_restore(flags);
++}
++
++static void gnttab_post_map_grant_ref(
++ struct gnttab_map_grant_ref *map, int count)
++{
++ int i;
++ long slot;
++ ulong flags;
++
++ /* paranoia */
++ local_irq_save(flags);
++
++ for (i = 0 ; i < count; i++) {
++ ulong pa = map[i].host_addr;
++ struct page *page;
++
++ if (map[i].status != GNTST_okay) {
++ printk(KERN_EMERG "%s: status, skip\n", __func__);
++ continue;
++ }
++
++ BUG_ON(pa < (foreign_map_pfn << PAGE_SHIFT));
++ BUG_ON(pa >= (foreign_map_pfn << PAGE_SHIFT) +
++ (foreign_map_pgs << PAGE_SHIFT));
++
++ page = virt_to_page(__va(pa));
++
++ if (gnt_map(page)) {
++#ifdef DEBUG
++ /* we need to get smarter than this */
++ slot = find_map_slot((ulong)__va(pa));
++ if (slot >= 0) {
++ DBG("%s: redundant 0x%lx\n", __func__, pa);
++ continue;
++ }
++#endif
++ slot = map_to_linear(pa);
++ DBG("%s[0x%x]: 0x%lx, mapcount:0x%x\n",
++ __func__, i, pa, gnt_mapcount(page));
++
++ } else {
++ DBG("%s[0x%x] skip 0x%lx, mapcount:0x%x\n",
++ __func__, i, pa, gnt_mapcount(page));
++ }
++ }
++ local_irq_restore(flags);
++}
++
++int HYPERVISOR_grant_table_op(unsigned int cmd, void *op, unsigned int count)
++{
++ void *desc;
++ void *frame_list = NULL;
++ int argsize;
++ int ret = -ENOMEM;
++
++ switch (cmd) {
++ case GNTTABOP_map_grant_ref:
++ argsize = sizeof(struct gnttab_map_grant_ref);
++ break;
++ case GNTTABOP_unmap_grant_ref:
++ gnttab_pre_unmap_grant_ref(op, count);
++ argsize = sizeof(struct gnttab_unmap_grant_ref);
++ break;
++ case GNTTABOP_setup_table: {
++ struct gnttab_setup_table setup;
++
++ memcpy(&setup, op, sizeof(setup));
++ argsize = sizeof(setup);
++
++ frame_list = xencomm_map(
++ xen_guest_handle(setup.frame_list),
++ (sizeof(*xen_guest_handle(setup.frame_list))
++ * setup.nr_frames));
++
++ if (frame_list == NULL)
++ return -ENOMEM;
++
++ set_xen_guest_handle(setup.frame_list, frame_list);
++ memcpy(op, &setup, sizeof(setup));
++ }
++ break;
++ case GNTTABOP_dump_table:
++ argsize = sizeof(struct gnttab_dump_table);
++ break;
++ case GNTTABOP_transfer:
++ BUG();
++ argsize = sizeof(struct gnttab_transfer);
++ break;
++ case GNTTABOP_copy:
++ argsize = sizeof(struct gnttab_copy);
++ break;
++ case GNTTABOP_query_size:
++ argsize = sizeof(struct gnttab_query_size);
++ break;
++ default:
++ printk(KERN_EMERG "%s: unknown grant table op %d\n",
++ __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(op, argsize);
++ if (desc) {
++ ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_grant_table_op),
++ cmd, desc, count);
++ if (!ret && cmd == GNTTABOP_map_grant_ref)
++ gnttab_post_map_grant_ref(op, count);
++ xencomm_free(desc);
++ }
++ xencomm_free(frame_list);
++
++ return ret;
++}
++EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
++
++static ulong find_grant_maps(void)
++{
++ struct device_node *xen;
++ u64 *gm;
++ u64 _gm[2];
++ u64 expect;
++
++ /* This value is currently hardcoded into the SLB logic, which
++ * is written in assembler; see
++ * slb_miss_kernel_load_xen_linear for more information.
++ * Anything else and we cannot run. */
++ expect = 34 - PAGE_SHIFT;
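++ /* with 4K pages this is 22: a base frame of 2^22 puts the
++ * foreign map at physical address 2^34, i.e. 16GB */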
++
++ xen = of_find_node_by_path("/xen");
++
++ /*
++ * The foreign-map property is 2 x 2 cells (two u64 values).
++ * The first entry is log2 of the base page frame.
++ * The second is the number of pages.
++ */
++ gm = (u64 *)get_property(xen, "foreign-map", NULL);
++ if (gm == NULL) {
++ if (!is_initial_xendomain()) {
++ printk("OF: /xen/foreign-map not present\n");
++ _gm[0] = expect;
++ _gm[1] = 2048;
++ gm = _gm;
++ } else
++ panic("OF: /xen/foreign-map must be present\n");
++ }
++
++ if (gm[0] != expect)
++ panic("foreign-map is 0x%lx, expect 0x%lx\n",
++ gm[0], expect);
++
++ foreign_map_pfn = 1UL << gm[0];
++ return gm[1];
++}
++
++static void setup_foreign_segment(void)
++{
++ extern int *slb_miss_kernel_load_xen_nop;
++ ulong iaddr = (ulong)slb_miss_kernel_load_xen_nop;
++
++ /* By default Linux will branch around this logic; we replace
++ * the branch with a NOP (0x60000000, the PowerPC no-op
++ * encoding) to turn the logic on */
++ *slb_miss_kernel_load_xen_nop = 0x60000000;
++ flush_icache_range(iaddr, iaddr + 4);
++}
++
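++/* Claim a free slot in the foreign-map bitmap; the test_and_set_bit()
++ * loop retries if another CPU raced us to the same bit. */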
++struct page *alloc_foreign_page(void)
++{
++ ulong bit;
++ do {
++ bit = find_first_zero_bit(foreign_map_bitmap,
++ foreign_map_pgs);
++ if (bit >= foreign_map_pgs)
++ return NULL;
++ } while (test_and_set_bit(bit, foreign_map_bitmap) == 1);
++
++ return pfn_to_page(foreign_map_pfn + bit);
++}
++
++void free_foreign_page(struct page *page)
++{
++ ulong bit = page_to_pfn(page) - foreign_map_pfn;
++
++ BUG_ON(bit >= foreign_map_pgs);
++ BUG_ON(!test_bit(bit, foreign_map_bitmap));
++
++ clear_bit(bit, foreign_map_bitmap);
++}
++
++static void setup_grant_area(void)
++{
++ ulong pgs;
++ int err;
++ struct zone *zone;
++ struct pglist_data *pgdata;
++ int nid;
++
++ pgs = find_grant_maps();
++ setup_foreign_segment();
++
++ printk("%s: Xen VIO will use a foreign address space of 0x%lx pages\n",
++ __func__, pgs);
++
++ /* add pages to the zone */
++ nid = 0;
++ pgdata = NODE_DATA(nid);
++ zone = pgdata->node_zones;
++
++ err = __add_pages(zone, foreign_map_pfn, pgs);
++
++ if (err < 0) {
++ printk(KERN_EMERG "%s: add_pages(0x%lx, 0x%lx) = %d\n",
++ __func__, foreign_map_pfn, pgs, err);
++ BUG();
++ }
++
++ /* create a bitmap to manage these pages */
++ foreign_map_bitmap = kmalloc(BITS_TO_LONGS(pgs) * sizeof(long),
++ GFP_KERNEL);
++ if (foreign_map_bitmap == NULL) {
++ printk(KERN_EMERG
++ "%s: could not allocate foreign_map_bitmap to "
++ "manage 0x%lx foreign pages\n", __func__, pgs);
++ BUG();
++ }
++ /* Out of paranoia, mark the unused top bits of the last bitmap
++ * word as allocated so we never give them away */
++ bitmap_fill(&foreign_map_bitmap[BITS_TO_LONGS(pgs) - 1],
++ BITS_PER_LONG);
++ /* now clear all the real bits */
++ bitmap_zero(foreign_map_bitmap, pgs);
++
++ foreign_map_pgs = pgs;
++}
++
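++/* Map the shared grant-table frames. The foreign page area is set up
++ * only on the first call; the static 'resume' flag skips that work on
++ * subsequent (resume) calls. */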
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++ void *shared;
++ ulong pa = frames[0] << PAGE_SHIFT;
++ static int resume;
++
++ shared = ioremap(pa, PAGE_SIZE * NR_GRANT_FRAMES);
++ BUG_ON(shared == NULL);
++ printk("%s: grant table at %p\n", __func__, shared);
++
++ /* no need to do the rest of this if we are resuming */
++ if (!resume)
++ setup_grant_area();
++
++ resume = 1;
++
++ return shared;
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/hcall.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/hcall.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/hcall.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/hcall.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,891 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006, 2007
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/domctl.h>
++#include <xen/interface/sysctl.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/memory.h>
++#include <xen/interface/xencomm.h>
++#include <xen/interface/version.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/interface/xsm/acm_ops.h>
++#include <xen/interface/kexec.h>
++#include <xen/public/privcmd.h>
++#include <asm/hypercall.h>
++#include <asm/page.h>
++#include <asm/uaccess.h>
++#include <asm/hvcall.h>
++#include "setup.h"
++
++/* Xencomm notes:
++ *
++ * For kernel memory, we assume that virtually contiguous pages are also
++ * physically contiguous. This allows us to avoid creating descriptors for
++ * kernel hypercalls, such as console and event channel operations.
++ *
++ * In general, we need a xencomm descriptor to cover the top-level data
++ * structure (e.g. the domctl op), plus another for every embedded pointer to
++ * another data structure (i.e. for every GUEST_HANDLE).
++ */
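++ *
++ * The resulting pattern (see SCHEDOP_poll below, or
++ * GNTTABOP_setup_table in gnttab.c): map any embedded buffer first,
++ * patch its descriptor into the op with set_xen_guest_handle(), then
++ * wrap the op itself and pass that descriptor to plpar_hcall_norets().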
++
++int HYPERVISOR_console_io(int cmd, int count, char *str)
++{
++ struct xencomm_handle *desc;
++ int rc;
++
++ desc = xencomm_map_no_alloc(str, count);
++ if (desc == NULL)
++ return -EINVAL;
++
++ rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_console_io),
++ cmd, count, desc);
++
++ xencomm_free(desc);
++
++ return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_console_io);
++
++int HYPERVISOR_event_channel_op(int cmd, void *op)
++{
++ int rc;
++
++ struct xencomm_handle *desc =
++ xencomm_map_no_alloc(op, sizeof(evtchn_op_t));
++ if (desc == NULL)
++ return -EINVAL;
++
++ rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_event_channel_op),
++ cmd, desc);
++
++ xencomm_free(desc);
++
++ return rc;
++
++}
++EXPORT_SYMBOL(HYPERVISOR_event_channel_op);
++
++int HYPERVISOR_xen_version(int cmd, void *arg)
++{
++ struct xencomm_handle *desc;
++ const unsigned long hcall = __HYPERVISOR_xen_version;
++ int argsize;
++ int rc;
++
++ switch (cmd) {
++ case XENVER_version:
++ /* do not actually pass an argument */
++ return plpar_hcall_norets(XEN_MARK(hcall), cmd, 0);
++ case XENVER_extraversion:
++ argsize = sizeof(xen_extraversion_t);
++ break;
++ case XENVER_compile_info:
++ argsize = sizeof(xen_compile_info_t);
++ break;
++ case XENVER_capabilities:
++ argsize = sizeof(xen_capabilities_info_t);
++ break;
++ case XENVER_changeset:
++ argsize = sizeof(xen_changeset_info_t);
++ break;
++ case XENVER_platform_parameters:
++ argsize = sizeof(xen_platform_parameters_t);
++ break;
++ case XENVER_pagesize:
++ if (arg == NULL)
++ argsize = 0;
++ else
++ argsize = sizeof(void *);
++ break;
++ case XENVER_get_features:
++ argsize = sizeof(xen_feature_info_t);
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown version cmd %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ /* desc could be NULL in the case of XENVER_pagesize with NULL arg */
++ desc = xencomm_map(arg, argsize);
++
++ rc = plpar_hcall_norets(XEN_MARK(hcall), cmd, desc);
++
++ xencomm_free(desc);
++
++ return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_xen_version);
++
++
++int HYPERVISOR_physdev_op(int cmd, void *op)
++{
++ struct xencomm_handle *desc =
++ xencomm_map_no_alloc(op, sizeof(physdev_op_t));
++ int rc;
++
++ if (desc == NULL)
++ return -EINVAL;
++
++ rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_physdev_op),
++ cmd, desc);
++
++ xencomm_free(desc);
++
++ return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_physdev_op);
++
++int HYPERVISOR_sched_op(int cmd, void *arg)
++{
++ int argsize = 0;
++ int rc = -EINVAL;
++ struct xencomm_handle *desc;
++ struct xencomm_handle *ports = NULL;
++
++ switch (cmd) {
++ case SCHEDOP_yield:
++ case SCHEDOP_block:
++ return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_sched_op),
++ cmd, 0);
++ break;
++
++ case SCHEDOP_poll: {
++ struct sched_poll sched_poll;
++
++ argsize = sizeof(struct sched_poll);
++
++ memcpy(&sched_poll, arg, sizeof(sched_poll));
++
++ ports = xencomm_map(
++ xen_guest_handle(sched_poll.ports),
++ (sizeof(evtchn_port_t) * sched_poll.nr_ports));
++
++ if (ports == NULL)
++ return -ENOMEM;
++
++ set_xen_guest_handle(sched_poll.ports, (evtchn_port_t *)ports);
++ memcpy(arg, &sched_poll, sizeof(sched_poll));
++
++ }
++ break;
++ case SCHEDOP_shutdown:
++ argsize = sizeof(struct sched_shutdown);
++ break;
++ case SCHEDOP_remote_shutdown:
++ argsize = sizeof(struct sched_remote_shutdown);
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown sched op %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(arg, argsize);
++ if (desc) {
++ rc = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_sched_op),
++ cmd, desc);
++ xencomm_free(desc);
++ }
++
++ xencomm_free(ports);
++
++ return rc;
++}
++EXPORT_SYMBOL(HYPERVISOR_sched_op);
++
++int HYPERVISOR_suspend(unsigned long srec)
++{
++ int cmd = SCHEDOP_shutdown;
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend,
++ };
++ struct xencomm_handle *desc;
++
++ desc = xencomm_map_no_alloc(&sched_shutdown, sizeof(struct sched_shutdown));
++
++ return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_sched_op),
++ cmd, desc, srec);
++}
++EXPORT_SYMBOL(HYPERVISOR_suspend);
++
++int HYPERVISOR_kexec_op(unsigned long op, void *args)
++{
++ unsigned long argsize;
++ struct xencomm_handle *desc;
++
++ switch (op) {
++ case KEXEC_CMD_kexec_get_range:
++ argsize = sizeof(struct xen_kexec_range);
++ break;
++ case KEXEC_CMD_kexec_load:
++ argsize = sizeof(struct xen_kexec_load);
++ break;
++ case KEXEC_CMD_kexec_unload:
++ argsize = sizeof(struct xen_kexec_load);
++ break;
++ case KEXEC_CMD_kexec:
++ argsize = sizeof(struct xen_kexec_exec);
++ break;
++ default:
++ return -ENOSYS;
++ }
++ desc = xencomm_map_no_alloc(args, argsize);
++
++ return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_kexec_op),
++ op, desc);
++}
++EXPORT_SYMBOL(HYPERVISOR_kexec_op);
++
++int HYPERVISOR_poll(
++ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++ struct sched_poll sched_poll = {
++ .nr_ports = nr_ports,
++ .timeout = jiffies_to_ns(timeout)
++ };
++ set_xen_guest_handle(sched_poll.ports, ports);
++
++ return HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++}
++EXPORT_SYMBOL(HYPERVISOR_poll);
++
++typedef ulong (mf_t)(ulong arg0, ...);
++
++static mf_t *multicall_funcs[] = {
++ [__HYPERVISOR_grant_table_op] = (mf_t *)HYPERVISOR_grant_table_op,
++};
++
++int HYPERVISOR_multicall(void *call_list, int nr_calls)
++{
++ /* we expand the multicall into individual hypercalls because
++ * wrapping the nested arguments in xencomm is just too tricky */
++ multicall_entry_t *mcl = (multicall_entry_t *)call_list;
++ multicall_entry_t *c;
++ int i;
++ mf_t *mf;
++ int res;
++ ulong flags;
++
++ /* let's make sure all the calls are supported */
++ for (i = 0; i < nr_calls; i++) {
++ mf = multicall_funcs[mcl[i].op];
++ BUG_ON(mf == NULL);
++ }
++ /* disable interrupts until we have completed all calls */
++ local_irq_save(flags);
++ for (i = 0; i < nr_calls; i++) {
++ /* lookup supported multicalls */
++ c = &mcl[i];
++ mf = multicall_funcs[c->op];
++ res = mf(c->args[0], c->args[1], c->args[2],
++ c->args[3], c->args[4], c->args[5]);
++ c->result = res;
++ }
++ local_irq_restore(flags);
++ return 0;
++}
++EXPORT_SYMBOL(HYPERVISOR_multicall);
++
++
++/* privcmd operations: */
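++/* Each handler below follows the same shape: copy the op in from
++ * userspace, wrap it in a xencomm descriptor, map any nested
++ * GUEST_HANDLE buffers, issue the hcall, then copy the (possibly
++ * updated) op back out. */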
++
++static int xenppc_privcmd_domctl(privcmd_hypercall_t *hypercall)
++{
++ xen_domctl_t kern_op;
++ xen_domctl_t __user *user_op = (xen_domctl_t __user *)hypercall->arg[0];
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ int ret = 0;
++
++ if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION) {
++ printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++ kern_op.interface_version, XEN_DOMCTL_INTERFACE_VERSION);
++ return -EACCES;
++ }
++
++ op_desc = xencomm_map(&kern_op, sizeof(xen_domctl_t));
++ if (op_desc == NULL)
++ return -ENOMEM;
++
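++ /* Commands whose payload embeds a guest pointer need a second
++ * xencomm descriptor for that buffer; simple commands pass through
++ * with only op_desc mapped above. */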
++ switch (kern_op.cmd) {
++ case XEN_DOMCTL_createdomain:
++ case XEN_DOMCTL_destroydomain:
++ case XEN_DOMCTL_pausedomain:
++ case XEN_DOMCTL_unpausedomain:
++ case XEN_DOMCTL_getdomaininfo:
++ break;
++ case XEN_DOMCTL_getmemlist:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getmemlist.buffer),
++ kern_op.u.getmemlist.max_pfns * sizeof(unsigned long));
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.getmemlist.buffer,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_getpageframeinfo:
++ break;
++ case XEN_DOMCTL_getpageframeinfo2:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getpageframeinfo2.array),
++ kern_op.u.getpageframeinfo2.num);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_shadow_op:
++ if (xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap)) {
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
++ kern_op.u.shadow_op.pages * sizeof(unsigned long));
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
++ (void *)desc);
++ }
++ break;
++ case XEN_DOMCTL_max_mem:
++ break;
++ case XEN_DOMCTL_setvcpucontext:
++ case XEN_DOMCTL_getvcpucontext:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.vcpucontext.ctxt),
++ sizeof(vcpu_guest_context_t));
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.vcpucontext.ctxt,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_getvcpuinfo:
++ break;
++ case XEN_DOMCTL_setvcpuaffinity:
++ case XEN_DOMCTL_getvcpuaffinity:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
++ (kern_op.u.vcpuaffinity.cpumap.nr_cpus + 7) / 8);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
++ (void *)desc);
++ break;
++ case XEN_DOMCTL_max_vcpus:
++ case XEN_DOMCTL_scheduler_op:
++ case XEN_DOMCTL_setdomainhandle:
++ case XEN_DOMCTL_setdebugging:
++ case XEN_DOMCTL_irq_permission:
++ case XEN_DOMCTL_iomem_permission:
++ case XEN_DOMCTL_ioport_permission:
++ case XEN_DOMCTL_hypercall_init:
++ case XEN_DOMCTL_arch_setup:
++ case XEN_DOMCTL_settimeoffset:
++ case XEN_DOMCTL_real_mode_area:
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
++ ret = -ENOSYS;
++ goto out; /* don't leak op_desc */
++ }
++
++ if (ret)
++ goto out; /* error mapping the nested pointer */
++
++ ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
++ ret = -EFAULT;
++
++out:
++ xencomm_free(desc);
++ xencomm_free(op_desc);
++ return ret;
++}
++
++static int xenppc_privcmd_sysctl(privcmd_hypercall_t *hypercall)
++{
++ xen_sysctl_t kern_op;
++ xen_sysctl_t __user *user_op = (xen_sysctl_t __user *)hypercall->arg[0];
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ int ret = 0;
++
++ if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION) {
++ printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++ kern_op.interface_version, XEN_SYSCTL_INTERFACE_VERSION);
++ return -EACCES;
++ }
++
++ op_desc = xencomm_map(&kern_op, sizeof(xen_sysctl_t));
++
++ if (op_desc == NULL)
++ return -ENOMEM;
++
++ switch (kern_op.cmd) {
++ case XEN_SYSCTL_readconsole:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.readconsole.buffer),
++ kern_op.u.readconsole.count);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.readconsole.buffer,
++ (void *)desc);
++ break;
++ case XEN_SYSCTL_tbuf_op:
++ case XEN_SYSCTL_physinfo:
++ case XEN_SYSCTL_sched_id:
++ break;
++ case XEN_SYSCTL_perfc_op:
++ /* XXX this requires *two* embedded xencomm mappings (desc and
++ * val), which is not implemented yet. */
++ printk(KERN_ERR "%s: unsupported sysctl cmd %d\n", __func__, kern_op.cmd);
++ ret = -ENOSYS;
++ goto out; /* don't leak op_desc */
++ case XEN_SYSCTL_getdomaininfolist:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
++ kern_op.u.getdomaininfolist.max_domains *
++ sizeof(xen_domctl_getdomaininfo_t));
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
++ (void *)desc);
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
++ ret = -ENOSYS;
++ goto out; /* don't leak op_desc */
++ }
++
++ if (ret)
++ goto out; /* error mapping the nested pointer */
++
++ ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
++ ret = -EFAULT;
++
++out:
++ xencomm_free(desc);
++ xencomm_free(op_desc);
++ return ret;
++}
++
++static int xenppc_privcmd_platform_op(privcmd_hypercall_t *hypercall)
++{
++ xen_platform_op_t kern_op;
++ xen_platform_op_t __user *user_op =
++ (xen_platform_op_t __user *)hypercall->arg[0];
++ struct xencomm_handle *op_desc;
++ struct xencomm_handle *desc = NULL;
++ int ret = 0;
++
++ if (copy_from_user(&kern_op, user_op, sizeof(xen_platform_op_t)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != XENPF_INTERFACE_VERSION) {
++ printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++ kern_op.interface_version, XENPF_INTERFACE_VERSION);
++ return -EACCES;
++ }
++
++ op_desc = xencomm_map(&kern_op, sizeof(xen_platform_op_t));
++
++ if (op_desc == NULL)
++ return -ENOMEM;
++
++ switch (kern_op.cmd) {
++ case XENPF_settime:
++ case XENPF_add_memtype:
++ case XENPF_del_memtype:
++ case XENPF_read_memtype:
++ case XENPF_microcode_update:
++ case XENPF_platform_quirk:
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown platform_op cmd %d\n", __func__,
++ kern_op.cmd);
++ ret = -ENOSYS;
++ goto out; /* don't leak op_desc */
++ }
++
++ if (ret)
++ goto out; /* error mapping the nested pointer */
++
++ ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_platform_op_t)))
++ ret = -EFAULT;
++
++out:
++ xencomm_free(desc);
++ xencomm_free(op_desc);
++ return ret;
++}
++
++int HYPERVISOR_memory_op(unsigned int cmd, void *arg)
++{
++ int ret;
++ struct xencomm_handle *op_desc;
++ xen_memory_reservation_t *mop;
++
++ mop = (xen_memory_reservation_t *)arg;
++
++ op_desc = xencomm_map(mop, sizeof(xen_memory_reservation_t));
++
++ if (op_desc == NULL)
++ return -ENOMEM;
++
++ switch (cmd) {
++ case XENMEM_increase_reservation:
++ case XENMEM_decrease_reservation:
++ case XENMEM_populate_physmap: {
++ struct xencomm_handle *desc = NULL;
++
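++ /* the reservation embeds a guest pointer to the extent list;
++ * map it through xencomm as well before issuing the hypercall */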
++ if (xen_guest_handle(mop->extent_start)) {
++ desc = xencomm_map(
++ xen_guest_handle(mop->extent_start),
++ mop->nr_extents *
++ sizeof(*xen_guest_handle(mop->extent_start)));
++
++ if (desc == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ set_xen_guest_handle(mop->extent_start,
++ (void *)desc);
++ }
++
++ ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
++ cmd, op_desc);
++
++ xencomm_free(desc);
++ }
++ break;
++
++ case XENMEM_maximum_ram_page:
++ /* arg is NULL so we can call through here */
++ ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
++ cmd, NULL);
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown memory op %d\n", __func__, cmd);
++ ret = -ENOSYS;
++ }
++
++out:
++ xencomm_free(op_desc);
++ return ret;
++}
++EXPORT_SYMBOL(HYPERVISOR_memory_op);
++
++static int xenppc_privcmd_memory_op(privcmd_hypercall_t *hypercall)
++{
++ xen_memory_reservation_t kern_op;
++ xen_memory_reservation_t __user *user_op;
++ const unsigned long cmd = hypercall->arg[0];
++ int ret = 0;
++
++ user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
++ if (copy_from_user(&kern_op, user_op,
++ sizeof(xen_memory_reservation_t)))
++ return -EFAULT;
++
++ ret = HYPERVISOR_memory_op(cmd, &kern_op);
++ if (ret >= 0) {
++ if (copy_to_user(user_op, &kern_op,
++ sizeof(xen_memory_reservation_t)))
++ return -EFAULT;
++ }
++ return ret;
++}
++
++static int xenppc_privcmd_version(privcmd_hypercall_t *hypercall)
++{
++ return HYPERVISOR_xen_version(hypercall->arg[0],
++ (void *)hypercall->arg[1]);
++}
++
++static int xenppc_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
++{
++ struct xencomm_handle *desc;
++ unsigned int argsize;
++ int ret;
++
++ switch (hypercall->arg[0]) {
++ case EVTCHNOP_alloc_unbound:
++ argsize = sizeof(evtchn_alloc_unbound_t);
++ break;
++
++ case EVTCHNOP_status:
++ argsize = sizeof(evtchn_status_t);
++ break;
++
++ default:
++ printk(KERN_ERR "%s: unknown EVTCHNOP (%ld)\n",
++ __func__, hypercall->arg[0]);
++ return -EINVAL;
++ }
++
++ desc = xencomm_map((void *)hypercall->arg[1], argsize);
++
++ if (desc == NULL)
++ return -ENOMEM;
++
++ ret = plpar_hcall_norets(XEN_MARK(hypercall->op), hypercall->arg[0],
++ desc);
++
++ xencomm_free(desc);
++ return ret;
++}
++
++static int xenppc_acmcmd_op(privcmd_hypercall_t *hypercall)
++{
++ xen_acmctl_t kern_op;
++ xen_acmctl_t __user *user_op = (xen_acmctl_t __user *)hypercall->arg[0];
++ void *op_desc;
++ void *desc = NULL, *desc2 = NULL, *desc3 = NULL, *desc4 = NULL;
++ int ret = 0;
++
++ if (copy_from_user(&kern_op, user_op, sizeof(xen_acmctl_t)))
++ return -EFAULT;
++
++ if (kern_op.interface_version != ACM_INTERFACE_VERSION) {
++ printk(KERN_WARNING "%s: %s %x != %x\n", __func__, current->comm,
++ kern_op.interface_version, ACM_INTERFACE_VERSION);
++ return -EACCES;
++ }
++
++ op_desc = xencomm_map(&kern_op, sizeof(xen_acmctl_t));
++ if (op_desc == NULL)
++ return -ENOMEM;
++
++ switch (kern_op.cmd) {
++ case ACMOP_setpolicy:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.setpolicy.pushcache),
++ kern_op.u.setpolicy.pushcache_size);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.setpolicy.pushcache,
++ desc);
++ break;
++ case ACMOP_getpolicy:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getpolicy.pullcache),
++ kern_op.u.getpolicy.pullcache_size);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.getpolicy.pullcache,
++ desc);
++ break;
++ case ACMOP_dumpstats:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.dumpstats.pullcache),
++ kern_op.u.dumpstats.pullcache_size);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.dumpstats.pullcache,
++ desc);
++ break;
++ case ACMOP_getssid:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.getssid.ssidbuf),
++ kern_op.u.getssid.ssidbuf_size);
++
++ if (desc == NULL)
++ ret = -ENOMEM;
++
++ set_xen_guest_handle(kern_op.u.getssid.ssidbuf,
++ desc);
++ break;
++ case ACMOP_getdecision:
++ break;
++ case ACMOP_chgpolicy:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.change_policy.policy_pushcache),
++ kern_op.u.change_policy.policy_pushcache_size);
++ desc2 = xencomm_map(
++ xen_guest_handle(kern_op.u.change_policy.del_array),
++ kern_op.u.change_policy.delarray_size);
++ desc3 = xencomm_map(
++ xen_guest_handle(kern_op.u.change_policy.chg_array),
++ kern_op.u.change_policy.chgarray_size);
++ desc4 = xencomm_map(
++ xen_guest_handle(kern_op.u.change_policy.err_array),
++ kern_op.u.change_policy.errarray_size);
++
++ if (desc == NULL || desc2 == NULL ||
++ desc3 == NULL || desc4 == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ set_xen_guest_handle(kern_op.u.change_policy.policy_pushcache,
++ desc);
++ set_xen_guest_handle(kern_op.u.change_policy.del_array,
++ desc2);
++ set_xen_guest_handle(kern_op.u.change_policy.chg_array,
++ desc3);
++ set_xen_guest_handle(kern_op.u.change_policy.err_array,
++ desc4);
++ break;
++ case ACMOP_relabeldoms:
++ desc = xencomm_map(
++ xen_guest_handle(kern_op.u.relabel_doms.relabel_map),
++ kern_op.u.relabel_doms.relabel_map_size);
++ desc2 = xencomm_map(
++ xen_guest_handle(kern_op.u.relabel_doms.err_array),
++ kern_op.u.relabel_doms.errarray_size);
++
++ if (desc == NULL || desc2 == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ set_xen_guest_handle(kern_op.u.relabel_doms.relabel_map,
++ desc);
++ set_xen_guest_handle(kern_op.u.relabel_doms.err_array,
++ desc2);
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown/unsupported acmctl cmd %d\n",
++ __func__, kern_op.cmd);
++ ret = -ENOSYS;
++ goto out; /* don't leak op_desc */
++ }
++
++ if (ret)
++ goto out; /* error mapping the nested pointer */
++
++ ret = plpar_hcall_norets(XEN_MARK(hypercall->op), op_desc);
++
++ if (copy_to_user(user_op, &kern_op, sizeof(xen_acmctl_t)))
++ ret = -EFAULT;
++
++out:
++ xencomm_free(desc);
++ xencomm_free(desc2);
++ xencomm_free(desc3);
++ xencomm_free(desc4);
++ xencomm_free(op_desc);
++ return ret;
++}
++
++
++/* The PowerPC hypervisor runs in a separate address space from Linux
++ * kernel/userspace, i.e. real mode. We must therefore translate userspace
++ * pointers to something the hypervisor can make sense of. */
++int privcmd_hypercall(privcmd_hypercall_t *hypercall)
++{
++ switch (hypercall->op) {
++ case __HYPERVISOR_domctl:
++ return xenppc_privcmd_domctl(hypercall);
++ case __HYPERVISOR_sysctl:
++ return xenppc_privcmd_sysctl(hypercall);
++ case __HYPERVISOR_platform_op:
++ return xenppc_privcmd_platform_op(hypercall);
++ case __HYPERVISOR_memory_op:
++ return xenppc_privcmd_memory_op(hypercall);
++ case __HYPERVISOR_xen_version:
++ return xenppc_privcmd_version(hypercall);
++ case __HYPERVISOR_event_channel_op:
++ return xenppc_privcmd_event_channel_op(hypercall);
++ case __HYPERVISOR_acm_op:
++ return xenppc_acmcmd_op(hypercall);
++ default:
++ printk(KERN_ERR "%s: unknown hcall (%ld)\n", __func__, hypercall->op);
++ /* maybe we'll get lucky and the hcall needs no translation. */
++ return plpar_hcall_norets(XEN_MARK(hypercall->op),
++ hypercall->arg[0],
++ hypercall->arg[1],
++ hypercall->arg[2],
++ hypercall->arg[3],
++ hypercall->arg[4]);
++ }
++}
++
++int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
++{
++ int argsize;
++ const unsigned long hcall = __HYPERVISOR_vcpu_op;
++ struct xencomm_handle *desc;
++ int rc;
++
++ switch (cmd) {
++ case VCPUOP_initialise:
++ argsize = sizeof(vcpu_guest_context_t);
++ break;
++ case VCPUOP_up:
++ case VCPUOP_down:
++ case VCPUOP_is_up:
++ return plpar_hcall_norets(XEN_MARK(hcall), cmd, vcpuid, 0);
++
++ case VCPUOP_get_runstate_info:
++ argsize = sizeof(vcpu_runstate_info_t);
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown version cmd %d\n", __func__, cmd);
++ return -ENOSYS;
++ }
++
++ desc = xencomm_map_no_alloc(extra_args, argsize);
++
++ if (desc == NULL)
++ return -EINVAL;
++
++ rc = plpar_hcall_norets(XEN_MARK(hcall), cmd, vcpuid, desc);
++
++ xencomm_free(desc);
++
++ return rc;
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/Makefile linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/Makefile
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,20 @@
++obj-y += gnttab.o
++obj-y += hcall.o
++obj-y += reboot.o
++obj-y += setup.o
++obj-y += smp.o
++obj-y += time.o
++obj-y += udbg_xen.o
++obj-y += xen_guest.o
++obj-y += xencomm.o
++
++# we need the latest __XEN_INTERFACE_VERSION__ (see xen-compat.h)
++CFLAGS_hcall.o += -D__XEN_TOOLS__
++
++ifndef CONFIG_XEN_BALLOON
++obj-y += balloon.o
++endif
++
++ifndef CONFIG_XEN_UTIL
++obj-y += util.o
++endif
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/reboot.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/reboot.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/reboot.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/reboot.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,53 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/console.h>
++#include <xen/xencons.h>
++#include <asm/hypervisor.h>
++#include <asm/machdep.h>
++
++static void domain_machine_restart(char * __unused)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ HYPERVISOR_shutdown(SHUTDOWN_reboot);
++}
++
++static void domain_machine_power_off(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
++
++void xen_reboot_init(struct machdep_calls *md)
++{
++ if (md != NULL) {
++ ppc_md.restart = md->restart;
++ ppc_md.power_off = md->power_off;
++ ppc_md.halt = md->halt;
++ } else {
++ ppc_md.restart = domain_machine_restart;
++ ppc_md.power_off = domain_machine_power_off;
++ ppc_md.halt = domain_machine_power_off;
++ }
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/setup.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/setup.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/setup.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/setup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,336 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#define DEBUG
++#define CONFIG_SHARE_MPIC
++
++#include <linux/module.h>
++#include <linux/rwsem.h>
++#include <linux/delay.h>
++#include <linux/console.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/sched.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#include <asm/udbg.h>
++#include <asm/pgtable.h>
++#include <asm/prom.h>
++#include <asm/iommu.h>
++#include <asm/mmu.h>
++#include <asm/abs_addr.h>
++#include <asm/machdep.h>
++#include <asm/hypervisor.h>
++#include <asm/time.h>
++#include <asm/pmc.h>
++#include "setup.h"
++
++#ifdef DEBUG
++#define DBG(fmt...) udbg_printf(fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++/* Apparently on other arches this could be used before it is defined;
++ * that should not be the case on PPC */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)NULL;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++/* Raw start-of-day parameters from the hypervisor. */
++static start_info_t xsi;
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++extern struct machdep_calls mach_maple_md;
++extern void maple_pci_init(void);
++
++static unsigned long foreign_mfn_flag;
++
++/* Must be called with &vma->vm_mm->mmap_sem locked for write */
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int rc;
++
++ /* Set the MFN flag to tell Xen that this is not a PFN. */
++ printk("%s: mapping mfn 0x%lx (size 0x%lx) -> 0x%lx\n", __func__,
++ mfn, size, mfn | foreign_mfn_flag);
++ mfn = mfn | foreign_mfn_flag;
++
++ WARN_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
++ rc = remap_pfn_range(vma, address, mfn, size, prot);
++
++ return rc;
++}
++
++static void __init xen_fw_feature_init(void)
++{
++ DBG(" -> %s\n", __func__);
++
++ powerpc_firmware_features = 0;
++
++ powerpc_firmware_features |= FW_FEATURE_LPAR;
++ powerpc_firmware_features |= FW_FEATURE_TCE | FW_FEATURE_DABR;
++
++ printk(KERN_INFO "firmware_features = 0x%lx\n",
++ powerpc_firmware_features);
++
++ DBG(" <- %s\n", __func__);
++}
++
++/* if these were global we could get them from pseries/setup.c */
++static int pseries_set_dabr(unsigned long dabr)
++{
++ return plpar_hcall_norets(H_SET_DABR, dabr);
++}
++
++static int pseries_set_xdabr(unsigned long dabr)
++{
++ /* We want to catch accesses from kernel and userspace */
++ return plpar_hcall_norets(H_SET_XDABR, dabr,
++ H_DABRX_KERNEL | H_DABRX_USER);
++}
++
++/*
++ * Early initialization.
++ */
++static void __init xenppc_init_early(void)
++{
++ struct device_node *xen;
++
++ DBG(" -> %s\n", __func__);
++
++ xen = of_find_node_by_path("/xen");
++
++ xen_start_info = &xsi;
++
++ /* fill out start_info_t from devtree */
++ if ((char *)get_property(xen, "privileged", NULL))
++ xen_start_info->flags |= SIF_PRIVILEGED;
++ if ((char *)get_property(xen, "initdomain", NULL))
++ xen_start_info->flags |= SIF_INITDOMAIN;
++ xen_start_info->shared_info = *((u64 *)get_property(xen,
++ "shared-info", NULL));
++
++ /* only look for store and console for guest domains */
++ if (xen_start_info->flags == 0) {
++ struct device_node *console = of_find_node_by_path("/xen/console");
++ struct device_node *store = of_find_node_by_path("/xen/store");
++
++ xen_start_info->store_mfn = (*((u64 *)get_property(store,
++ "reg", NULL))) >> PAGE_SHIFT;
++ xen_start_info->store_evtchn = *((u32 *)get_property(store,
++ "interrupts", NULL));
++ xen_start_info->console.domU.mfn = (*((u64 *)get_property(console,
++ "reg", NULL))) >> PAGE_SHIFT;
++ xen_start_info->console.domU.evtchn = *((u32 *)get_property(console,
++ "interrupts", NULL));
++ }
++
++ HYPERVISOR_shared_info = __va(xen_start_info->shared_info);
++
++ udbg_init_xen();
++
++ DBG("xen_start_info at %p\n", xen_start_info);
++ DBG(" magic %s\n", xen_start_info->magic);
++ DBG(" flags %x\n", xen_start_info->flags);
++ DBG(" shared_info %lx, %p\n",
++ xen_start_info->shared_info, HYPERVISOR_shared_info);
++ DBG(" store_mfn %llx\n", xen_start_info->store_mfn);
++ DBG(" store_evtchn %x\n", xen_start_info->store_evtchn);
++ DBG(" console_mfn %llx\n", xen_start_info->console.domU.mfn);
++ DBG(" console_evtchn %x\n", xen_start_info->console.domU.evtchn);
++
++ xen_setup_time(&mach_maple_md);
++
++ add_preferred_console("xvc", 0, NULL);
++
++ if (get_property(xen, "power-control", NULL))
++ xen_reboot_init(&mach_maple_md);
++ else
++ xen_reboot_init(NULL);
++
++ if (is_initial_xendomain()) {
++ u64 *mfnflag = (u64 *)get_property(xen, "mfn-flag", NULL);
++ if (mfnflag) {
++ foreign_mfn_flag = (1UL << mfnflag[0]);
++ printk("OF: using 0x%lx as foreign mfn flag\n", foreign_mfn_flag);
++ } else
++ printk("OF: /xen/mfn-base must be present it build guests\n");
++ }
++
++ /* get the domain features */
++ setup_xen_features();
++
++ DBG("Hello World I'm Maple Xen-LPAR!\n");
++
++ if (firmware_has_feature(FW_FEATURE_DABR))
++ ppc_md.set_dabr = pseries_set_dabr;
++ else if (firmware_has_feature(FW_FEATURE_XDABR))
++ ppc_md.set_dabr = pseries_set_xdabr;
++
++ iommu_init_early_pSeries();
++
++ DBG(" <- %s\n", __func__);
++}
++
++/*
++ * this interface is limiting
++ */
++static int running_on_xen;
++int is_running_on_xen(void)
++{
++ return running_on_xen;
++}
++EXPORT_SYMBOL(is_running_on_xen);
++
++static void xenppc_power_save(void)
++{
++ /* SCHEDOP_yield could immediately return. Instead, we
++ * want to idle in the Xen idle domain, so use
++ * SCHEDOP_block with a one-shot timer. */
++ /* XXX do tickless stuff here. See
++ * linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c */
++ u64 now_ns = tb_to_ns(get_tb());
++ u64 offset_ns = jiffies_to_ns(1);
++ int rc;
++
++ rc = HYPERVISOR_set_timer_op(now_ns + offset_ns);
++ BUG_ON(rc != 0);
++
++ HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++}
++
++void __init xenppc_setup_arch(void)
++{
++ /* init to some ~sane value until calibrate_delay() runs */
++ loops_per_jiffy = 50000000;
++
++ /* Lookup PCI hosts */
++ if (is_initial_xendomain())
++ maple_pci_init();
++
++#ifdef CONFIG_DUMMY_CONSOLE
++ conswitchp = &dummy_con;
++#endif
++#ifdef CONFIG_SMP
++ /* let them fly */
++ xen_setup_smp();
++#endif
++
++ printk(KERN_INFO "Using Xen idle loop\n");
++}
++
++static int __init xen_probe_flat_dt(unsigned long node,
++ const char *uname, int depth,
++ void *data)
++{
++ if (depth != 1)
++ return 0;
++ if (strcmp(uname, "xen") != 0)
++ return 0;
++
++ running_on_xen = 1;
++
++ return 1;
++}
++
++/*
++ * Called very early, MMU is off, device-tree isn't unflattened
++ */
++/* forward ref */
++struct machdep_calls __initdata xen_md;
++static int __init xenppc_probe(void)
++{
++ of_scan_flat_dt(xen_probe_flat_dt, NULL);
++
++ if (!running_on_xen)
++ return 0;
++
++ xen_fw_feature_init();
++
++ hpte_init_lpar();
++
++ return 1;
++}
++
++static void __init xenppc_progress(char *s, unsigned short hex)
++{
++ printk("*** %04x : %s\n", hex, s ? s : "");
++}
++
++unsigned int xenppc_get_irq(struct pt_regs *regs)
++{
++ evtchn_do_upcall(regs);
++ /* evtchn_do_upcall() handles all pending event channels directly, so there
++ * is nothing for do_IRQ() to do.
++ * XXX This means we aren't using IRQ stacks. */
++ return NO_IRQ;
++}
++
++static void xenppc_enable_pmcs(void)
++{
++ unsigned long set, reset;
++
++ power4_enable_pmcs();
++
++ set = 1UL << 63;
++ reset = 0;
++ plpar_hcall_norets(H_PERFMON, set, reset);
++}
++
++#ifdef CONFIG_KEXEC
++void xen_machine_kexec(struct kimage *image)
++{
++ panic("%s(%p): called\n", __func__, image);
++}
++
++int xen_machine_kexec_prepare(struct kimage *image)
++{
++ panic("%s(%p): called\n", __func__, image);
++}
++
++void xen_machine_crash_shutdown(struct pt_regs *regs)
++{
++ panic("%s(%p): called\n", __func__, regs);
++}
++#endif
++
++define_machine(xen) {
++ .name = "Xen-Maple",
++ .probe = xenppc_probe,
++ .setup_arch = xenppc_setup_arch,
++ .init_early = xenppc_init_early,
++ .init_IRQ = xen_init_IRQ,
++ .get_irq = xenppc_get_irq,
++ .calibrate_decr = generic_calibrate_decr,
++ .progress = xenppc_progress,
++ .power_save = xenppc_power_save,
++ .enable_pmcs = xenppc_enable_pmcs,
++#ifdef CONFIG_KEXEC
++ .machine_kexec = xen_machine_kexec,
++ .machine_kexec_prepare = xen_machine_kexec_prepare,
++ .machine_crash_shutdown = xen_machine_crash_shutdown,
++#endif
++};
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/setup.h linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/setup.h
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/setup.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/setup.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,47 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <asm/machdep.h>
++#include <asm/time.h>
++
++extern void evtchn_init_IRQ(void);
++extern void xen_init_IRQ(void);
++extern void xen_reboot_init(struct machdep_calls *);
++extern void xen_maple_init_IRQ(void);
++extern unsigned int xen_get_irq(struct pt_regs *regs);
++
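++/* Coarse conversions to nanoseconds; note the integer division, which
++ * loses precision whenever tb_ticks_per_sec (or HZ) does not divide
++ * 1000000000 evenly. */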
++static inline u64 tb_to_ns(u64 tb)
++{
++ if (likely(tb_ticks_per_sec)) {
++ return tb * (1000000000UL / tb_ticks_per_sec);
++ }
++ return 0;
++}
++
++static inline u64 jiffies_to_ns(unsigned long j)
++{
++ return j * (1000000000UL / HZ);
++}
++
++extern struct page *alloc_foreign_page(void);
++extern void free_foreign_page(struct page *page);
++
++extern void __init xen_setup_time(struct machdep_calls *host_md);
++extern void xen_setup_smp(void);
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/smp.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/smp.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/smp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/smp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,444 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/config.h>
++#include <linux/bootmem.h>
++#include <linux/irq.h>
++#include <linux/smp.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/vcpu.h>
++#include <xen/evtchn.h>
++#include <asm/prom.h>
++#include <asm/udbg.h>
++#include <asm/hypervisor.h>
++#include "setup.h"
++
++#undef DEBUG
++
++#ifdef DEBUG
++#define DBG(fmt...) printk(KERN_EMERG fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++static inline void *xen_of_alloc(ulong size)
++{
++ if (mem_init_done)
++ return kmalloc(size, GFP_KERNEL);
++ return alloc_bootmem(size);
++}
++static inline void xen_of_free(void *ptr)
++{
++ /* if this happens with the boot allocator then we are screwed */
++ BUG_ON(!mem_init_done);
++ kfree(ptr);
++}
++
++static struct property *dup_prop(struct property *op)
++{
++ struct property *np;
++ void *p;
++ ulong sz;
++
++ /* allocate everything in one go in case it fails */
++ sz = sizeof (*np); /* prop node */
++ sz += strlen(op->name) + 1; /* prop name */
++ sz += op->length; /* prop value */
++
++ p = xen_of_alloc(sz);
++ if (!p)
++ return NULL;
++ memset(p, 0, sz);
++
++ /* prop node first */
++ np = p;
++ p += sizeof (*np);
++
++ /* value next because we want it aligned */
++ np->value = p;
++ p += op->length;
++
++ /* name */
++ np->name = p;
++
++ /* copy it all */
++ strcpy(np->name, op->name);
++ np->length = op->length;
++ memcpy(np->value, op->value, np->length);
++
++ return np;
++}
++
++static int dup_properties(struct device_node *dst, struct device_node *src)
++{
++ struct property *op;
++ struct property *np;
++ struct property *lp;
++ int rc = 0;
++
++ DBG("%s: duping to new cpu node: %s\n", __func__, dst->full_name);
++
++ np = lp = NULL;
++ for (op = src->properties; op != 0; op = op->next) {
++ lp = np;
++ np = dup_prop(op);
++ if (!np)
++ break;
++
++ prom_add_property(dst, np);
++ }
++
++ if (!np) {
++ DBG("%s: FAILED duping: %s\n", __func__, dst->full_name);
++ /* we could not allocate enough, so free what we have
++ * allocated */
++ rc = -ENOMEM;
++ for (op = dst->properties; lp && op != lp; op = op->next)
++ xen_of_free(op);
++ }
++
++ return rc;
++}
++
++/* Returns the added device node so it can be added to procfs in the case
++ * of hotplugging */
++static struct device_node *xen_add_vcpu_node(struct device_node *boot_cpu,
++ uint cpu)
++{
++ struct device_node *new_cpu;
++ struct property *pp;
++ void *p;
++ int sz;
++ int type_sz;
++ int name_sz;
++
++ DBG("%s: boot cpu: %s\n", __func__, boot_cpu->full_name);
++
++ /* allocate in one shot in case we fail */
++ name_sz = strlen(boot_cpu->name) + 1;
++ type_sz = strlen(boot_cpu->type) + 1;
++
++ sz = sizeof (*new_cpu); /* the node */
++ sz += strlen(boot_cpu->full_name) + 3; /* full_name */
++ sz += name_sz; /* name */
++ sz += type_sz; /* type */
++
++ p = xen_of_alloc(sz);
++ if (!p)
++ return NULL;
++ memset(p, 0, sz);
++
++ /* the node */
++ new_cpu = p;
++ p += sizeof (*new_cpu);
++
++ /* name */
++ new_cpu->name = p;
++ strcpy(new_cpu->name, boot_cpu->name);
++ p += name_sz;
++
++ /* type */
++ new_cpu->type = p;
++ strcpy(new_cpu->type, boot_cpu->type);
++ p += type_sz;
++
++ /* full_name */
++ new_cpu->full_name = p;
++
++ /* assemble new full_name */
++ pp = of_find_property(boot_cpu, "name", NULL);
++ if (!pp)
++ panic("%s: no name prop\n", __func__);
++
++ DBG("%s: name is: %s = %s\n", __func__, pp->name, pp->value);
++ sprintf(new_cpu->full_name, "/cpus/%s@%u", pp->value, cpu);
++
++ if (dup_properties(new_cpu, boot_cpu)) {
++ xen_of_free(new_cpu);
++ return NULL;
++ }
++
++ /* fixup reg property */
++ DBG("%s: updating reg: %d\n", __func__, cpu);
++ pp = of_find_property(new_cpu, "reg", NULL);
++ if (!pp)
++ panic("%s: no reg prop\n", __func__);
++ *(int *)pp->value = cpu;
++
++ if (mem_init_done)
++ OF_MARK_DYNAMIC(new_cpu);
++
++ kref_init(&new_cpu->kref);
++
++ /* insert the node */
++ new_cpu->parent = of_get_parent(boot_cpu);
++ of_attach_node(new_cpu);
++ of_node_put(new_cpu->parent);
++
++ return new_cpu;
++}
++
++static void cpu_initialize_context(unsigned int vcpu, ulong entry)
++{
++ vcpu_guest_context_t ctxt;
++
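++ /* poison the register block so any field we forget to set below
++ * stands out */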
++ memset(&ctxt.user_regs, 0x55, sizeof(ctxt.user_regs));
++
++ ctxt.user_regs.pc = entry;
++ ctxt.user_regs.msr = 0;
++ ctxt.user_regs.gprs[1] = 0; /* Linux uses its own stack */
++ ctxt.user_regs.gprs[3] = vcpu;
++
++ /* XXX verify this *** */
++ /* There is a buggy kernel that does not zero the "local_paca", so
++ * we must make sure this register is 0 */
++ ctxt.user_regs.gprs[13] = 0;
++
++ DBG("%s: initializing vcpu: %d\n", __func__, vcpu);
++
++ if (HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt))
++ panic("%s: VCPUOP_initialise failed, vcpu: %d\n",
++ __func__, vcpu);
++}
++
++static int xen_start_vcpu(uint vcpu, ulong entry)
++{
++ DBG("%s: starting vcpu: %d\n", __func__, vcpu);
++
++ cpu_initialize_context(vcpu, entry);
++
++ DBG("%s: Spinning up vcpu: %d\n", __func__, vcpu);
++ return HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
++}
++
++extern void __secondary_hold(void);
++extern unsigned long __secondary_hold_spinloop;
++extern unsigned long __secondary_hold_acknowledge;
++
++static void xen_boot_secondary_vcpus(void)
++{
++ int vcpu;
++ int rc;
++ const unsigned long mark = (unsigned long)-1;
++ unsigned long *spinloop = &__secondary_hold_spinloop;
++ unsigned long *acknowledge = &__secondary_hold_acknowledge;
++#ifdef CONFIG_PPC64
++ /* __secondary_hold is actually a descriptor, not the text address */
++ unsigned long secondary_hold = __pa(*(unsigned long *)__secondary_hold);
++#else
++ unsigned long secondary_hold = __pa(__secondary_hold);
++#endif
++ struct device_node *boot_cpu;
++
++ DBG("%s: finding CPU node\n", __func__);
++ boot_cpu = of_find_node_by_type(NULL, "cpu");
++ if (!boot_cpu)
++ panic("%s: Cannot find Booting CPU node\n", __func__);
++
++ /* Set the common spinloop variable, so all of the secondary cpus
++ * will block when they are awakened from their OF spinloop.
++ * This must occur for both SMP and non SMP kernels, since OF will
++ * be trashed when we move the kernel.
++ */
++ *spinloop = 0;
++
++ DBG("%s: Searching for all vcpu numbers > 0\n", __func__);
++ /* try and start as many as we can */
++ for (vcpu = 1; vcpu < NR_CPUS; vcpu++) {
++ int i;
++
++ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, vcpu, NULL);
++ if (rc < 0)
++ continue;
++
++ DBG("%s: Found vcpu: %d\n", __func__, vcpu);
++ /* Init the acknowledge var which will be reset by
++ * the secondary cpu when it awakens from its OF
++ * spinloop.
++ */
++ *acknowledge = mark;
++
++ DBG("%s: Starting vcpu: %d at pc: 0x%lx\n", __func__,
++ vcpu, secondary_hold);
++ rc = xen_start_vcpu(vcpu, secondary_hold);
++ if (rc)
++ panic("%s: xen_start_vpcu() failed\n", __func__);
++
++ DBG("%s: Waiting for ACK on vcpu: %d\n", __func__, vcpu);
++ for (i = 0; (i < 100000000) && (*acknowledge == mark); i++)
++ mb();
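++ /* the secondary overwrites *acknowledge with its vcpu number
++ * once it leaves the OF spinloop */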
++
++ if (*acknowledge == vcpu)
++ DBG("%s: Recieved for ACK on vcpu: %d\n",
++ __func__, vcpu);
++
++ xen_add_vcpu_node(boot_cpu, vcpu);
++
++ cpu_set(vcpu, cpu_present_map);
++ set_hard_smp_processor_id(vcpu, vcpu);
++ }
++ of_node_put(boot_cpu);
++ DBG("%s: end...\n", __func__);
++}
++
++static int __init smp_xen_probe(void)
++{
++ return cpus_weight(cpu_present_map);
++}
++
++static irqreturn_t xen_ppc_msg_reschedule(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ smp_message_recv(PPC_MSG_RESCHEDULE, regs);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t xen_ppc_msg_call_function(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t xen_ppc_msg_debugger_break(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
++ return IRQ_HANDLED;
++}
++
++struct message {
++ irqreturn_t (*f)(int, void *, struct pt_regs *);
++ int num;
++ char *name;
++};
++static struct message ipi_msgs[] = {
++ {
++ .num = PPC_MSG_RESCHEDULE,
++ .f = xen_ppc_msg_reschedule,
++ .name = "IPI-resched"
++ },
++ {
++ .num = PPC_MSG_CALL_FUNCTION,
++ .f = xen_ppc_msg_call_function,
++ .name = "IPI-function"
++ },
++ {
++ .num = PPC_MSG_DEBUGGER_BREAK,
++ .f = xen_ppc_msg_debugger_break,
++ .name = "IPI-debug"
++ }
++};
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static void __devinit smp_xen_setup_cpu(int cpu)
++{
++ int irq;
++ int i;
++ const int nr_ipis = ARRAY_SIZE(__get_cpu_var(ipi_to_irq));
++
++ /* big scary include web could mess with our values, so we
++ * make sure they are sane */
++ BUG_ON(ARRAY_SIZE(ipi_msgs) > nr_ipis);
++
++ for (i = 0; i < ARRAY_SIZE(ipi_msgs); i++) {
++ BUG_ON(ipi_msgs[i].num >= nr_ipis);
++
++ irq = bind_ipi_to_irqhandler(ipi_msgs[i].num,
++ cpu,
++ ipi_msgs[i].f,
++ SA_INTERRUPT,
++ ipi_msgs[i].name,
++ NULL);
++ BUG_ON(irq < 0);
++ per_cpu(ipi_to_irq, cpu)[ipi_msgs[i].num] = irq;
++ DBG("%s: cpu: %d vector :%d irq: %d\n",
++ __func__, cpu, ipi_msgs[i].num, irq);
++ }
++}
++
++static inline void send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq;
++
++ irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++
++ DBG("%s: cpu: %d vector :%d irq: %d!\n",
++ __func__, cpu, vector, irq);
++ DBG("%s: per_cpu[%p]: %d %d %d %d\n",
++ __func__, per_cpu(ipi_to_irq, cpu),
++ per_cpu(ipi_to_irq, cpu)[0],
++ per_cpu(ipi_to_irq, cpu)[1],
++ per_cpu(ipi_to_irq, cpu)[2],
++ per_cpu(ipi_to_irq, cpu)[3]);
++
++ notify_remote_via_irq(irq);
++}
++
++static void smp_xen_message_pass(int target, int msg)
++{
++ int cpu;
++
++ switch (msg) {
++ case PPC_MSG_RESCHEDULE:
++ case PPC_MSG_CALL_FUNCTION:
++ case PPC_MSG_DEBUGGER_BREAK:
++ break;
++ default:
++ panic("SMP %d: smp_message_pass: unknown msg %d\n",
++ smp_processor_id(), msg);
++ return;
++ }
++ switch (target) {
++ case MSG_ALL:
++ case MSG_ALL_BUT_SELF:
++ for_each_online_cpu(cpu) {
++ if (target == MSG_ALL_BUT_SELF &&
++ cpu == smp_processor_id())
++ continue;
++ send_IPI_one(cpu, msg);
++ }
++ break;
++ default:
++ send_IPI_one(target, msg);
++ break;
++ }
++}
++
++static struct smp_ops_t xen_smp_ops = {
++ .probe = smp_xen_probe,
++ .message_pass = smp_xen_message_pass,
++ .kick_cpu = smp_generic_kick_cpu,
++ .setup_cpu = smp_xen_setup_cpu,
++};
++
++void xen_setup_smp(void)
++{
++ smp_ops = &xen_smp_ops;
++
++ xen_boot_secondary_vcpus();
++ smp_release_cpus();
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/time.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/time.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/time.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/time.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,114 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <linux/time.h>
++#include <linux/rtc.h>
++#include <asm/hypervisor.h>
++#include <asm/machdep.h>
++#include <asm/time.h>
++#include <asm/udbg.h>
++
++#ifdef DEBUG
++#define DBG(fmt...) printk(fmt)
++#else
++#define DBG(fmt...)
++#endif
++
++void time_resume(void)
++{
++ snapshot_timebase();
++}
++
++static inline ulong time_from_shared(void)
++{
++ ulong t;
++
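++ /* wallclock = hypervisor epoch (wc_sec) plus seconds elapsed since
++ * boot_timebase, derived from the timebase register */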
++ DBG("tb_freq: %ld\n", ppc_tb_freq);
++
++ t = mftb() - HYPERVISOR_shared_info->arch.boot_timebase;
++ t /= ppc_tb_freq;
++ t += HYPERVISOR_shared_info->wc_sec;
++
++ return t;
++}
++
++static void (*host_md_get_rtc_time)(struct rtc_time *tm);
++static void xen_get_rtc_time(struct rtc_time *tm)
++{
++ if (is_initial_xendomain()) {
++ host_md_get_rtc_time(tm);
++ return;
++ } else {
++ ulong t;
++
++ t = time_from_shared();
++ to_tm(t, tm);
++ }
++}
++
++static int (*host_md_set_rtc_time)(struct rtc_time *tm);
++static int xen_set_rtc_time(struct rtc_time *tm)
++{
++ ulong sec;
++
++ if (is_initial_xendomain()) {
++ host_md_set_rtc_time(tm);
++ return 0;
++ }
++
++ sec = mktime(tm->tm_year, tm->tm_mon, tm->tm_mday,
++ tm->tm_hour, tm->tm_min, tm->tm_sec);
++
++ HYPERVISOR_shared_info->wc_sec = sec;
++ HYPERVISOR_shared_info->arch.boot_timebase = mftb();
++
++ return 0;
++}
++
++static unsigned long (*host_md_get_boot_time)(void);
++static unsigned long __init xen_get_boot_time(void)
++{
++ ulong t;
++
++ if (is_initial_xendomain()) {
++ t = host_md_get_boot_time();
++
++ HYPERVISOR_shared_info->wc_sec = t;
++ HYPERVISOR_shared_info->arch.boot_timebase = mftb();
++ DBG("%s: time: %ld\n", __func__, t);
++ } else {
++ t = time_from_shared();
++ DBG("%s: %ld\n", __func__, t);
++ }
++ return t;
++}
++
++void __init xen_setup_time(struct machdep_calls *host_md)
++{
++ ppc_md.get_boot_time = xen_get_boot_time;
++ host_md_get_boot_time = host_md->get_boot_time;
++
++ ppc_md.set_rtc_time = xen_set_rtc_time;
++ host_md_set_rtc_time = host_md->set_rtc_time;
++
++ ppc_md.get_rtc_time = xen_get_rtc_time;
++ host_md_get_rtc_time = host_md->get_rtc_time;
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/udbg_xen.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/udbg_xen.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/udbg_xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/udbg_xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,164 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/module.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/console.h>
++#include <xen/evtchn.h>
++#include <asm/udbg.h>
++#include <asm/hypervisor.h>
++#include "setup.h"
++
++static void udbg_xen_wait(void)
++{
++ evtchn_port_t port = 0;
++
++ if (xen_start_info) {
++ port = xen_start_info->console.domU.evtchn;
++ clear_evtchn(port);
++ }
++ HYPERVISOR_poll(&port, 1, 10);
++}
++
++static int udbg_getc_xen(void)
++{
++ int ch;
++ for (;;) {
++ ch = udbg_getc_poll();
++ if (ch == -1) {
++ udbg_xen_wait();
++ } else {
++ return ch;
++ }
++ }
++}
++
++static void udbg_putc_dom0_xen(char c)
++{
++ unsigned long rc;
++
++ if (c == '\n')
++ udbg_putc_dom0_xen('\r');
++
++ do {
++ rc = HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
++ } while (rc < 0);
++}
++
++/* Buffered chars getc */
++static long inbuflen;
++static char inbuf[128]; /* Xen serial ring buffer */
++
++static int udbg_getc_poll_dom0_xen(void)
++{
++ /* The interface is tricky because it may return many chars.
++ * We save them statically for future calls to udbg_getc().
++ */
++ char ch, *buf = (char *)inbuf;
++ int i;
++
++ if (inbuflen == 0) {
++ /* get some more chars. */
++ inbuflen = HYPERVISOR_console_io(CONSOLEIO_read,
++ sizeof(inbuf), buf);
++ }
++
++ if (inbuflen == 0)
++ return -1;
++
++ ch = buf[0];
++ for (i = 1; i < inbuflen; i++) /* shuffle them down. */
++ buf[i-1] = buf[i];
++ inbuflen--;
++
++ return ch;
++}
++
++static struct xencons_interface *intf;
++
++static void udbg_putc_domu_xen(char c)
++{
++ XENCONS_RING_IDX cons, prod;
++
++ if (c == '\n')
++ udbg_putc_domu_xen('\r');
++
++ cons = intf->out_cons;
++ prod = intf->out_prod;
++ mb();
++
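++ /* write only if the ring has free space; a character is silently
++ * dropped when the ring is full */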
++ if ((prod - cons) < sizeof(intf->out))
++ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = c;
++
++ wmb();
++ intf->out_prod = prod;
++
++ if (xen_start_info)
++ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++}
++
++static int udbg_getc_poll_domu_xen(void)
++{
++ XENCONS_RING_IDX cons, prod;
++ int c;
++
++ mb();
++ cons = intf->in_cons;
++ prod = intf->in_prod;
++ BUG_ON((prod - cons) > sizeof(intf->in));
++
++ if (cons == prod)
++ return -1;
++
++ c = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
++ wmb();
++ intf->in_cons = cons;
++
++ if (xen_start_info)
++ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++
++ return c;
++}
++
++void udbg_init_xen(void)
++{
++ ulong __console_mfn = 0;
++
++ if (xen_start_info) {
++ /* we can find out where everything is */
++ if (!(xen_start_info->flags & SIF_INITDOMAIN))
++ __console_mfn = xen_start_info->console.domU.mfn;
++ } else {
++ /* VERY early printf */
++#ifdef CONFIG_PPC_EARLY_DEBUG_XEN_DOMU
++ __console_mfn = 0x3ffdUL;
++#endif
++ }
++
++ udbg_getc = udbg_getc_xen;
++ if (__console_mfn == 0) {
++ udbg_putc = udbg_putc_dom0_xen;
++ udbg_getc_poll = udbg_getc_poll_dom0_xen;
++ } else {
++ udbg_putc = udbg_putc_domu_xen;
++ udbg_getc_poll = udbg_getc_poll_domu_xen;
++ intf = (struct xencons_interface *)mfn_to_virt(__console_mfn);
++ }
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/util.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/util.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/util.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/util.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,70 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++#include "setup.h"
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++ struct vm_struct *area;
++ struct page *page;
++
++ page = alloc_foreign_page();
++ if (page == NULL) {
++ BUG();
++ return NULL;
++ }
++
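++ /* the area is backed by a single page reserved for foreign
++ * mappings, regardless of the size requested */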
++ area = kmalloc(sizeof(*area), GFP_KERNEL);
++ if (area != NULL) {
++ area->flags = VM_MAP;//XXX
++ area->addr = pfn_to_kaddr(page_to_pfn(page));
++ area->size = size;
++ area->pages = NULL; //XXX
++ area->nr_pages = size >> PAGE_SHIFT;
++ area->phys_addr = 0;
++ }
++ return area;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++ free_foreign_page(virt_to_page(area->addr));
++ kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++
++void lock_vm_area(struct vm_struct *area)
++{
++ preempt_disable();
++}
++EXPORT_SYMBOL_GPL(lock_vm_area);
++
++void unlock_vm_area(struct vm_struct *area)
++{
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(unlock_vm_area);
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/xencomm.c linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/xencomm.c
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/xencomm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/xencomm.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,54 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/current.h>
++#include <xen/interface/arch-powerpc.h>
++#include <xen/xencomm.h>
++
++/* translate virtual address to physical address */
++unsigned long xencomm_vtop(unsigned long vaddr)
++{
++ struct page *page;
++ struct vm_area_struct *vma;
++
++ /* NULL is NULL */
++ if (vaddr == 0)
++ return 0;
++
++ if (is_kernel_addr(vaddr))
++ return __pa(vaddr);
++
++ /* XXX double-check (lack of) locking */
++ vma = find_extend_vma(current->mm, vaddr);
++ BUG_ON(!vma);
++ if (!vma)
++ return ~0UL;
++
++ page = follow_page(vma, vaddr, 0);
++ BUG_ON(!page);
++ if (!page)
++ return ~0UL;
++
++ return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
++}
+diff -rpuN linux-2.6.18.8/arch/powerpc/platforms/xen/xen_guest.S linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/xen_guest.S
+--- linux-2.6.18.8/arch/powerpc/platforms/xen/xen_guest.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/platforms/xen/xen_guest.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,27 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++ .section __xen_guest
++ .ascii "GUEST_OS=linux"
++ .ascii ",GUEST_VER=xen-3.0"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0xC000000000000000"
++ .ascii ",LOADER=generic"
++ .byte 0
+diff -rpuN linux-2.6.18.8/arch/powerpc/sysdev/mpic.c linux-2.6.18-xen-3.3.0/arch/powerpc/sysdev/mpic.c
+--- linux-2.6.18.8/arch/powerpc/sysdev/mpic.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/sysdev/mpic.c 2008-08-21 11:36:07.000000000 +0200
+@@ -765,6 +765,9 @@ static int mpic_host_map(struct irq_host
+ else if (hw >= MPIC_VEC_IPI_0) {
+ WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+
++ if (mpic->flags & MPIC_SKIP_IPI_INIT)
++ return 0;
++
+ DBG("mpic: mapping as IPI\n");
+ set_irq_chip_data(virq, mpic);
+ set_irq_chip_and_handler(virq, &mpic->hc_ipi,
+@@ -1019,6 +1022,9 @@ void __init mpic_init(struct mpic *mpic)
+ (MPIC_VEC_TIMER_0 + i));
+ }
+
++ if (mpic->flags & MPIC_SKIP_IPI_INIT)
++ goto ipi_bailout;
++
+ /* Initialize IPIs to our reserved vectors and mark them disabled for now */
+ mpic_test_broken_ipi(mpic);
+ for (i = 0; i < 4; i++) {
+@@ -1028,6 +1034,7 @@ void __init mpic_init(struct mpic *mpic)
+ (MPIC_VEC_IPI_0 + i));
+ }
+
++ipi_bailout:
+ /* Initialize interrupt sources */
+ if (mpic->irq_count == 0)
+ mpic->irq_count = mpic->num_sources;
+diff -rpuN linux-2.6.18.8/arch/powerpc/xmon/xmon.c linux-2.6.18-xen-3.3.0/arch/powerpc/xmon/xmon.c
+--- linux-2.6.18.8/arch/powerpc/xmon/xmon.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/powerpc/xmon/xmon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -752,6 +752,9 @@ cmds(struct pt_regs *excp)
+ cmd = inchar();
+ }
+ switch (cmd) {
++ case 'A':
++ asm volatile(".long 0x200;nop");
++ break;
+ case 'm':
+ cmd = inchar();
+ switch (cmd) {
+diff -rpuN linux-2.6.18.8/arch/x86_64/boot/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/boot/Makefile
+--- linux-2.6.18.8/arch/x86_64/boot/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/boot/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -26,7 +26,7 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
+ #RAMDISK := -DRAMDISK=512
+
+ targets := vmlinux.bin bootsect bootsect.o \
+- setup setup.o bzImage mtools.conf
++ setup setup.o bzImage mtools.conf vmlinuz vmlinux-stripped
+
+ EXTRA_CFLAGS := -m32
+
+@@ -131,5 +131,13 @@ zlilo: $(BOOTIMAGE)
+ cp System.map $(INSTALL_PATH)/
+ if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+
++$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
++ $(call if_changed,gzip)
++ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++
++$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded
++$(obj)/vmlinux-stripped: vmlinux FORCE
++ $(call if_changed,objcopy)
++
+ install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/ia32_binfmt.c linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32_binfmt.c
+--- linux-2.6.18.8/arch/x86_64/ia32/ia32_binfmt.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32_binfmt.c 2008-08-21 11:36:07.000000000 +0200
+@@ -65,55 +65,6 @@ typedef unsigned int elf_greg_t;
+ #define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
+ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+-/*
+- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+- * extra segments containing the vsyscall DSO contents. Dumping its
+- * contents makes post-mortem fully interpretable later without matching up
+- * the same kernel and hardware config to see what PC values meant.
+- * Dumping its extra ELF program headers includes all the other information
+- * a debugger needs to easily find how the vsyscall DSO was being used.
+- */
+-#define ELF_CORE_EXTRA_PHDRS (find_vma(current->mm, VSYSCALL32_BASE) ? \
+- (VSYSCALL32_EHDR->e_phnum) : 0)
+-#define ELF_CORE_WRITE_EXTRA_PHDRS \
+-do { \
+- if (find_vma(current->mm, VSYSCALL32_BASE)) { \
+- const struct elf32_phdr *const vsyscall_phdrs = \
+- (const struct elf32_phdr *) (VSYSCALL32_BASE \
+- + VSYSCALL32_EHDR->e_phoff);\
+- int i; \
+- Elf32_Off ofs = 0; \
+- for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
+- struct elf32_phdr phdr = vsyscall_phdrs[i]; \
+- if (phdr.p_type == PT_LOAD) { \
+- BUG_ON(ofs != 0); \
+- ofs = phdr.p_offset = offset; \
+- phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
+- phdr.p_filesz = phdr.p_memsz; \
+- offset += phdr.p_filesz; \
+- } \
+- else \
+- phdr.p_offset += ofs; \
+- phdr.p_paddr = 0; /* match other core phdrs */ \
+- DUMP_WRITE(&phdr, sizeof(phdr)); \
+- } \
+- } \
+-} while (0)
+-#define ELF_CORE_WRITE_EXTRA_DATA \
+-do { \
+- if (find_vma(current->mm, VSYSCALL32_BASE)) { \
+- const struct elf32_phdr *const vsyscall_phdrs = \
+- (const struct elf32_phdr *) (VSYSCALL32_BASE \
+- + VSYSCALL32_EHDR->e_phoff); \
+- int i; \
+- for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \
+- if (vsyscall_phdrs[i].p_type == PT_LOAD) \
+- DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr,\
+- PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
+- } \
+- } \
+-} while (0)
+-
+ struct elf_siginfo
+ {
+ int si_signo; /* signal number */
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/ia32entry.S linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32entry.S
+--- linux-2.6.18.8/arch/x86_64/ia32/ia32entry.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32entry.S 2008-08-21 11:36:07.000000000 +0200
+@@ -703,8 +703,8 @@ ia32_sys_call_table:
+ .quad sys_readlinkat /* 305 */
+ .quad sys_fchmodat
+ .quad sys_faccessat
+- .quad quiet_ni_syscall /* pselect6 for now */
+- .quad quiet_ni_syscall /* ppoll for now */
++ .quad compat_sys_pselect6
++ .quad compat_sys_ppoll
+ .quad sys_unshare /* 310 */
+ .quad compat_sys_set_robust_list
+ .quad compat_sys_get_robust_list
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/ia32entry-xen.S linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32entry-xen.S
+--- linux-2.6.18.8/arch/x86_64/ia32/ia32entry-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32entry-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,666 @@
++/*
++ * Compatibility mode system call entry point for x86-64.
++ *
++ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/ia32_unistd.h>
++#include <asm/thread_info.h>
++#include <asm/segment.h>
++#include <asm/vsyscall32.h>
++#include <asm/irqflags.h>
++#include <linux/linkage.h>
++
++#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
++
++ .macro IA32_ARG_FIXUP noebp=0
++ movl %edi,%r8d
++ .if \noebp
++ .else
++ movl %ebp,%r9d
++ .endif
++ xchg %ecx,%esi
++ movl %ebx,%edi
++ movl %edx,%edx /* zero extension */
++ .endm
++
++ /* clobbers %eax */
++ .macro CLEAR_RREGS
++ xorl %eax,%eax
++ movq %rax,R11(%rsp)
++ movq %rax,R10(%rsp)
++ movq %rax,R9(%rsp)
++ movq %rax,R8(%rsp)
++ .endm
++
++ .macro LOAD_ARGS32 offset
++ movl \offset(%rsp),%r11d
++ movl \offset+8(%rsp),%r10d
++ movl \offset+16(%rsp),%r9d
++ movl \offset+24(%rsp),%r8d
++ movl \offset+40(%rsp),%ecx
++ movl \offset+48(%rsp),%edx
++ movl \offset+56(%rsp),%esi
++ movl \offset+64(%rsp),%edi
++ movl \offset+72(%rsp),%eax
++ .endm
++
++ .macro CFI_STARTPROC32 simple
++ CFI_STARTPROC \simple
++ CFI_UNDEFINED r8
++ CFI_UNDEFINED r9
++ CFI_UNDEFINED r10
++ CFI_UNDEFINED r11
++ CFI_UNDEFINED r12
++ CFI_UNDEFINED r13
++ CFI_UNDEFINED r14
++ CFI_UNDEFINED r15
++ .endm
++
++/*
++ * 32bit SYSENTER instruction entry.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp user stack
++ * 0(%ebp) Arg6
++ *
++ * Interrupts on.
++ *
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below. Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */
++ENTRY(ia32_sysenter_target)
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-RIP+16
++ /*CFI_REL_OFFSET ss,SS-RIP+16*/
++ CFI_REL_OFFSET rsp,RSP-RIP+16
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
++ /*CFI_REL_OFFSET cs,CS-RIP+16*/
++ CFI_REL_OFFSET rip,RIP-RIP+16
++ CFI_REL_OFFSET r11,8
++ CFI_REL_OFFSET rcx,0
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ popq %rcx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ movl %ebp,%ebp /* zero extension */
++ movl %eax,%eax
++ movl $__USER32_DS,40(%rsp)
++ movq %rbp,32(%rsp)
++ movl $__USER32_CS,16(%rsp)
++ movl $VSYSCALL32_SYSEXIT,8(%rsp)
++ movq %rax,(%rsp)
++ cld
++ SAVE_ARGS 0,0,0
++ /* no need to do an access_ok check here because rbp has been
++ 32bit zero extended */
++1: movl (%rbp),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz sysenter_tracesys
++sysenter_do_call:
++ cmpl $(IA32_NR_syscalls-1),%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP 1
++ call *ia32_sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++sysenter_tracesys:
++ SAVE_REST
++ CLEAR_RREGS
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ movl %ebp, %ebp
++ /* no need to do an access_ok check here because rbp has been
++ 32bit zero extended */
++1: movl (%rbp),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ jmp sysenter_do_call
++ CFI_ENDPROC
++ENDPROC(ia32_sysenter_target)
++
++/*
++ * 32bit SYSCALL instruction entry.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx return EIP
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
++ * %esp user stack
++ * 0(%esp) Arg6
++ *
++ * Interrupts on.
++ *
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below. Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */
++ENTRY(ia32_cstar_target)
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-RIP+16
++ /*CFI_REL_OFFSET ss,SS-RIP+16*/
++ CFI_REL_OFFSET rsp,RSP-RIP+16
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
++ /*CFI_REL_OFFSET cs,CS-RIP+16*/
++ CFI_REL_OFFSET rip,RIP-RIP+16
++ movl %eax,%eax /* zero extension */
++ movl RSP-RIP+16(%rsp),%r8d
++ SAVE_ARGS -8,1,1
++ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
++ movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
++ movl %ebp,%ecx
++ movl $__USER32_CS,CS-ARGOFFSET(%rsp)
++ movl $__USER32_DS,SS-ARGOFFSET(%rsp)
++ /* no need to do an access_ok check here because r8 has been
++ 32bit zero extended */
++ /* hardware stack frame is complete now */
++1: movl (%r8),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz cstar_tracesys
++cstar_do_call:
++ cmpl $IA32_NR_syscalls-1,%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP 1
++ call *ia32_sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++cstar_tracesys:
++ SAVE_REST
++ CLEAR_RREGS
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ movl RSP-ARGOFFSET(%rsp), %r8d
++ /* no need to do an access_ok check here because r8 has been
++ 32bit zero extended */
++1: movl (%r8),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ jmp cstar_do_call
++END(ia32_cstar_target)
++
++ia32_badarg:
++ movq $-EFAULT,%rax
++ jmp ia32_sysret
++ CFI_ENDPROC
++
++/*
++ * Emulated IA32 system calls via int 0x80.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
++ *
++ * Notes:
++ * Uses the same stack frame as the x86-64 version.
++ * All registers except %eax must be saved (but ptrace may violate that)
++ * Arguments are zero extended. For system calls that want sign extension and
++ * take long arguments a wrapper is needed. Most calls can just be called
++ * directly.
++ * Assumes it is only called from user space and entered with interrupts on.
++ */
++
++ENTRY(ia32_syscall)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-RIP+16
++ /*CFI_REL_OFFSET ss,SS-RIP+16*/
++ CFI_REL_OFFSET rsp,RSP-RIP+16
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
++ /*CFI_REL_OFFSET cs,CS-RIP+16*/
++ CFI_REL_OFFSET rip,RIP-RIP+16
++ CFI_REL_OFFSET r11,8
++ CFI_REL_OFFSET rcx,0
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ popq %rcx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ movl %eax,%eax
++ movq %rax,(%rsp)
++ cld
++	/* note the registers are not zero extended to the stack frame.
++	   this could be a problem. */
++ SAVE_ARGS 0,0,1
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz ia32_tracesys
++ia32_do_syscall:
++ cmpl $(IA32_NR_syscalls-1),%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP
++ call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
++ia32_sysret:
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++ia32_tracesys:
++ SAVE_REST
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ jmp ia32_do_syscall
++END(ia32_syscall)
++
++ia32_badsys:
++ movq $0,ORIG_RAX-ARGOFFSET(%rsp)
++ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++quiet_ni_syscall:
++ movq $-ENOSYS,%rax
++ ret
++ CFI_ENDPROC
++
++ .macro PTREGSCALL label, func, arg
++ .globl \label
++\label:
++ leaq \func(%rip),%rax
++ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++ jmp ia32_ptregs_common
++ .endm
++
++ CFI_STARTPROC32
++
++ PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
++ PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
++ PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
++ PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
++ PTREGSCALL stub32_execve, sys32_execve, %rcx
++ PTREGSCALL stub32_fork, sys_fork, %rdi
++ PTREGSCALL stub32_clone, sys32_clone, %rdx
++ PTREGSCALL stub32_vfork, sys_vfork, %rdi
++ PTREGSCALL stub32_iopl, sys_iopl, %rsi
++ PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++
++ENTRY(ia32_ptregs_common)
++ popq %r11
++ CFI_ENDPROC
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
++ CFI_REL_OFFSET rax,RAX-ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
++/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
++ SAVE_REST
++ call *%rax
++ RESTORE_REST
++ jmp ia32_sysret /* misbalances the return cache */
++ CFI_ENDPROC
++END(ia32_ptregs_common)
++
++ .section .rodata,"a"
++ .align 8
++ia32_sys_call_table:
++ .quad sys_restart_syscall
++ .quad sys_exit
++ .quad stub32_fork
++ .quad sys_read
++ .quad sys_write
++ .quad compat_sys_open /* 5 */
++ .quad sys_close
++ .quad sys32_waitpid
++ .quad sys_creat
++ .quad sys_link
++ .quad sys_unlink /* 10 */
++ .quad stub32_execve
++ .quad sys_chdir
++ .quad compat_sys_time
++ .quad sys_mknod
++ .quad sys_chmod /* 15 */
++ .quad sys_lchown16
++ .quad quiet_ni_syscall /* old break syscall holder */
++ .quad sys_stat
++ .quad sys32_lseek
++ .quad sys_getpid /* 20 */
++ .quad compat_sys_mount /* mount */
++ .quad sys_oldumount /* old_umount */
++ .quad sys_setuid16
++ .quad sys_getuid16
++ .quad compat_sys_stime /* stime */ /* 25 */
++ .quad sys32_ptrace /* ptrace */
++ .quad sys_alarm
++ .quad sys_fstat /* (old)fstat */
++ .quad sys_pause
++ .quad compat_sys_utime /* 30 */
++ .quad quiet_ni_syscall /* old stty syscall holder */
++ .quad quiet_ni_syscall /* old gtty syscall holder */
++ .quad sys_access
++ .quad sys_nice
++ .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
++ .quad sys_sync
++ .quad sys32_kill
++ .quad sys_rename
++ .quad sys_mkdir
++ .quad sys_rmdir /* 40 */
++ .quad sys_dup
++ .quad sys32_pipe
++ .quad compat_sys_times
++ .quad quiet_ni_syscall /* old prof syscall holder */
++ .quad sys_brk /* 45 */
++ .quad sys_setgid16
++ .quad sys_getgid16
++ .quad sys_signal
++ .quad sys_geteuid16
++ .quad sys_getegid16 /* 50 */
++ .quad sys_acct
++ .quad sys_umount /* new_umount */
++ .quad quiet_ni_syscall /* old lock syscall holder */
++ .quad compat_sys_ioctl
++ .quad compat_sys_fcntl64 /* 55 */
++ .quad quiet_ni_syscall /* old mpx syscall holder */
++ .quad sys_setpgid
++ .quad quiet_ni_syscall /* old ulimit syscall holder */
++ .quad sys32_olduname
++ .quad sys_umask /* 60 */
++ .quad sys_chroot
++ .quad sys32_ustat
++ .quad sys_dup2
++ .quad sys_getppid
++ .quad sys_getpgrp /* 65 */
++ .quad sys_setsid
++ .quad sys32_sigaction
++ .quad sys_sgetmask
++ .quad sys_ssetmask
++ .quad sys_setreuid16 /* 70 */
++ .quad sys_setregid16
++ .quad stub32_sigsuspend
++ .quad compat_sys_sigpending
++ .quad sys_sethostname
++ .quad compat_sys_setrlimit /* 75 */
++ .quad compat_sys_old_getrlimit /* old_getrlimit */
++ .quad compat_sys_getrusage
++ .quad sys32_gettimeofday
++ .quad sys32_settimeofday
++ .quad sys_getgroups16 /* 80 */
++ .quad sys_setgroups16
++ .quad sys32_old_select
++ .quad sys_symlink
++ .quad sys_lstat
++ .quad sys_readlink /* 85 */
++#ifdef CONFIG_IA32_AOUT
++ .quad sys_uselib
++#else
++ .quad quiet_ni_syscall
++#endif
++ .quad sys_swapon
++ .quad sys_reboot
++ .quad compat_sys_old_readdir
++ .quad sys32_mmap /* 90 */
++ .quad sys_munmap
++ .quad sys_truncate
++ .quad sys_ftruncate
++ .quad sys_fchmod
++ .quad sys_fchown16 /* 95 */
++ .quad sys_getpriority
++ .quad sys_setpriority
++ .quad quiet_ni_syscall /* old profil syscall holder */
++ .quad compat_sys_statfs
++ .quad compat_sys_fstatfs /* 100 */
++ .quad sys_ioperm
++ .quad compat_sys_socketcall
++ .quad sys_syslog
++ .quad compat_sys_setitimer
++ .quad compat_sys_getitimer /* 105 */
++ .quad compat_sys_newstat
++ .quad compat_sys_newlstat
++ .quad compat_sys_newfstat
++ .quad sys32_uname
++ .quad stub32_iopl /* 110 */
++ .quad sys_vhangup
++ .quad quiet_ni_syscall /* old "idle" system call */
++ .quad sys32_vm86_warning /* vm86old */
++ .quad compat_sys_wait4
++ .quad sys_swapoff /* 115 */
++ .quad sys32_sysinfo
++ .quad sys32_ipc
++ .quad sys_fsync
++ .quad stub32_sigreturn
++ .quad stub32_clone /* 120 */
++ .quad sys_setdomainname
++ .quad sys_uname
++ .quad sys_modify_ldt
++ .quad compat_sys_adjtimex
++ .quad sys32_mprotect /* 125 */
++ .quad compat_sys_sigprocmask
++ .quad quiet_ni_syscall /* create_module */
++ .quad sys_init_module
++ .quad sys_delete_module
++ .quad quiet_ni_syscall /* 130 get_kernel_syms */
++ .quad sys_quotactl
++ .quad sys_getpgid
++ .quad sys_fchdir
++ .quad quiet_ni_syscall /* bdflush */
++ .quad sys_sysfs /* 135 */
++ .quad sys_personality
++ .quad quiet_ni_syscall /* for afs_syscall */
++ .quad sys_setfsuid16
++ .quad sys_setfsgid16
++ .quad sys_llseek /* 140 */
++ .quad compat_sys_getdents
++ .quad compat_sys_select
++ .quad sys_flock
++ .quad sys_msync
++ .quad compat_sys_readv /* 145 */
++ .quad compat_sys_writev
++ .quad sys_getsid
++ .quad sys_fdatasync
++ .quad sys32_sysctl /* sysctl */
++ .quad sys_mlock /* 150 */
++ .quad sys_munlock
++ .quad sys_mlockall
++ .quad sys_munlockall
++ .quad sys_sched_setparam
++ .quad sys_sched_getparam /* 155 */
++ .quad sys_sched_setscheduler
++ .quad sys_sched_getscheduler
++ .quad sys_sched_yield
++ .quad sys_sched_get_priority_max
++ .quad sys_sched_get_priority_min /* 160 */
++ .quad sys_sched_rr_get_interval
++ .quad compat_sys_nanosleep
++ .quad sys_mremap
++ .quad sys_setresuid16
++ .quad sys_getresuid16 /* 165 */
++ .quad sys32_vm86_warning /* vm86 */
++ .quad quiet_ni_syscall /* query_module */
++ .quad sys_poll
++ .quad compat_sys_nfsservctl
++ .quad sys_setresgid16 /* 170 */
++ .quad sys_getresgid16
++ .quad sys_prctl
++ .quad stub32_rt_sigreturn
++ .quad sys32_rt_sigaction
++ .quad sys32_rt_sigprocmask /* 175 */
++ .quad sys32_rt_sigpending
++ .quad compat_sys_rt_sigtimedwait
++ .quad sys32_rt_sigqueueinfo
++ .quad stub32_rt_sigsuspend
++ .quad sys32_pread /* 180 */
++ .quad sys32_pwrite
++ .quad sys_chown16
++ .quad sys_getcwd
++ .quad sys_capget
++ .quad sys_capset
++ .quad stub32_sigaltstack
++ .quad sys32_sendfile
++ .quad quiet_ni_syscall /* streams1 */
++ .quad quiet_ni_syscall /* streams2 */
++ .quad stub32_vfork /* 190 */
++ .quad compat_sys_getrlimit
++ .quad sys32_mmap2
++ .quad sys32_truncate64
++ .quad sys32_ftruncate64
++ .quad sys32_stat64 /* 195 */
++ .quad sys32_lstat64
++ .quad sys32_fstat64
++ .quad sys_lchown
++ .quad sys_getuid
++ .quad sys_getgid /* 200 */
++ .quad sys_geteuid
++ .quad sys_getegid
++ .quad sys_setreuid
++ .quad sys_setregid
++ .quad sys_getgroups /* 205 */
++ .quad sys_setgroups
++ .quad sys_fchown
++ .quad sys_setresuid
++ .quad sys_getresuid
++ .quad sys_setresgid /* 210 */
++ .quad sys_getresgid
++ .quad sys_chown
++ .quad sys_setuid
++ .quad sys_setgid
++ .quad sys_setfsuid /* 215 */
++ .quad sys_setfsgid
++ .quad sys_pivot_root
++ .quad sys_mincore
++ .quad sys_madvise
++ .quad compat_sys_getdents64 /* 220 getdents64 */
++ .quad compat_sys_fcntl64
++ .quad quiet_ni_syscall /* tux */
++ .quad quiet_ni_syscall /* security */
++ .quad sys_gettid
++ .quad sys_readahead /* 225 */
++ .quad sys_setxattr
++ .quad sys_lsetxattr
++ .quad sys_fsetxattr
++ .quad sys_getxattr
++ .quad sys_lgetxattr /* 230 */
++ .quad sys_fgetxattr
++ .quad sys_listxattr
++ .quad sys_llistxattr
++ .quad sys_flistxattr
++ .quad sys_removexattr /* 235 */
++ .quad sys_lremovexattr
++ .quad sys_fremovexattr
++ .quad sys_tkill
++ .quad sys_sendfile64
++ .quad compat_sys_futex /* 240 */
++ .quad compat_sys_sched_setaffinity
++ .quad compat_sys_sched_getaffinity
++ .quad sys32_set_thread_area
++ .quad sys32_get_thread_area
++ .quad compat_sys_io_setup /* 245 */
++ .quad sys_io_destroy
++ .quad compat_sys_io_getevents
++ .quad compat_sys_io_submit
++ .quad sys_io_cancel
++ .quad sys_fadvise64 /* 250 */
++ .quad quiet_ni_syscall /* free_huge_pages */
++ .quad sys_exit_group
++ .quad sys32_lookup_dcookie
++ .quad sys_epoll_create
++ .quad sys_epoll_ctl /* 255 */
++ .quad sys_epoll_wait
++ .quad sys_remap_file_pages
++ .quad sys_set_tid_address
++ .quad compat_sys_timer_create
++ .quad compat_sys_timer_settime /* 260 */
++ .quad compat_sys_timer_gettime
++ .quad sys_timer_getoverrun
++ .quad sys_timer_delete
++ .quad compat_sys_clock_settime
++ .quad compat_sys_clock_gettime /* 265 */
++ .quad compat_sys_clock_getres
++ .quad compat_sys_clock_nanosleep
++ .quad compat_sys_statfs64
++ .quad compat_sys_fstatfs64
++ .quad sys_tgkill /* 270 */
++ .quad compat_sys_utimes
++ .quad sys32_fadvise64_64
++ .quad quiet_ni_syscall /* sys_vserver */
++ .quad sys_mbind
++ .quad compat_sys_get_mempolicy /* 275 */
++ .quad sys_set_mempolicy
++ .quad compat_sys_mq_open
++ .quad sys_mq_unlink
++ .quad compat_sys_mq_timedsend
++ .quad compat_sys_mq_timedreceive /* 280 */
++ .quad compat_sys_mq_notify
++ .quad compat_sys_mq_getsetattr
++ .quad compat_sys_kexec_load /* reserved for kexec */
++ .quad compat_sys_waitid
++ .quad quiet_ni_syscall /* 285: sys_altroot */
++ .quad sys_add_key
++ .quad sys_request_key
++ .quad sys_keyctl
++ .quad sys_ioprio_set
++ .quad sys_ioprio_get /* 290 */
++ .quad sys_inotify_init
++ .quad sys_inotify_add_watch
++ .quad sys_inotify_rm_watch
++ .quad sys_migrate_pages
++ .quad compat_sys_openat /* 295 */
++ .quad sys_mkdirat
++ .quad sys_mknodat
++ .quad sys_fchownat
++ .quad compat_sys_futimesat
++ .quad sys32_fstatat /* 300 */
++ .quad sys_unlinkat
++ .quad sys_renameat
++ .quad sys_linkat
++ .quad sys_symlinkat
++ .quad sys_readlinkat /* 305 */
++ .quad sys_fchmodat
++ .quad sys_faccessat
++ .quad compat_sys_pselect6
++ .quad compat_sys_ppoll
++ .quad sys_unshare /* 310 */
++ .quad compat_sys_set_robust_list
++ .quad compat_sys_get_robust_list
++ .quad sys_splice
++ .quad sys_sync_file_range
++ .quad sys_tee
++ .quad compat_sys_vmsplice
++ .quad compat_sys_move_pages
++ia32_syscall_end:
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/ia32_signal.c linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32_signal.c
+--- linux-2.6.18.8/arch/x86_64/ia32/ia32_signal.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/ia32_signal.c 2008-08-21 11:36:07.000000000 +0200
+@@ -113,25 +113,19 @@ int copy_siginfo_from_user32(siginfo_t *
+ }
+
+ asmlinkage long
+-sys32_sigsuspend(int history0, int history1, old_sigset_t mask,
+- struct pt_regs *regs)
++sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
+ {
+- sigset_t saveset;
+-
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+- saveset = current->blocked;
++ current->saved_sigmask = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+- regs->rax = -EINTR;
+- while (1) {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule();
+- if (do_signal(regs, &saveset))
+- return -EINTR;
+- }
++ current->state = TASK_INTERRUPTIBLE;
++ schedule();
++ set_thread_flag(TIF_RESTORE_SIGMASK);
++ return -ERESTARTNOHAND;
+ }
+
+ asmlinkage long
+@@ -508,11 +502,11 @@ int ia32_setup_frame(int sig, struct k_s
+ current->comm, current->pid, frame, regs->rip, frame->pretcode);
+ #endif
+
+- return 1;
++ return 0;
+
+ give_sigsegv:
+ force_sigsegv(sig, current);
+- return 0;
++ return -EFAULT;
+ }
+
+ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+@@ -595,7 +589,7 @@ int ia32_setup_rt_frame(int sig, struct
+ regs->ss = __USER32_DS;
+
+ set_fs(USER_DS);
+- regs->eflags &= ~TF_MASK;
++ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+@@ -604,9 +598,9 @@ int ia32_setup_rt_frame(int sig, struct
+ current->comm, current->pid, frame, regs->rip, frame->pretcode);
+ #endif
+
+- return 1;
++ return 0;
+
+ give_sigsegv:
+ force_sigsegv(sig, current);
+- return 0;
++ return -EFAULT;
+ }
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/Makefile
+--- linux-2.6.18.8/arch/x86_64/ia32/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -14,11 +14,14 @@ obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
+ audit-class-$(CONFIG_AUDIT) := audit.o
+ obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
+
++syscall32-types-y := sysenter syscall
++syscall32-types-$(subst 1,$(CONFIG_XEN),$(shell expr $(CONFIG_XEN_COMPAT)0 '<' 0x0302000)) += int80
++
+ $(obj)/syscall32_syscall.o: \
+- $(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
++ $(foreach F,$(syscall32-types-y),$(obj)/vsyscall-$F.so)
+
+ # Teach kbuild about targets
+-targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++targets := $(foreach F,$(syscall32-types-y),vsyscall-$F.o vsyscall-$F.so)
+
+ # The DSO images are built using a special linker script
+ quiet_cmd_syscall = SYSCALL $@
+@@ -27,9 +30,10 @@ quiet_cmd_syscall = SYSCALL $@
+ -Wl,-soname=linux-gate.so.1 -o $@ \
+ -Wl,-T,$(filter-out FORCE,$^)
+
+-$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
++$(foreach F,$(syscall32-types-y),$(obj)/vsyscall-$F.so): \
+ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ $(call if_changed,syscall)
+
+-AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
+-AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
++AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32 -Iarch/i386/kernel
++AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32 -Iarch/i386/kernel
++AFLAGS_vsyscall-int80.o = -m32 -Wa,-32 -Iarch/i386/kernel
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/syscall32.c linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/syscall32.c
+--- linux-2.6.18.8/arch/x86_64/ia32/syscall32.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/syscall32.c 2008-08-21 11:36:07.000000000 +0200
+@@ -59,6 +59,13 @@ int syscall32_setup_pages(struct linux_b
+ vma->vm_end = VSYSCALL32_END;
+ /* MAYWRITE to allow gdb to COW and set breakpoints */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++ /*
++ * Make sure the vDSO gets into every core dump.
++ * Dumping its contents makes post-mortem fully interpretable later
++ * without matching up the same kernel and hardware config to see
++ * what PC values meant.
++ */
++ vma->vm_flags |= VM_ALWAYSDUMP;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_ops = &syscall32_vm_ops;
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/syscall32_syscall-xen.S linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/syscall32_syscall-xen.S
+--- linux-2.6.18.8/arch/x86_64/ia32/syscall32_syscall-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/syscall32_syscall-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,28 @@
++/* 32bit VDSOs mapped into user space. */
++
++ .section ".init.data","aw"
++
++#if CONFIG_XEN_COMPAT < 0x030200
++
++ .globl syscall32_int80
++ .globl syscall32_int80_end
++
++syscall32_int80:
++ .incbin "arch/x86_64/ia32/vsyscall-int80.so"
++syscall32_int80_end:
++
++#endif
++
++ .globl syscall32_syscall
++ .globl syscall32_syscall_end
++
++syscall32_syscall:
++ .incbin "arch/x86_64/ia32/vsyscall-syscall.so"
++syscall32_syscall_end:
++
++ .globl syscall32_sysenter
++ .globl syscall32_sysenter_end
++
++syscall32_sysenter:
++ .incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
++syscall32_sysenter_end:
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/syscall32-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/syscall32-xen.c
+--- linux-2.6.18.8/arch/x86_64/ia32/syscall32-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/syscall32-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,137 @@
++/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
++
++/* vsyscall handling for 32-bit processes. Map a stub page into it
++ on demand because 32-bit code cannot reach the kernel's fixmaps */
++
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/init.h>
++#include <linux/stringify.h>
++#include <linux/security.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++#include <asm/ia32_unistd.h>
++#include <xen/interface/callback.h>
++
++extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
++extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
++extern int sysctl_vsyscall32;
++
++char *syscall32_page;
++static int use_sysenter = -1;
++
++#if CONFIG_XEN_COMPAT < 0x030200
++extern unsigned char syscall32_int80[], syscall32_int80_end[];
++static int use_int80 = 1;
++#endif
++
++static struct page *
++syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
++{
++ struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
++ get_page(p);
++ return p;
++}
++
++/* Prevent VMA merging */
++static void syscall32_vma_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct syscall32_vm_ops = {
++ .close = syscall32_vma_close,
++ .nopage = syscall32_nopage,
++};
++
++struct linux_binprm;
++
++/* Setup a VMA at program startup for the vsyscall page */
++int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
++{
++ int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ int ret;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ if (!vma)
++ return -ENOMEM;
++
++ memset(vma, 0, sizeof(struct vm_area_struct));
++ /* Could randomize here */
++ vma->vm_start = VSYSCALL32_BASE;
++ vma->vm_end = VSYSCALL32_END;
++ /* MAYWRITE to allow gdb to COW and set breakpoints */
++ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
++ vma->vm_flags |= mm->def_flags;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
++ vma->vm_ops = &syscall32_vm_ops;
++ vma->vm_mm = mm;
++
++ down_write(&mm->mmap_sem);
++ if ((ret = insert_vm_struct(mm, vma))) {
++ up_write(&mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return ret;
++ }
++ mm->total_vm += npages;
++ up_write(&mm->mmap_sem);
++ return 0;
++}
++
++static int __init init_syscall32(void)
++{
++ syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!syscall32_page)
++ panic("Cannot allocate syscall32 page");
++
++#if CONFIG_XEN_COMPAT < 0x030200
++ if (use_int80) {
++ memcpy(syscall32_page, syscall32_int80,
++ syscall32_int80_end - syscall32_int80);
++ } else
++#endif
++ if (use_sysenter > 0) {
++ memcpy(syscall32_page, syscall32_sysenter,
++ syscall32_sysenter_end - syscall32_sysenter);
++ } else {
++ memcpy(syscall32_page, syscall32_syscall,
++ syscall32_syscall_end - syscall32_syscall);
++ }
++ return 0;
++}
++
++/*
++ * This must be done early in case we have an initrd containing 32-bit
++ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
++ */
++core_initcall(init_syscall32);
++
++/* May not be __init: called during resume */
++void syscall32_cpu_init(void)
++{
++ static const struct callback_register cstar = {
++ .type = CALLBACKTYPE_syscall32,
++ .address = (unsigned long)ia32_cstar_target
++ };
++ static const struct callback_register sysenter = {
++ .type = CALLBACKTYPE_sysenter,
++ .address = (unsigned long)ia32_sysenter_target
++ };
++
++ /* Load these always in case some future AMD CPU supports
++ SYSENTER from compat mode too. */
++ if ((HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0) ||
++ (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0))
++#if CONFIG_XEN_COMPAT < 0x030200
++ return;
++ use_int80 = 0;
++#else
++ BUG();
++#endif
++
++ if (use_sysenter < 0)
++ use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/vsyscall-int80.S linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/vsyscall-int80.S
+--- linux-2.6.18.8/arch/x86_64/ia32/vsyscall-int80.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/vsyscall-int80.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++/*
++ * Code for the vsyscall page. This version uses the old int $0x80 method.
++ *
++ * NOTE:
++ * 1) __kernel_vsyscall _must_ be first in this page.
++ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
++ * for details.
++ */
++#include <asm/ia32_unistd.h>
++#include <asm/asm-offsets.h>
++
++ .code32
++ .text
++ .section .text.vsyscall,"ax"
++ .globl __kernel_vsyscall
++ .type __kernel_vsyscall,@function
++__kernel_vsyscall:
++.LSTART_vsyscall:
++ int $0x80
++ ret
++.LEND_vsyscall:
++ .size __kernel_vsyscall,.-.LSTART_vsyscall
++ .previous
++
++ .section .eh_frame,"a",@progbits
++.LSTARTFRAME:
++ .long .LENDCIE-.LSTARTCIE
++.LSTARTCIE:
++ .long 0 /* CIE ID */
++ .byte 1 /* Version number */
++ .string "zR" /* NUL-terminated augmentation string */
++ .uleb128 1 /* Code alignment factor */
++ .sleb128 -4 /* Data alignment factor */
++ .byte 8 /* Return address register column */
++ .uleb128 1 /* Augmentation value length */
++ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++ .byte 0x0c /* DW_CFA_def_cfa */
++ .uleb128 4
++ .uleb128 4
++ .byte 0x88 /* DW_CFA_offset, column 0x8 */
++ .uleb128 1
++ .align 4
++.LENDCIE:
++
++ .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
++.LSTARTFDE1:
++ .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
++ .long .LSTART_vsyscall-. /* PC-relative start address */
++ .long .LEND_vsyscall-.LSTART_vsyscall
++ .uleb128 0 /* Augmentation length */
++ .align 4
++.LENDFDE1:
++
++/*
++ * Get the common code for the sigreturn entry points.
++ */
++#define SYSCALL_ENTER_KERNEL int $0x80
++#include "vsyscall-sigreturn.S"
+diff -rpuN linux-2.6.18.8/arch/x86_64/ia32/vsyscall-sigreturn.S linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/vsyscall-sigreturn.S
+--- linux-2.6.18.8/arch/x86_64/ia32/vsyscall-sigreturn.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/ia32/vsyscall-sigreturn.S 2008-08-21 11:36:07.000000000 +0200
+@@ -139,5 +139,5 @@ __kernel_rt_sigreturn:
+ .align 4
+ .LENDFDE3:
+
+-#include "../../i386/kernel/vsyscall-note.S"
++#include <vsyscall-note.S>
+
+diff -rpuN linux-2.6.18.8/arch/x86_64/Kconfig linux-2.6.18-xen-3.3.0/arch/x86_64/Kconfig
+--- linux-2.6.18.8/arch/x86_64/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -135,6 +135,23 @@ config GENERIC_CPU
+
+ endchoice
+
++config X86_64_XEN
++ bool "Enable Xen compatible kernel"
++ select XEN
++ select SWIOTLB
++ help
++	  This option will compile a kernel compatible with the Xen hypervisor.
++
++config X86_NO_TSS
++ bool
++ depends on X86_64_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_64_XEN
++ default y
++
+ #
+ # Define implied options from the CPU selection here
+ #
+@@ -155,6 +172,7 @@ config X86_INTERNODE_CACHE_BYTES
+
+ config X86_TSC
+ bool
++ depends on !X86_64_XEN
+ default y
+
+ config X86_GOOD_APIC
+@@ -197,7 +215,7 @@ config X86_CPUID
+
+ config X86_HT
+ bool
+- depends on SMP && !MK8
++ depends on SMP && !MK8 && !X86_64_XEN
+ default y
+
+ config MATH_EMULATION
+@@ -211,14 +229,22 @@ config EISA
+
+ config X86_IO_APIC
+ bool
++	depends on !XEN_UNPRIVILEGED_GUEST
++ default y
++
++config X86_XEN_GENAPIC
++ bool
++ depends on X86_64_XEN
+ default y
+
+ config X86_LOCAL_APIC
+ bool
++	depends on !XEN_UNPRIVILEGED_GUEST
+ default y
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -259,7 +285,7 @@ config SMP
+
+ config SCHED_SMT
+ bool "SMT (Hyperthreading) scheduler support"
+- depends on SMP
++ depends on SMP && !X86_64_XEN
+ default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+@@ -269,7 +295,7 @@ config SCHED_SMT
+
+ config SCHED_MC
+ bool "Multi-core scheduler support"
+- depends on SMP
++ depends on SMP && !X86_64_XEN
+ default y
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+@@ -280,7 +306,7 @@ source "kernel/Kconfig.preempt"
+
+ config NUMA
+ bool "Non Uniform Memory Access (NUMA) Support"
+- depends on SMP
++ depends on SMP && !X86_64_XEN
+ help
+ Enable NUMA (Non Uniform Memory Access) support. The kernel
+ will try to allocate memory used by a CPU on the local memory
+@@ -341,7 +367,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
+
+ config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+- depends on (NUMA || EXPERIMENTAL)
++ depends on (NUMA || EXPERIMENTAL) && !X86_64_XEN
+
+ config ARCH_MEMORY_PROBE
+ def_bool y
+@@ -365,6 +391,7 @@ config NR_CPUS
+ int "Maximum number of CPUs (2-256)"
+ range 2 255
+ depends on SMP
++ default "16" if X86_64_XEN
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+@@ -387,6 +414,7 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+
+ config HPET_TIMER
+ bool
++ depends on !X86_64_XEN
+ default y
+ help
+ Use the IA-PC HPET (High Precision Event Timer) to manage
+@@ -407,7 +435,7 @@ config IOMMU
+ default y
+ select SWIOTLB
+ select AGP
+- depends on PCI
++ depends on PCI && !X86_64_XEN
+ help
+ Support for full DMA access of devices with 32bit memory access only
+ on systems with more than 3GB. This is usually needed for USB,
+@@ -423,7 +451,7 @@ config CALGARY_IOMMU
+ bool "IBM Calgary IOMMU support"
+ default y
+ select SWIOTLB
+- depends on PCI && EXPERIMENTAL
++ depends on PCI && !X86_64_XEN && EXPERIMENTAL
+ help
+ Support for hardware IOMMUs in IBM's xSeries x366 and x460
+ systems. Needed to run systems with more than 3GB of memory
+@@ -444,6 +472,7 @@ config SWIOTLB
+
+ config X86_MCE
+ bool "Machine check support" if EMBEDDED
++ depends on !X86_64_XEN
+ default y
+ help
+ Include a machine check error handler to report hardware errors.
+@@ -469,7 +498,7 @@ config X86_MCE_AMD
+
+ config KEXEC
+ bool "kexec system call (EXPERIMENTAL)"
+- depends on EXPERIMENTAL
++ depends on EXPERIMENTAL && !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -564,8 +593,9 @@ config GENERIC_PENDING_IRQ
+ default y
+
+ menu "Power management options"
++ depends on !XEN_UNPRIVILEGED_GUEST
+
+-source kernel/power/Kconfig
++source "kernel/power/Kconfig"
+
+ source "drivers/acpi/Kconfig"
+
+@@ -588,6 +618,22 @@ config PCI_MMCONFIG
+ bool "Support mmconfig PCI config space access"
+ depends on PCI && ACPI
+
++config XEN_PCIDEV_FRONTEND
++ bool "Xen PCI Frontend"
++ depends on PCI && X86_64_XEN
++ select HOTPLUG
++ default y
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -658,4 +704,6 @@ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/acpi/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/acpi/Makefile
+--- linux-2.6.18.8/arch/x86_64/kernel/acpi/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/acpi/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -5,5 +5,10 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wake
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y += processor.o
+ processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
++processor-$(CONFIG_XEN) := ../../../i386/kernel/acpi/processor.o
++ifneq ($(CONFIG_PROCESSOR_EXTERNAL_CONTROL),)
++processor-$(CONFIG_XEN) += ../../../i386/kernel/acpi/processor_extcntl_xen.o
++endif
+ endif
+
++disabled-obj-$(CONFIG_XEN) := wakeup.o
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/acpi/sleep-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/acpi/sleep-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/acpi/sleep-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/acpi/sleep-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,146 @@
++/*
++ * acpi.c - Architecture-Specific Low-Level ACPI Support
++ *
++ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
++ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
++ * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
++ * Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
++ * Copyright (C) 2003 Pavel Machek, SuSE Labs
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/bootmem.h>
++#include <linux/acpi.h>
++#include <linux/cpumask.h>
++
++#include <asm/mpspec.h>
++#include <asm/io.h>
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++
++/* --------------------------------------------------------------------------
++ Low-Level Sleep Support
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_SLEEP
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++
++static pgd_t low_ptr;
++
++static void init_low_mapping(void)
++{
++ pgd_t *slot0 = pgd_offset(current->mm, 0UL);
++ low_ptr = *slot0;
++ set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
++ WARN_ON(num_online_cpus() != 1);
++ local_flush_tlb();
++}
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ init_low_mapping();
++
++ memcpy((void *)acpi_wakeup_address, &wakeup_start,
++ &wakeup_end - &wakeup_start);
++ acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++ return 0;
++}
++
++/*
++ * acpi_restore_state
++ */
++void acpi_restore_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
++ local_flush_tlb();
++#endif
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page in low memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16M pages, but not
++ * <1M pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE)
++ printk(KERN_CRIT
++ "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++static int __init acpi_sleep_setup(char *str)
++{
++ while ((str != NULL) && (*str != '\0')) {
++ if (strncmp(str, "s3_bios", 7) == 0)
++ acpi_video_flags = 1;
++ if (strncmp(str, "s3_mode", 7) == 0)
++ acpi_video_flags |= 2;
++ str = strchr(str, ',');
++ if (str != NULL)
++ str += strspn(str, ", \t");
++ }
++
++ return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++#endif /* CONFIG_ACPI_PV_SLEEP */
++
++#endif /*CONFIG_ACPI_SLEEP */
++
++void acpi_pci_link_exit(void)
++{
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/apic-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/apic-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/apic-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/apic-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,197 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/idle.h>
++
++int apic_verbosity;
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But don't ack when the APIC is disabled. -AK
++ */
++ if (!disable_apic)
++ ack_APIC_irq();
++}
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++void smp_local_timer_interrupt(struct pt_regs *regs)
++{
++ profile_tick(CPU_PROFILING, regs);
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++ update_process_times(user_mode(regs));
++#endif
++#endif
++ /*
++ * We take the 'long' return path, and there every subsystem
++ * grabs the appropriate locks (kernel lock / irq lock).
++ *
++ * we might want to decouple profiling from the 'long path',
++ * and do the profiling totally in assembly.
++ *
++ * Currently this isn't too much of an issue (performance wise),
++ * we can take more than 100K local irqs per second on a 100 MHz P5.
++ */
++}
++
++/*
++ * Local APIC timer interrupt. This is the most natural way for doing
++ * local interrupts, but local timer interrupts can be emulated by
++ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
++ *
++ * [ if a single-CPU system runs an SMP kernel then we call the local
++ * interrupt as well. Thus we cannot inline the local irq ... ]
++ */
++void smp_apic_timer_interrupt(struct pt_regs *regs)
++{
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ add_pda(apic_timer_irqs, 1);
++
++ /*
++ * NOTE! We'd better ACK the irq immediately,
++ * because timer handling can be slow.
++ */
++ ack_APIC_irq();
++ /*
++ * update_process_times() expects us to have done irq_enter().
++ * Besides, if we don't, timer interrupts ignore the global
++ * interrupt lock, which is the WrongThing (tm) to do.
++ */
++ exit_idle();
++ irq_enter();
++ smp_local_timer_interrupt(regs);
++ irq_exit();
++}
++
++/*
++ * This interrupt should _never_ happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_spurious_interrupt(void)
++{
++ unsigned int v;
++ exit_idle();
++ irq_enter();
++ /*
++ * Check if this really is a spurious interrupt and ACK it
++ * if it is a vectored one. Just in case...
++ * Spurious interrupts should not be ACKed.
++ */
++ v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
++ if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
++ ack_APIC_irq();
++
++#if 0
++ static unsigned long last_warning;
++ static unsigned long skipped;
++
++ /* see sw-dev-man vol 3, chapter 7.4.13.5 */
++ if (time_before(last_warning+30*HZ,jiffies)) {
++ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
++ smp_processor_id(), skipped);
++ last_warning = jiffies;
++ skipped = 0;
++ } else {
++ skipped++;
++ }
++#endif
++ irq_exit();
++}
++
++/*
++ * This interrupt should never happen with our APIC/SMP architecture
++ */
++
++asmlinkage void smp_error_interrupt(void)
++{
++ unsigned int v, v1;
++
++ exit_idle();
++ irq_enter();
++ /* First tickle the hardware, only then report what went on. -- REW */
++ v = apic_read(APIC_ESR);
++ apic_write(APIC_ESR, 0);
++ v1 = apic_read(APIC_ESR);
++ ack_APIC_irq();
++ atomic_inc(&irq_err_count);
++
++ /* Here is what the APIC error bits mean:
++ 0: Send CS error
++ 1: Receive CS error
++ 2: Send accept error
++ 3: Receive accept error
++ 4: Reserved
++ 5: Send illegal vector
++ 6: Received illegal vector
++ 7: Illegal register address
++ */
++ printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
++ smp_processor_id(), v , v1);
++ irq_exit();
++}
++
++int disable_apic;
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 1;
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/asm-offsets.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/asm-offsets.c
+--- linux-2.6.18.8/arch/x86_64/kernel/asm-offsets.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/asm-offsets.c 2008-08-21 11:36:07.000000000 +0200
+@@ -67,8 +67,10 @@ int main(void)
+ DEFINE(pbe_address, offsetof(struct pbe, address));
+ DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+ DEFINE(pbe_next, offsetof(struct pbe, next));
++#ifndef CONFIG_X86_NO_TSS
+ BLANK();
+ DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
++#endif
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+ return 0;
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/crash.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/crash.c
+--- linux-2.6.18.8/arch/x86_64/kernel/crash.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/crash.c 2008-08-21 11:36:07.000000000 +0200
+@@ -92,6 +92,7 @@ static void crash_save_self(struct pt_re
+ crash_save_this_cpu(regs, cpu);
+ }
+
++#ifndef CONFIG_XEN
+ #ifdef CONFIG_SMP
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -156,6 +157,7 @@ static void nmi_shootdown_cpus(void)
+ /* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -173,6 +175,8 @@ void machine_crash_shutdown(struct pt_re
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = smp_processor_id();
++
++#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+
+ if(cpu_has_apic)
+@@ -181,6 +185,6 @@ void machine_crash_shutdown(struct pt_re
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
+-
++#endif /* CONFIG_XEN */
+ crash_save_self(regs);
+ }
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/e820.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/e820.c
+--- linux-2.6.18.8/arch/x86_64/kernel/e820.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/e820.c 2008-08-21 11:36:07.000000000 +0200
+@@ -93,7 +93,7 @@ static inline int bad_addr(unsigned long
+ * This function checks if any part of the range <start,end> is mapped
+ * with type.
+ */
+-int __meminit
++int
+ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
+ {
+ int i;
+@@ -107,6 +107,7 @@ e820_any_mapped(unsigned long start, uns
+ }
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(e820_any_mapped);
+
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/e820-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/e820-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/e820-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/e820-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,798 @@
++/*
++ * Handle the memory map.
++ * The functions here do the job until bootmem takes over.
++ *
++ * Getting sanitize_e820_map() in sync with i386 version by applying change:
++ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/string.h>
++#include <linux/kexec.h>
++#include <linux/module.h>
++
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <asm/proto.h>
++#include <asm/bootsetup.h>
++#include <asm/sections.h>
++#include <xen/interface/memory.h>
++
++/*
++ * PFN of last memory page.
++ */
++unsigned long end_pfn;
++EXPORT_SYMBOL(end_pfn);
++
++/*
++ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
++ * The direct mapping extends to end_pfn_map, so that we can directly access
++ * apertures, ACPI and other tables without having to play with fixmaps.
++ */
++unsigned long end_pfn_map;
++
++/*
++ * Last pfn which the user wants to use.
++ */
++unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
++
++extern struct resource code_resource, data_resource;
++
++#ifdef CONFIG_XEN
++extern struct e820map machine_e820;
++#endif
++
++/* Check for some hardcoded bad areas that early boot is not allowed to touch */
++static inline int bad_addr(unsigned long *addrp, unsigned long size)
++{
++ unsigned long addr = *addrp, last = addr + size;
++
++#ifndef CONFIG_XEN
++ /* various gunk below that needed for SMP startup */
++ if (addr < 0x8000) {
++ *addrp = 0x8000;
++ return 1;
++ }
++
++ /* direct mapping tables of the kernel */
++ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
++ *addrp = table_end << PAGE_SHIFT;
++ return 1;
++ }
++
++ /* initrd */
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
++ addr < INITRD_START+INITRD_SIZE) {
++ *addrp = INITRD_START + INITRD_SIZE;
++ return 1;
++ }
++#endif
++ /* kernel code + 640k memory hole (later should not be needed, but
++ be paranoid for now) */
++ if (last >= 640*1024 && addr < 1024*1024) {
++ *addrp = 1024*1024;
++ return 1;
++ }
++ if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
++ *addrp = __pa_symbol(&_end);
++ return 1;
++ }
++
++ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
++ *addrp = ebda_addr + ebda_size;
++ return 1;
++ }
++
++ /* XXX ramdisk image here? */
++#else
++ if (last < (table_end<<PAGE_SHIFT)) {
++ *addrp = table_end << PAGE_SHIFT;
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; i++) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
++/*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted and
++ * non-overlapping, which is the case.
++ */
++int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; i++) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ /* is the region (part) in overlap with the current region ?*/
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++
++ /* if the region is at the beginning of <start,end> we move
++		 * start to the end of the region, since the range is covered up to there
++ */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
++
++/*
++ * Find a free area in a specific range.
++ */
++unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long addr = ei->addr, last;
++ if (ei->type != E820_RAM)
++ continue;
++ if (addr < start)
++ addr = start;
++ if (addr > ei->addr + ei->size)
++ continue;
++ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
++ ;
++ last = addr + size;
++ if (last > ei->addr + ei->size)
++ continue;
++ if (last > end)
++ continue;
++ return addr;
++ }
++ return -1UL;
++}
++
++/*
++ * Free bootmem based on the e820 table for a node.
++ */
++void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr && last-addr >= PAGE_SIZE)
++ free_bootmem_node(pgdat, addr, last-addr);
++ }
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++unsigned long __init e820_end_of_ram(void)
++{
++ int i;
++ unsigned long end_pfn = 0;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long start, end;
++
++ start = round_up(ei->addr, PAGE_SIZE);
++ end = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (start >= end)
++ continue;
++ if (ei->type == E820_RAM) {
++ if (end > end_pfn<<PAGE_SHIFT)
++ end_pfn = end>>PAGE_SHIFT;
++ } else {
++ if (end > end_pfn_map<<PAGE_SHIFT)
++ end_pfn_map = end>>PAGE_SHIFT;
++ }
++ }
++
++ if (end_pfn > end_pfn_map)
++ end_pfn_map = end_pfn;
++ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
++ end_pfn_map = MAXMEM>>PAGE_SHIFT;
++ if (end_pfn > end_user_pfn)
++ end_pfn = end_user_pfn;
++ if (end_pfn > end_pfn_map)
++ end_pfn = end_pfn_map;
++
++ return end_pfn;
++}
++
++/*
++ * Compute how much memory is missing in a range.
++ * Unlike the other functions in this file, the arguments are in page numbers.
++ */
++unsigned long __init
++e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long ram = 0;
++ unsigned long start = start_pfn << PAGE_SHIFT;
++ unsigned long end = end_pfn << PAGE_SHIFT;
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr)
++ ram += last - addr;
++ }
++ return ((end - start) - ram) >> PAGE_SHIFT;
++}
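++
++/*
++ * Illustrative example (not part of the original patch): with a single
++ * RAM entry covering [0,1M) and a query over pfns [0,512) (i.e. bytes
++ * [0,2M) with 4k pages), 1M of RAM is counted, so the function reports
++ * (2M - 0 - 1M) >> PAGE_SHIFT = 256 missing pages -- the uncovered
++ * [1M,2M) half of the range.
++ */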
++
++/*
++ * Mark e820 reserved areas as busy for the resource manager.
++ */
++void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
++{
++ int i;
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++ res = alloc_bootmem_low(sizeof(struct resource));
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ request_resource(&iomem_resource, res);
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, &code_resource);
++ request_resource(res, &data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Add a memory region to the kernel e820 map.
++ */
++void __init add_memory_region(unsigned long start, unsigned long size, int type)
++{
++ int x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++}
++
++void __init e820_print_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ (unsigned long long) e820.map[i].addr,
++ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %u\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++ };
++ static struct change_member change_point_list[2*E820MAX] __initdata;
++ static struct change_member *change_point[2*E820MAX] __initdata;
++ static struct e820entry *overlap_list[E820MAX] __initdata;
++ static struct e820entry new_bios[E820MAX] __initdata;
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx;
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++ /* loop through change-points, determining effect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
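++
++/*
++ * Small worked example (illustrative only): entries [0,4k) RAM (type 1)
++ * and [2k,6k) reserved (type 2) yield change points at 0, 2k, 4k and 6k.
++ * Walking them, the overlap [2k,4k) takes the larger type, so the
++ * sanitized map becomes [0,2k) RAM followed by [2k,6k) reserved.
++ */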
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it.
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and almost every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1MB up. (The IBM
++ * ThinkPad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long start = biosmap->addr;
++ unsigned long size = biosmap->size;
++ unsigned long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ *
++ * This should be removed on Hammer, which is supposed not to have
++ * non-e820-covered ISA mappings there, but I had some strange
++ * problems so it stays for now. -AK
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++ } else
++ machine_e820 = e820;
++#endif
++
++ return 0;
++}
++
++#ifndef CONFIG_XEN
++void __init setup_memory_region(void)
++{
++ char *who = "BIOS-e820";
++
++ /*
++ * Try to copy the BIOS-supplied E820-map.
++ *
++ * Otherwise fake a memory map; one section from 0k->640k,
++ * the next section from 1mb->appropriate_mem_k
++ */
++ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
++ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
++ unsigned long mem_size;
++
++ /* compare results from other methods and take the greater */
++ if (ALT_MEM_K < EXT_MEM_K) {
++ mem_size = EXT_MEM_K;
++ who = "BIOS-88";
++ } else {
++ mem_size = ALT_MEM_K;
++ who = "BIOS-e801";
++ }
++
++ e820.nr_map = 0;
++ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
++ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
++ }
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ e820_print_map(who);
++}
++
++#else /* CONFIG_XEN */
++
++void __init setup_memory_region(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++ * the boot process we know we have plenty of slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8 << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ e820_print_map("Xen");
++}
++#endif
++
++void __init parse_memopt(char *p, char **from)
++{
++ int i;
++ unsigned long current_end;
++ unsigned long end;
++
++ end_user_pfn = memparse(p, from);
++ end_user_pfn >>= PAGE_SHIFT;
++
++ end = end_user_pfn<<PAGE_SHIFT;
++ i = e820.nr_map-1;
++ current_end = e820.map[i].addr + e820.map[i].size;
++
++ if (current_end < end) {
++ /*
++ * The e820 map ends before our requested size so
++ * extend the final entry to the requested address.
++ */
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size = end - e820.map[i].addr;
++ else
++ add_memory_region(current_end, end - current_end, E820_RAM);
++ }
++}
++
++void __init parse_memmapopt(char *p, char **from)
++{
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(p, from);
++ p = *from;
++ if (*p == '@') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*p == '#') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*p == '$') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ end_user_pfn = (mem_size >> PAGE_SHIFT);
++ }
++ p = *from;
++}
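++
++/*
++ * Accepted forms, as parsed above (sizes go through memparse(), so
++ * k/M/G suffixes work):
++ *
++ *	memmap=SIZE@ADDR	force a region of usable RAM
++ *	memmap=SIZE#ADDR	mark a region as ACPI data
++ *	memmap=SIZE$ADDR	mark a region as reserved
++ *	memmap=SIZE		limit end_user_pfn, like mem=SIZE
++ */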
++
++unsigned long pci_mem_start = 0xaeedbabe;
++EXPORT_SYMBOL(pci_mem_start);
++
++/*
++ * Search for the biggest gap in the low 32 bits of the e820
++ * memory space. We pass this space to PCI to assign MMIO resources
++ * to hotplug or unconfigured devices.
++ * Hopefully the BIOS left enough space.
++ */
++__init void e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long last;
++ int i;
++ int found = 0;
++
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ found = 1;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ if (!found) {
++ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
++ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
++ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
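++
++/*
++ * Rounding example (illustrative, not from the original patch): with a
++ * 256MB gap, the loop grows `round' from 1MB until (gapsize >> 4) no
++ * longer exceeds it, i.e. to 16MB, and pci_mem_start becomes roughly
++ * gapstart rounded up to a 16MB boundary. Larger gaps get coarser
++ * alignment; smaller ones stay at 1MB.
++ */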
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/early_printk-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/early_printk-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/early_printk-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/early_printk-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,302 @@
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/screen_info.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/fcntl.h>
++
++/* Simple VGA output */
++
++#ifdef __i386__
++#include <asm/setup.h>
++#define VGABASE (__ISA_IO_base + 0xb8000)
++#else
++#include <asm/bootsetup.h>
++#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
++#endif
++
++#ifndef CONFIG_XEN
++static int max_ypos = 25, max_xpos = 80;
++static int current_ypos = 25, current_xpos = 0;
++
++static void early_vga_write(struct console *con, const char *str, unsigned n)
++{
++ char c;
++ int i, k, j;
++
++ while ((c = *str++) != '\0' && n-- > 0) {
++ if (current_ypos >= max_ypos) {
++ /* scroll 1 line up */
++ for (k = 1, j = 0; k < max_ypos; k++, j++) {
++ for (i = 0; i < max_xpos; i++) {
++ writew(readw(VGABASE+2*(max_xpos*k+i)),
++ VGABASE + 2*(max_xpos*j + i));
++ }
++ }
++ for (i = 0; i < max_xpos; i++)
++ writew(0x720, VGABASE + 2*(max_xpos*j + i));
++ current_ypos = max_ypos-1;
++ }
++ if (c == '\n') {
++ current_xpos = 0;
++ current_ypos++;
++ } else if (c != '\r') {
++ writew(((0x7 << 8) | (unsigned short) c),
++ VGABASE + 2*(max_xpos*current_ypos +
++ current_xpos++));
++ if (current_xpos >= max_xpos) {
++ current_xpos = 0;
++ current_ypos++;
++ }
++ }
++ }
++}
++
++static struct console early_vga_console = {
++ .name = "earlyvga",
++ .write = early_vga_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
++
++static int early_serial_base = 0x3f8; /* ttyS0 */
++
++#define XMTRDY 0x20
++
++#define DLAB 0x80
++
++#define TXR 0 /* Transmit register (WRITE) */
++#define RXR 0 /* Receive register (READ) */
++#define IER 1 /* Interrupt Enable */
++#define IIR 2 /* Interrupt ID */
++#define FCR 2 /* FIFO control */
++#define LCR 3 /* Line control */
++#define MCR 4 /* Modem control */
++#define LSR 5 /* Line Status */
++#define MSR 6 /* Modem Status */
++#define DLL 0 /* Divisor Latch Low */
++#define DLH 1 /* Divisor latch High */
++
++static int early_serial_putc(unsigned char ch)
++{
++ unsigned timeout = 0xffff;
++ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
++ cpu_relax();
++ outb(ch, early_serial_base + TXR);
++ return timeout ? 0 : -1;
++}
++
++static void early_serial_write(struct console *con, const char *s, unsigned n)
++{
++ while (*s && n-- > 0) {
++ early_serial_putc(*s);
++ if (*s == '\n')
++ early_serial_putc('\r');
++ s++;
++ }
++}
++
++#define DEFAULT_BAUD 9600
++
++static __init void early_serial_init(char *s)
++{
++ unsigned char c;
++ unsigned divisor;
++ unsigned baud = DEFAULT_BAUD;
++ char *e;
++
++ if (*s == ',')
++ ++s;
++
++ if (*s) {
++ unsigned port;
++ if (!strncmp(s,"0x",2)) {
++ early_serial_base = simple_strtoul(s, &e, 16);
++ } else {
++ static int bases[] = { 0x3f8, 0x2f8 };
++
++ if (!strncmp(s,"ttyS",4))
++ s += 4;
++ port = simple_strtoul(s, &e, 10);
++ if (port > 1 || s == e)
++ port = 0;
++ early_serial_base = bases[port];
++ }
++ s += strcspn(s, ",");
++ if (*s == ',')
++ s++;
++ }
++
++ outb(0x3, early_serial_base + LCR); /* 8n1 */
++ outb(0, early_serial_base + IER); /* no interrupt */
++ outb(0, early_serial_base + FCR); /* no fifo */
++ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
++
++ if (*s) {
++ baud = simple_strtoul(s, &e, 0);
++ if (baud == 0 || s == e)
++ baud = DEFAULT_BAUD;
++ }
++
++ divisor = 115200 / baud;
++ c = inb(early_serial_base + LCR);
++ outb(c | DLAB, early_serial_base + LCR);
++ outb(divisor & 0xff, early_serial_base + DLL);
++ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
++ outb(c & ~DLAB, early_serial_base + LCR);
++}
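++
++/*
++ * Example (illustrative): "earlyprintk=serial,ttyS0,115200" selects port
++ * 0x3f8 and programs a divisor of 115200/115200 = 1; an unknown or zero
++ * baud falls back to DEFAULT_BAUD (9600), i.e. divisor 12.
++ */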
++
++#else /* CONFIG_XEN */
++
++static void
++early_serial_write(struct console *con, const char *s, unsigned count)
++{
++ int n;
++
++ while (count > 0) {
++ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++ if (n <= 0)
++ break;
++ count -= n;
++ s += n;
++ }
++}
++
++static __init void early_serial_init(char *s)
++{
++}
++
++/*
++ * No early VGA console on Xen, as we do not have convenient ISA-space
++ * mappings. Someone should fix this for domain 0. For now, use fake serial.
++ */
++#define early_vga_console early_serial_console
++
++#endif
++
++static struct console early_serial_console = {
++ .name = "earlyser",
++ .write = early_serial_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Console interface to a host file on AMD's SimNow! */
++
++static int simnow_fd;
++
++enum {
++ MAGIC1 = 0xBACCD00A,
++ MAGIC2 = 0xCA110000,
++ XOPEN = 5,
++ XWRITE = 4,
++};
++
++static noinline long simnow(long cmd, long a, long b, long c)
++{
++ long ret;
++ asm volatile("cpuid" :
++ "=a" (ret) :
++ "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
++ return ret;
++}
++
++void __init simnow_init(char *str)
++{
++ char *fn = "klog";
++ if (*str == '=')
++ fn = ++str;
++ /* error ignored */
++ simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
++}
++
++static void simnow_write(struct console *con, const char *s, unsigned n)
++{
++ simnow(XWRITE, simnow_fd, (unsigned long)s, n);
++}
++
++static struct console simnow_console = {
++ .name = "simnow",
++ .write = simnow_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Direct interface for emergencies */
++struct console *early_console = &early_vga_console;
++static int early_console_initialized = 0;
++
++void early_printk(const char *fmt, ...)
++{
++ char buf[512];
++ int n;
++ va_list ap;
++
++ va_start(ap,fmt);
++ n = vscnprintf(buf,512,fmt,ap);
++ early_console->write(early_console,buf,n);
++ va_end(ap);
++}
++
++static int __initdata keep_early;
++
++int __init setup_early_printk(char *opt)
++{
++ char *space;
++ char buf[256];
++
++ if (early_console_initialized)
++ return 1;
++
++ strlcpy(buf,opt,sizeof(buf));
++ space = strchr(buf, ' ');
++ if (space)
++ *space = 0;
++
++ if (strstr(buf,"keep"))
++ keep_early = 1;
++
++ if (!strncmp(buf, "serial", 6)) {
++ early_serial_init(buf + 6);
++ early_console = &early_serial_console;
++ } else if (!strncmp(buf, "ttyS", 4)) {
++ early_serial_init(buf);
++ early_console = &early_serial_console;
++ } else if (!strncmp(buf, "vga", 3)
++#ifndef CONFIG_XEN
++ && SCREEN_INFO.orig_video_isVGA == 1) {
++ max_xpos = SCREEN_INFO.orig_video_cols;
++ max_ypos = SCREEN_INFO.orig_video_lines;
++ current_ypos = SCREEN_INFO.orig_y;
++#else
++ || !strncmp(buf, "xen", 3)) {
++#endif
++ early_console = &early_vga_console;
++ } else if (!strncmp(buf, "simnow", 6)) {
++ simnow_init(buf + 6);
++ early_console = &simnow_console;
++ keep_early = 1;
++ }
++ early_console_initialized = 1;
++ register_console(early_console);
++ return 0;
++}
++
++void __init disable_early_printk(void)
++{
++ if (!early_console_initialized || !early_console)
++ return;
++ if (!keep_early) {
++ printk("disabling early console\n");
++ unregister_console(early_console);
++ early_console_initialized = 0;
++ } else {
++ printk("keeping early console\n");
++ }
++}
++
++__setup("earlyprintk=", setup_early_printk);
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/entry-xen.S linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/entry-xen.S
+--- linux-2.6.18.8/arch/x86_64/kernel/entry-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/entry-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1322 @@
++/*
++ * linux/arch/x86_64/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ *
++ * $Id$
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Asit Mallick <asit.k.mallick@intel.com>
++ * Modified for Xen
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after an interrupt and after each system call.
++ *
++ * Normal syscalls and interrupts don't save a full stack frame; this is
++ * only done for syscall tracing, signals, or fork/exec et al.
++ *
++ * A note on terminology:
++ * - top of stack: Architecture defined interrupt frame from SS to RIP
++ * at the top of the kernel process stack.
++ * - partial stack frame: partially saved registers up to R11.
++ * - full stack frame: Like partial stack frame, but all registers saved.
++ *
++ * TODO:
++ * - schedule it carefully for the final hardware.
++ */
++
++#define ASSEMBLY 1
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/msr.h>
++#include <asm/unistd.h>
++#include <asm/thread_info.h>
++#include <asm/hw_irq.h>
++#include <asm/page.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <xen/interface/arch-x86_64.h>
++#include <xen/interface/features.h>
++
++#include "xen_entry.S"
++
++ .code64
++
++#ifndef CONFIG_PREEMPT
++#define retint_kernel retint_restore_args
++#endif
++
++
++.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
++ jnc 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++NMI_MASK = 0x80000000
++
++/*
++ * C code is not supposed to know about undefined top of stack. Every time
++ * a C function with a pt_regs argument is called from the SYSCALL-based
++ * fast path, FIXUP_TOP_OF_STACK is needed.
++ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
++ * manipulation.
++ */
++
++ /* %rsp:at FRAMEEND */
++ .macro FIXUP_TOP_OF_STACK tmp
++ movq $__USER_CS,CS(%rsp)
++ movq $-1,RCX(%rsp)
++ .endm
++
++ .macro RESTORE_TOP_OF_STACK tmp,offset=0
++ .endm
++
++ .macro FAKE_STACK_FRAME child_rip
++ /* push in order ss, rsp, eflags, cs, rip */
++ xorl %eax, %eax
++ pushq %rax /* ss */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET ss,0*/
++ pushq %rax /* rsp */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rsp,0
++ pushq $(1<<9) /* eflags - interrupts on */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET rflags,0*/
++ pushq $__KERNEL_CS /* cs */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET cs,0*/
++ pushq \child_rip /* rip */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip,0
++ pushq %rax /* orig rax */
++ CFI_ADJUST_CFA_OFFSET 8
++ .endm
++
++ .macro UNFAKE_STACK_FRAME
++ addq $8*6, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8)
++ .endm
++
++ .macro CFI_DEFAULT_STACK start=1,adj=0
++ .if \start
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-(\adj*ARGOFFSET)
++ .else
++ CFI_DEF_CFA_OFFSET SS+8-(\adj*ARGOFFSET)
++ .endif
++ .if \adj == 0
++ CFI_REL_OFFSET r15,R15
++ CFI_REL_OFFSET r14,R14
++ CFI_REL_OFFSET r13,R13
++ CFI_REL_OFFSET r12,R12
++ CFI_REL_OFFSET rbp,RBP
++ CFI_REL_OFFSET rbx,RBX
++ .endif
++ CFI_REL_OFFSET r11,R11
++ CFI_REL_OFFSET r10,R10
++ CFI_REL_OFFSET r9,R9
++ CFI_REL_OFFSET r8,R8
++ CFI_REL_OFFSET rax,RAX
++ CFI_REL_OFFSET rcx,RCX
++ CFI_REL_OFFSET rdx,RDX
++ CFI_REL_OFFSET rsi,RSI
++ CFI_REL_OFFSET rdi,RDI
++ CFI_REL_OFFSET rip,RIP
++ /*CFI_REL_OFFSET cs,CS*/
++ /*CFI_REL_OFFSET rflags,EFLAGS*/
++ CFI_REL_OFFSET rsp,RSP
++ /*CFI_REL_OFFSET ss,SS*/
++ .endm
++
++ /*
++ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
++ * struct iret_context {
++ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++ * };
++ * with rax, r11, and rcx being taken care of in the hypercall stub.
++ */
++ .macro HYPERVISOR_IRET flag
++ testb $3,1*8(%rsp)
++ jnz 2f
++ testl $NMI_MASK,2*8(%rsp)
++ jnz 2f
++
++ cmpb $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
++ jne 1f
++
++ /* Direct iret to kernel space. Correct CS and SS. */
++ orl $3,1*8(%rsp)
++ orl $3,4*8(%rsp)
++1: iretq
++
++2: /* Slow iret via hypervisor. */
++ andl $~NMI_MASK, 2*8(%rsp)
++ pushq $\flag
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++ .endm
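++
++/*
++ * Flow note (descriptive, not part of the original comment): a return
++ * to kernel space with NMI_MASK clear can iretq directly, forcing
++ * CS/SS to ring 3 unless the supervisor_mode_kernel feature is set;
++ * returns to user space, or with NMI_MASK set, take the slow
++ * __HYPERVISOR_iret hypercall, pushing \flag (e.g. VGCF_IN_SYSCALL
++ * from the syscall fast path) for the hypervisor.
++ */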
++
++/*
++ * A newly forked process directly context switches into this.
++ */
++/* rdi: prev */
++ENTRY(ret_from_fork)
++ CFI_DEFAULT_STACK
++ push kernel_eflags(%rip)
++ CFI_ADJUST_CFA_OFFSET 4
++ popf # reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET -4
++ call schedule_tail
++ GET_THREAD_INFO(%rcx)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
++ jnz rff_trace
++rff_action:
++ RESTORE_REST
++ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
++ je int_ret_from_sys_call
++ testl $_TIF_IA32,threadinfo_flags(%rcx)
++ jnz int_ret_from_sys_call
++ RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
++ jmp ret_from_sys_call
++rff_trace:
++ movq %rsp,%rdi
++ call syscall_trace_leave
++ GET_THREAD_INFO(%rcx)
++ jmp rff_action
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * initial frame state for interrupts and exceptions
++ */
++ .macro _frame ref
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-\ref
++ /*CFI_REL_OFFSET ss,SS-\ref*/
++ CFI_REL_OFFSET rsp,RSP-\ref
++ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
++ /*CFI_REL_OFFSET cs,CS-\ref*/
++ CFI_REL_OFFSET rip,RIP-\ref
++ .endm
++
++/*
++ * System call entry. Up to 6 arguments in registers are supported.
++ *
++ * SYSCALL does not save anything on the stack and does not change the
++ * stack pointer.
++ */
++
++/*
++ * Register setup:
++ * rax system call number
++ * rdi arg0
++ * rcx return address for syscall/sysret, C arg3
++ * rsi arg1
++ * rdx arg2
++ * r10 arg3 (--> moved to rcx for C)
++ * r8 arg4
++ * r9 arg5
++ * r11 eflags for syscall/sysret, temporary for C
++ * r12-r15,rbp,rbx saved by C code, not touched.
++ *
++ * Interrupts are enabled on entry.
++ * Only called from user space.
++ *
++ * XXX if we had a free scratch register we could save the RSP into the stack frame
++ * and report it properly in ps. Unfortunately we don't.
++ *
++ * When the user can change the frame, always force IRET. That is because
++ * IRET deals with non-canonical addresses better. SYSRET has trouble
++ * with them due to bugs in both AMD and Intel CPUs.
++ */
++
++ENTRY(system_call)
++ _frame (RIP-0x10)
++ SAVE_ARGS -8,0
++ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
++ GET_THREAD_INFO(%rcx)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++ CFI_REMEMBER_STATE
++ jnz tracesys
++ cmpq $__NR_syscall_max,%rax
++ ja badsys
++ movq %r10,%rcx
++ call *sys_call_table(,%rax,8) # XXX: rip relative
++ movq %rax,RAX-ARGOFFSET(%rsp)
++/*
++ * Syscall return path ending with SYSRET (fast path)
++ * Has incomplete stack frame and undefined top of stack.
++ */
++ .globl ret_from_sys_call
++ret_from_sys_call:
++ movl $_TIF_ALLWORK_MASK,%edi
++ /* edi: flagmask */
++sysret_check:
++ GET_THREAD_INFO(%rcx)
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ CFI_REMEMBER_STATE
++ jnz sysret_careful
++ /*
++ * sysretq will re-enable interrupts:
++ */
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET VGCF_IN_SYSCALL
++
++ /* Handle reschedules */
++ /* edx: work, edi: workmask */
++sysret_careful:
++ CFI_RESTORE_STATE
++ bt $TIF_NEED_RESCHED,%edx
++ jnc sysret_signal
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp sysret_check
++
++ /* Handle a signal */
++sysret_signal:
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++ jz 1f
++
++ /* Really a signal */
++ /* edx: work flags (arg3) */
++ leaq do_notify_resume(%rip),%rax
++ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
++ xorl %esi,%esi # oldset -> arg2
++ call ptregscall_common
++1: movl $_TIF_NEED_RESCHED,%edi
++ /* Use IRET because the user could have changed the frame. This
++ works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++
++badsys:
++ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++ jmp ret_from_sys_call
++
++ /* Do syscall tracing */
++tracesys:
++ CFI_RESTORE_STATE
++ SAVE_REST
++ movq $-ENOSYS,RAX(%rsp)
++ FIXUP_TOP_OF_STACK %rdi
++ movq %rsp,%rdi
++ call syscall_trace_enter
++ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ cmpq $__NR_syscall_max,%rax
++ ja int_ret_from_sys_call
++ movq %r10,%rcx /* fixup for C */
++ call *sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ /* Use IRET because user could have changed frame */
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(system_call)
++
++/*
++ * Syscall return path ending with IRET.
++ * Has correct top of stack, but partial stack frame.
++ */
++ENTRY(int_ret_from_sys_call)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
++ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
++ CFI_REL_OFFSET rax,RAX-ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
++ CFI_REL_OFFSET r8,R8-ARGOFFSET
++ CFI_REL_OFFSET r9,R9-ARGOFFSET
++ CFI_REL_OFFSET r10,R10-ARGOFFSET
++ CFI_REL_OFFSET r11,R11-ARGOFFSET
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ testb $3,CS-ARGOFFSET(%rsp)
++ jnz 1f
++ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
++ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
++ jmp retint_restore_args # return from ring3 kernel
++1:
++ movl $_TIF_ALLWORK_MASK,%edi
++ /* edi: mask to check */
++int_with_check:
++ GET_THREAD_INFO(%rcx)
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ jnz int_careful
++ andl $~TS_COMPAT,threadinfo_status(%rcx)
++ jmp retint_restore_args
++
++ /* Either reschedule or signal or syscall exit tracking needed. */
++ /* First do a reschedule test. */
++ /* edx: work, edi: workmask */
++int_careful:
++ bt $TIF_NEED_RESCHED,%edx
++ jnc int_very_careful
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++
++ /* handle signals and tracing -- both require a full stack frame */
++int_very_careful:
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ SAVE_REST
++ /* Check for syscall exit trace */
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
++ jz int_signal
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ leaq 8(%rsp),%rdi # &ptregs -> arg1
++ call syscall_trace_leave
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_restore_rest
++
++int_signal:
++ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
++ jz 1f
++ movq %rsp,%rdi # &ptregs -> arg1
++ xorl %esi,%esi # oldset -> arg2
++ call do_notify_resume
++1: movl $_TIF_NEED_RESCHED,%edi
++int_restore_rest:
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++ CFI_ENDPROC
++END(int_ret_from_sys_call)
++
++/*
++ * Certain special system calls that need to save a complete full stack frame.
++ */
++
++ .macro PTREGSCALL label,func,arg
++ .globl \label
++\label:
++ leaq \func(%rip),%rax
++ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++ jmp ptregscall_common
++END(\label)
++ .endm
++
++ CFI_STARTPROC
++
++ PTREGSCALL stub_clone, sys_clone, %r8
++ PTREGSCALL stub_fork, sys_fork, %rdi
++ PTREGSCALL stub_vfork, sys_vfork, %rdi
++ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
++ PTREGSCALL stub_iopl, sys_iopl, %rsi
++
++ENTRY(ptregscall_common)
++ popq %r11
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rip, r11
++ SAVE_REST
++ movq %r11, %r15
++ CFI_REGISTER rip, r15
++ FIXUP_TOP_OF_STACK %r11
++ call *%rax
++ RESTORE_TOP_OF_STACK %r11
++ movq %r15, %r11
++ CFI_REGISTER rip, r11
++ RESTORE_REST
++ pushq %r11
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip, 0
++ ret
++ CFI_ENDPROC
++END(ptregscall_common)
++
++ENTRY(stub_execve)
++ CFI_STARTPROC
++ popq %r11
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rip, r11
++ SAVE_REST
++ FIXUP_TOP_OF_STACK %r11
++ call sys_execve
++ RESTORE_TOP_OF_STACK %r11
++ movq %rax,RAX(%rsp)
++ RESTORE_REST
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(stub_execve)
++
++/*
++ * sigreturn is special because it needs to restore all registers on return.
++ * This cannot be done with SYSRET, so use the IRET return path instead.
++ */
++ENTRY(stub_rt_sigreturn)
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ADJUST_CFA_OFFSET -8
++ SAVE_REST
++ movq %rsp,%rdi
++ FIXUP_TOP_OF_STACK %r11
++ call sys_rt_sigreturn
++ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
++ RESTORE_REST
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(stub_rt_sigreturn)
++
++/* initial frame state for interrupts (and exceptions without error code) */
++#define INTR_FRAME _frame (RIP-0x10); \
++ CFI_REL_OFFSET rcx,0; \
++ CFI_REL_OFFSET r11,8
++
++/* initial frame state for exceptions with error code (and interrupts with
++ vector already pushed) */
++#define XCPT_FRAME _frame (RIP-0x18); \
++ CFI_REL_OFFSET rcx,0; \
++ CFI_REL_OFFSET r11,8
++
++/*
++ * Interrupt exit.
++ *
++ */
++
++retint_check:
++ CFI_DEFAULT_STACK adj=1
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ CFI_REMEMBER_STATE
++ jnz retint_careful
++retint_restore_args:
++ movl EFLAGS-REST_SKIP(%rsp), %eax
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ XEN_GET_VCPU_INFO(%rsi)
++ andb evtchn_upcall_mask(%rsi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ jnz restore_all_enable_events # != 0 => enable event delivery
++ XEN_PUT_VCPU_INFO(%rsi)
++
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET 0
++
++ /* edi: workmask, edx: work */
++retint_careful:
++ CFI_RESTORE_STATE
++ bt $TIF_NEED_RESCHED,%edx
++ jnc retint_signal
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++/* sti */
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ GET_THREAD_INFO(%rcx)
++ XEN_BLOCK_EVENTS(%rsi)
++/* cli */
++ TRACE_IRQS_OFF
++ jmp retint_check
++
++retint_signal:
++ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++ jz retint_restore_args
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ SAVE_REST
++ movq $-1,ORIG_RAX(%rsp)
++ xorl %esi,%esi # oldset
++ movq %rsp,%rdi # &pt_regs
++ call do_notify_resume
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ movl $_TIF_NEED_RESCHED,%edi
++ GET_THREAD_INFO(%rcx)
++ jmp retint_check
++
++#ifdef CONFIG_PREEMPT
++ /* Returning to kernel space. Check if we need preemption */
++ /* rcx: threadinfo. interrupts off. */
++ .p2align
++retint_kernel:
++ cmpl $0,threadinfo_preempt_count(%rcx)
++ jnz retint_restore_args
++ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
++ jnc retint_restore_args
++ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
++ jnc retint_restore_args
++ call preempt_schedule_irq
++ jmp retint_kernel /* check again */
++#endif
++
++ CFI_ENDPROC
++END(retint_check)
++
++#ifndef CONFIG_XEN
++/*
++ * APIC interrupts.
++ */
++ .macro apicinterrupt num,func
++ INTR_FRAME
++ pushq $~(\num)
++ CFI_ADJUST_CFA_OFFSET 8
++ interrupt \func
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++ENTRY(thermal_interrupt)
++ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
++END(thermal_interrupt)
++
++ENTRY(threshold_interrupt)
++ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
++END(threshold_interrupt)
++
++#ifdef CONFIG_SMP
++ENTRY(reschedule_interrupt)
++ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
++END(reschedule_interrupt)
++
++ .macro INVALIDATE_ENTRY num
++ENTRY(invalidate_interrupt\num)
++ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
++END(invalidate_interrupt\num)
++ .endm
++
++ INVALIDATE_ENTRY 0
++ INVALIDATE_ENTRY 1
++ INVALIDATE_ENTRY 2
++ INVALIDATE_ENTRY 3
++ INVALIDATE_ENTRY 4
++ INVALIDATE_ENTRY 5
++ INVALIDATE_ENTRY 6
++ INVALIDATE_ENTRY 7
++
++ENTRY(call_function_interrupt)
++ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
++END(call_function_interrupt)
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ENTRY(apic_timer_interrupt)
++ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
++END(apic_timer_interrupt)
++
++ENTRY(error_interrupt)
++ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
++END(error_interrupt)
++
++ENTRY(spurious_interrupt)
++ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
++END(spurious_interrupt)
++#endif
++#endif /* !CONFIG_XEN */
++
++/*
++ * Exception entry points.
++ */
++ .macro zeroentry sym
++ INTR_FRAME
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ CFI_ADJUST_CFA_OFFSET -0x10
++ pushq $0 /* push error code/oldrax */
++ CFI_ADJUST_CFA_OFFSET 8
++ pushq %rax /* push real oldrax to the rdi slot */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rax,0
++ leaq \sym(%rip),%rax
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++ .macro errorentry sym
++ XCPT_FRAME
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x10,%rsp /* rsp points to the error code */
++ CFI_ADJUST_CFA_OFFSET -0x10
++ pushq %rax
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rax,0
++ leaq \sym(%rip),%rax
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++#if 0 /* not XEN */
++ /* error code is on the stack already */
++ /* handle NMI like exceptions that can happen everywhere */
++ .macro paranoidentry sym, ist=0, irqtrace=1
++ movq (%rsp),%rcx
++ movq 8(%rsp),%r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ SAVE_ALL
++ cld
++#if 0 /* not XEN */
++ movl $1,%ebx
++ movl $MSR_GS_BASE,%ecx
++ rdmsr
++ testl %edx,%edx
++ js 1f
++ swapgs
++ xorl %ebx,%ebx
++1:
++#endif
++ .if \ist
++ movq %gs:pda_data_offset, %rbp
++ .endif
++ movq %rsp,%rdi
++ movq ORIG_RAX(%rsp),%rsi
++ movq $-1,ORIG_RAX(%rsp)
++ .if \ist
++ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++ .endif
++ call \sym
++ .if \ist
++ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++ .endif
++/* cli */
++ XEN_BLOCK_EVENTS(%rsi)
++ .if \irqtrace
++ TRACE_IRQS_OFF
++ .endif
++ .endm
++
++ /*
++ * "Paranoid" exit path from exception stack.
++ * Paranoid because this is used by NMIs and cannot take
++ * any kernel state for granted.
++ * We don't do kernel preemption checks here, because only
++ * NMI should be common and it does not enable IRQs and
++ * cannot get reschedule ticks.
++ *
++ * "trace" is 0 for the NMI handler only, because irq-tracing
++ * is fundamentally NMI-unsafe. (we cannot change the soft and
++ * hard flags at once, atomically)
++ */
++ .macro paranoidexit trace=1
++ /* ebx: no swapgs flag */
++paranoid_exit\trace:
++ testl %ebx,%ebx /* swapgs needed? */
++ jnz paranoid_restore\trace
++ testl $3,CS(%rsp)
++ jnz paranoid_userspace\trace
++paranoid_swapgs\trace:
++ TRACE_IRQS_IRETQ 0
++ swapgs
++paranoid_restore\trace:
++ RESTORE_ALL 8
++ iretq
++paranoid_userspace\trace:
++ GET_THREAD_INFO(%rcx)
++ movl threadinfo_flags(%rcx),%ebx
++ andl $_TIF_WORK_MASK,%ebx
++ jz paranoid_swapgs\trace
++ movq %rsp,%rdi /* &pt_regs */
++ call sync_regs
++ movq %rax,%rsp /* switch stack for scheduling */
++ testl $_TIF_NEED_RESCHED,%ebx
++ jnz paranoid_schedule\trace
++ movl %ebx,%edx /* arg3: thread flags */
++ .if \trace
++ TRACE_IRQS_ON
++ .endif
++ sti
++ xorl %esi,%esi /* arg2: oldset */
++ movq %rsp,%rdi /* arg1: &pt_regs */
++ call do_notify_resume
++ cli
++ .if \trace
++ TRACE_IRQS_OFF
++ .endif
++ jmp paranoid_userspace\trace
++paranoid_schedule\trace:
++ .if \trace
++ TRACE_IRQS_ON
++ .endif
++ sti
++ call schedule
++ cli
++ .if \trace
++ TRACE_IRQS_OFF
++ .endif
++ jmp paranoid_userspace\trace
++ CFI_ENDPROC
++ .endm
++#endif
++
++/*
++ * Exception entry point. This expects an error code/orig_rax on the stack
++ * and the exception handler in %rax.
++ */
++ENTRY(error_entry)
++ _frame RDI
++ CFI_REL_OFFSET rax,0
++ /* rdi slot contains rax, oldrax contains error code */
++ cld
++ subq $14*8,%rsp
++ CFI_ADJUST_CFA_OFFSET (14*8)
++ movq %rsi,13*8(%rsp)
++ CFI_REL_OFFSET rsi,RSI
++ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
++ CFI_REGISTER rax,rsi
++ movq %rdx,12*8(%rsp)
++ CFI_REL_OFFSET rdx,RDX
++ movq %rcx,11*8(%rsp)
++ CFI_REL_OFFSET rcx,RCX
++ movq %rsi,10*8(%rsp) /* store rax */
++ CFI_REL_OFFSET rax,RAX
++ movq %r8, 9*8(%rsp)
++ CFI_REL_OFFSET r8,R8
++ movq %r9, 8*8(%rsp)
++ CFI_REL_OFFSET r9,R9
++ movq %r10,7*8(%rsp)
++ CFI_REL_OFFSET r10,R10
++ movq %r11,6*8(%rsp)
++ CFI_REL_OFFSET r11,R11
++ movq %rbx,5*8(%rsp)
++ CFI_REL_OFFSET rbx,RBX
++ movq %rbp,4*8(%rsp)
++ CFI_REL_OFFSET rbp,RBP
++ movq %r12,3*8(%rsp)
++ CFI_REL_OFFSET r12,R12
++ movq %r13,2*8(%rsp)
++ CFI_REL_OFFSET r13,R13
++ movq %r14,1*8(%rsp)
++ CFI_REL_OFFSET r14,R14
++ movq %r15,(%rsp)
++ CFI_REL_OFFSET r15,R15
++#if 0
++ cmpl $__KERNEL_CS,CS(%rsp)
++ CFI_REMEMBER_STATE
++ je error_kernelspace
++#endif
++error_call_handler:
++ movq %rdi, RDI(%rsp)
++ CFI_REL_OFFSET rdi,RDI
++ movq %rsp,%rdi
++ movq ORIG_RAX(%rsp),%rsi # get error code
++ movq $-1,ORIG_RAX(%rsp)
++ call *%rax
++error_exit:
++ RESTORE_REST
++/* cli */
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ testb $3,CS-ARGOFFSET(%rsp)
++ jz retint_kernel
++ movl threadinfo_flags(%rcx),%edx
++ movl $_TIF_WORK_MASK,%edi
++ andl %edi,%edx
++ jnz retint_careful
++ /*
++ * The iret might restore flags:
++ */
++ TRACE_IRQS_IRETQ
++ jmp retint_restore_args
++
++#if 0
++ /*
++ * We need to re-write the logic here because we don't do iretq
++ * to return to user mode. It's still possible that we get a trap/fault
++ * in the kernel (when accessing buffers pointed to by system calls,
++ * for example).
++ *
++ */
++ CFI_RESTORE_STATE
++error_kernelspace:
++ incl %ebx
++ /* There are two places in the kernel that can potentially fault with
++ usergs. Handle them here. The exception handlers after
++ iret run with kernel gs again, so don't set the user space flag.
++ B-stepping K8s sometimes report a truncated RIP for IRET
++ exceptions returning to compat mode. Check for these here too. */
++ leaq iret_label(%rip),%rbp
++ cmpq %rbp,RIP(%rsp)
++ je error_swapgs
++ movl %ebp,%ebp /* zero extend */
++ cmpq %rbp,RIP(%rsp)
++ je error_swapgs
++ cmpq $gs_change,RIP(%rsp)
++ je error_swapgs
++ jmp error_sti
++#endif
++ CFI_ENDPROC
++END(error_entry)
++
++ENTRY(hypervisor_callback)
++ zeroentry do_hypervisor_callback
++END(hypervisor_callback)
++
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
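++# (Orientation note, not from the original patch: the critical region is
++# the stretch between the scrit and ecrit labels below -- from testing
++# for pending events up to the HYPERVISOR_IRET. If an event is pending
++# we jump to 14:, re-block events and restart the handler at 11: instead
++# of letting activations nest.)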
++ENTRY(do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
++ CFI_STARTPROC
++# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
++# see the correct pointer to the pt_regs.
++ movq %rdi, %rsp # we don't return, adjust the stack frame
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++11: incl %gs:pda_irqcount
++ movq %rsp,%rbp
++ CFI_DEF_CFA_REGISTER rbp
++ cmovzq %gs:pda_irqstackptr,%rsp
++ pushq %rbp # backlink for old unwinder
++ call evtchn_do_upcall
++ popq %rsp
++ CFI_DEF_CFA_REGISTER rsp
++ decl %gs:pda_irqcount
++ jmp error_exit
++ CFI_ENDPROC
++END(do_hypervisor_callback)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++KPROBE_ENTRY(nmi)
++ zeroentry do_nmi_callback
++ENTRY(do_nmi_callback)
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++ call do_nmi
++ orl $NMI_MASK,EFLAGS(%rsp)
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ jmp retint_restore_args
++ CFI_ENDPROC
++ .previous .text
++END(nmi)
++#endif
++
++ ALIGN
++restore_all_enable_events:
++ CFI_DEFAULT_STACK adj=1
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
++
++scrit: /**** START OF CRITICAL REGION ****/
++ XEN_TEST_PENDING(%rsi)
++ CFI_REMEMBER_STATE
++ jnz 14f # process more events if necessary...
++ XEN_PUT_VCPU_INFO(%rsi)
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET 0
++
++ CFI_RESTORE_STATE
++14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
++ XEN_PUT_VCPU_INFO(%rsi)
++ SAVE_REST
++ movq %rsp,%rdi # set the argument again
++ jmp 11b
++ CFI_ENDPROC
++ecrit: /**** END OF CRITICAL REGION ****/
++# At this point, unlike on x86-32, we don't do the fixup: it would only
++# complicate the code, and the stack frame is more complex on x86-64.
++# When the kernel is interrupted in the critical section it simply does
++# IRET, and everything is restored at that point, i.e. it just resumes
++# from the interrupted instruction with the same context.
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we do not need to fix up as Xen has already reloaded all segment
++# registers that could be reloaded and zeroed the others.
++# Category 2 we fix up by killing the current process. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by comparing each saved segment register
++# with its current contents: any discrepancy means we are in category 1.
++ENTRY(failsafe_callback)
++ _frame (RIP-0x30)
++ CFI_REL_OFFSET rcx, 0
++ CFI_REL_OFFSET r11, 8
++ movw %ds,%cx
++ cmpw %cx,0x10(%rsp)
++ CFI_REMEMBER_STATE
++ jne 1f
++ movw %es,%cx
++ cmpw %cx,0x18(%rsp)
++ jne 1f
++ movw %fs,%cx
++ cmpw %cx,0x20(%rsp)
++ jne 1f
++ movw %gs,%cx
++ cmpw %cx,0x28(%rsp)
++ jne 1f
++ /* All segments match their saved values => Category 2 (Bad IRET). */
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x30,%rsp
++ CFI_ADJUST_CFA_OFFSET -0x30
++ movq $11,%rdi /* SIGSEGV */
++ jmp do_exit
++ CFI_RESTORE_STATE
++1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x30,%rsp
++ CFI_ADJUST_CFA_OFFSET -0x30
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8
++ SAVE_ALL
++ jmp error_exit
++ CFI_ENDPROC
++#if 0
++ .section __ex_table,"a"
++ .align 8
++ .quad gs_change,bad_gs
++ .previous
++ .section .fixup,"ax"
++ /* running with kernelgs */
++bad_gs:
++/* swapgs */ /* switch back to user gs */
++ xorl %eax,%eax
++ movl %eax,%gs
++ jmp 2b
++ .previous
++#endif
++
++/*
++ * Create a kernel thread.
++ *
++ * C extern interface:
++ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++ *
++ * asm input arguments:
++ * rdi: fn, rsi: arg, rdx: flags
++ */
++ENTRY(kernel_thread)
++ CFI_STARTPROC
++ FAKE_STACK_FRAME $child_rip
++ SAVE_ALL
++
++ # rdi: flags, rsi: usp, rdx: will be &pt_regs
++ movq %rdx,%rdi
++ orq kernel_thread_flags(%rip),%rdi
++ movq $-1, %rsi
++ movq %rsp, %rdx
++
++ xorl %r8d,%r8d
++ xorl %r9d,%r9d
++
++ # clone now
++ call do_fork
++ movq %rax,RAX(%rsp)
++ xorl %edi,%edi
++
++ /*
++ * It isn't worth checking for a reschedule here,
++ * so internally to the x86_64 port you can rely on kernel_thread()
++ * not to reschedule the child before returning; this avoids the need
++ * for hacks, for example to fork off the per-CPU idle tasks.
++ * [Hopefully no generic code relies on the reschedule -AK]
++ */
++ RESTORE_ALL
++ UNFAKE_STACK_FRAME
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_thread)
++
++child_rip:
++ pushq $0 # fake return address
++ CFI_STARTPROC
++ /*
++ * Here we are in the child and the registers are set as they were
++ * at kernel_thread() invocation in the parent.
++ */
++ movq %rdi, %rax
++ movq %rsi, %rdi
++ call *%rax
++ # exit
++ xorl %edi, %edi
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(child_rip)
++
++/*
++ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
++ *
++ * C extern interface:
++ * extern long execve(char *name, char **argv, char **envp)
++ *
++ * asm input arguments:
++ * rdi: name, rsi: argv, rdx: envp
++ *
++ * We want to fall back into:
++ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
++ *
++ * do_sys_execve asm fallback arguments:
++ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
++ */
++ENTRY(execve)
++ CFI_STARTPROC
++ FAKE_STACK_FRAME $0
++ SAVE_ALL
++ call sys_execve
++ movq %rax, RAX(%rsp)
++ RESTORE_REST
++ testq %rax,%rax
++ jne 1f
++ jmp int_ret_from_sys_call
++1: RESTORE_ARGS
++ UNFAKE_STACK_FRAME
++ ret
++ CFI_ENDPROC
++ENDPROC(execve)
++
++KPROBE_ENTRY(page_fault)
++ errorentry do_page_fault
++END(page_fault)
++ .previous .text
++
++ENTRY(coprocessor_error)
++ zeroentry do_coprocessor_error
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ zeroentry do_simd_coprocessor_error
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ zeroentry math_state_restore
++END(device_not_available)
++
++ /* runs on exception stack */
++KPROBE_ENTRY(debug)
++/* INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8 */
++ zeroentry do_debug
++/* paranoidexit
++ CFI_ENDPROC */
++END(debug)
++ .previous .text
++
++#if 0
++ /* runs on exception stack */
++KPROBE_ENTRY(nmi)
++ INTR_FRAME
++ pushq $-1
++ CFI_ADJUST_CFA_OFFSET 8
++ paranoidentry do_nmi, 0, 0
++#ifdef CONFIG_TRACE_IRQFLAGS
++ paranoidexit 0
++#else
++ jmp paranoid_exit1
++ CFI_ENDPROC
++#endif
++END(nmi)
++ .previous .text
++#endif
++
++KPROBE_ENTRY(int3)
++/* INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8 */
++ zeroentry do_int3
++/* jmp paranoid_exit1
++ CFI_ENDPROC */
++END(int3)
++ .previous .text
++
++ENTRY(overflow)
++ zeroentry do_overflow
++END(overflow)
++
++ENTRY(bounds)
++ zeroentry do_bounds
++END(bounds)
++
++ENTRY(invalid_op)
++ zeroentry do_invalid_op
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ zeroentry do_coprocessor_segment_overrun
++END(coprocessor_segment_overrun)
++
++ENTRY(reserved)
++ zeroentry do_reserved
++END(reserved)
++
++#if 0
++ /* runs on exception stack */
++ENTRY(double_fault)
++ XCPT_FRAME
++ paranoidentry do_double_fault
++ jmp paranoid_exit1
++ CFI_ENDPROC
++END(double_fault)
++#endif
++
++ENTRY(invalid_TSS)
++ errorentry do_invalid_TSS
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ errorentry do_segment_not_present
++END(segment_not_present)
++
++ /* runs on exception stack */
++ENTRY(stack_segment)
++/* XCPT_FRAME
++ paranoidentry do_stack_segment */
++ errorentry do_stack_segment
++/* jmp paranoid_exit1
++ CFI_ENDPROC */
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ errorentry do_general_protection
++END(general_protection)
++ .previous .text
++
++ENTRY(alignment_check)
++ errorentry do_alignment_check
++END(alignment_check)
++
++ENTRY(divide_error)
++ zeroentry do_divide_error
++END(divide_error)
++
++ENTRY(spurious_interrupt_bug)
++ zeroentry do_spurious_interrupt_bug
++END(spurious_interrupt_bug)
++
++#ifdef CONFIG_X86_MCE
++ /* runs on exception stack */
++ENTRY(machine_check)
++ INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8
++ paranoidentry do_machine_check
++ jmp paranoid_exit1
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++/* Call softirq on interrupt stack. Interrupts are off. */
++ENTRY(call_softirq)
++ CFI_STARTPROC
++ push %rbp
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rbp,0
++ mov %rsp,%rbp
++ CFI_DEF_CFA_REGISTER rbp
++ incl %gs:pda_irqcount
++ cmove %gs:pda_irqstackptr,%rsp
++ push %rbp # backlink for old unwinder
++ call __do_softirq
++ leaveq
++ CFI_DEF_CFA_REGISTER rsp
++ CFI_ADJUST_CFA_OFFSET -8
++ decl %gs:pda_irqcount
++ ret
++ CFI_ENDPROC
++ENDPROC(call_softirq)
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
++ movq %r15, R15(%rdi)
++ movq %r14, R14(%rdi)
++ xchgq %rsi, %rdx
++ movq %r13, R13(%rdi)
++ movq %r12, R12(%rdi)
++ xorl %eax, %eax
++ movq %rbp, RBP(%rdi)
++ movq %rbx, RBX(%rdi)
++ movq (%rsp), %rcx
++ movq %rax, R11(%rdi)
++ movq %rax, R10(%rdi)
++ movq %rax, R9(%rdi)
++ movq %rax, R8(%rdi)
++ movq %rax, RAX(%rdi)
++ movq %rax, RCX(%rdi)
++ movq %rax, RDX(%rdi)
++ movq %rax, RSI(%rdi)
++ movq %rax, RDI(%rdi)
++ movq %rax, ORIG_RAX(%rdi)
++ movq %rcx, RIP(%rdi)
++ leaq 8(%rsp), %rcx
++ movq $__KERNEL_CS, CS(%rdi)
++ movq %rax, EFLAGS(%rdi)
++ movq %rcx, RSP(%rdi)
++ movq $__KERNEL_DS, SS(%rdi)
++ jmpq *%rdx
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/genapic_xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/genapic_xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/genapic_xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/genapic_xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ *
++ * Hacked to pieces for Xen by Chris Wright.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#include <asm/smp.h>
++#include <asm/ipi.h>
++#else
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/genapic.h>
++#endif
++#include <xen/evtchn.h>
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
++
++void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ case APIC_DEST_ALLINC:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
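++
++/*
++ * Under Xen an IPI is not an APIC write at all: per_cpu(ipi_to_irq)
++ * maps (cpu, vector) to the dynamic IRQ bound to that VCPU's IPI
++ * event channel, and notify_remote_via_irq() fires it.  Typical use
++ * (sketch) is the reschedule IPI issued through
++ * xen_send_IPI_allbutself() below:
++ *
++ *	xen_send_IPI_shortcut(APIC_DEST_ALLBUT, RESCHEDULE_VECTOR,
++ *			      APIC_DEST_LOGICAL);
++ */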
++
++static cpumask_t xen_target_cpus(void)
++{
++ return cpu_online_map;
++}
++
++/*
++ * Set up the logical destination ID.
++ * Do nothing, not called now.
++ */
++static void xen_init_apic_ldr(void)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ return;
++}
++
++static void xen_send_IPI_allbutself(int vector)
++{
++ /*
++ * if there are no other CPUs in the system then
++ * we get an APIC send error if we try to broadcast.
++ * thus we have to avoid sending IPIs in this case.
++ */
++ Dprintk("%s\n", __FUNCTION__);
++ if (num_online_cpus() > 1)
++ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_all(int vector)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
++{
++ unsigned long mask = cpus_addr(cpumask)[0];
++ unsigned int cpu;
++ unsigned long flags;
++
++ Dprintk("%s\n", __FUNCTION__);
++ local_irq_save(flags);
++ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpumask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ local_irq_restore(flags);
++}
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static int xen_apic_id_registered(void)
++{
++ /* better be set */
++ Dprintk("%s\n", __FUNCTION__);
++ return physid_isset(smp_processor_id(), phys_cpu_present_map);
++}
++#endif
++
++static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
++}
++
++static unsigned int phys_pkg_id(int index_msb)
++{
++ u32 ebx;
++
++ Dprintk("%s\n", __FUNCTION__);
++ ebx = cpuid_ebx(1);
++ return ((ebx >> 24) & 0xFF) >> index_msb;
++}
++
++struct genapic apic_xen = {
++ .name = "xen",
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ .int_delivery_mode = dest_LowestPrio,
++#endif
++ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
++ .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
++ .target_cpus = xen_target_cpus,
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ .apic_id_registered = xen_apic_id_registered,
++#endif
++ .init_apic_ldr = xen_init_apic_ldr,
++ .send_IPI_all = xen_send_IPI_all,
++ .send_IPI_allbutself = xen_send_IPI_allbutself,
++ .send_IPI_mask = xen_send_IPI_mask,
++ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
++ .phys_pkg_id = phys_pkg_id,
++};
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/genapic-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/genapic-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/genapic-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/genapic-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Generic APIC sub-arch probe layer.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/ipi.h>
++
++#if defined(CONFIG_ACPI)
++#include <acpi/acpi_bus.h>
++#endif
++
++/* which logical CPU number maps to which CPU (physical APIC ID) */
++u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++extern struct genapic apic_cluster;
++extern struct genapic apic_flat;
++extern struct genapic apic_physflat;
++
++#ifndef CONFIG_XEN
++struct genapic *genapic = &apic_flat;
++#else
++extern struct genapic apic_xen;
++struct genapic *genapic = &apic_xen;
++#endif
++
++
++/*
++ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
++ */
++void __init clustered_apic_check(void)
++{
++#ifndef CONFIG_XEN
++ long i;
++ u8 clusters, max_cluster;
++ u8 id;
++ u8 cluster_cnt[NUM_APIC_CLUSTERS];
++ int max_apic = 0;
++
++#if defined(CONFIG_ACPI)
++ /*
++ * Some x86_64 machines use physical APIC mode regardless of how many
++ * procs/clusters are present (x86_64 ES7000 is an example).
++ */
++ if (acpi_fadt.revision > FADT2_REVISION_ID)
++ if (acpi_fadt.force_apic_physical_destination_mode) {
++ genapic = &apic_cluster;
++ goto print;
++ }
++#endif
++
++ memset(cluster_cnt, 0, sizeof(cluster_cnt));
++ for (i = 0; i < NR_CPUS; i++) {
++ id = bios_cpu_apicid[i];
++ if (id == BAD_APICID)
++ continue;
++ if (id > max_apic)
++ max_apic = id;
++ cluster_cnt[APIC_CLUSTERID(id)]++;
++ }
++
++ /* Don't use clustered mode on AMD platforms. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++ genapic = &apic_physflat;
++#ifndef CONFIG_HOTPLUG_CPU
++ /* In the CPU hotplug case we cannot use broadcast mode
++ because that opens a race when a CPU is removed.
++ Stay at physflat mode in this case.
++ It is bad to do this unconditionally though. Once
++ we have ACPI platform support for CPU hotplug
++	   we should detect hotplug capability from ACPI tables and
++ only do this when really needed. -AK */
++ if (max_apic <= 8)
++ genapic = &apic_flat;
++#endif
++ goto print;
++ }
++
++ clusters = 0;
++ max_cluster = 0;
++
++ for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
++ if (cluster_cnt[i] > 0) {
++ ++clusters;
++ if (cluster_cnt[i] > max_cluster)
++ max_cluster = cluster_cnt[i];
++ }
++ }
++
++ /*
++ * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
++ * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
++ * else physical mode.
++ * (We don't use lowest priority delivery + HW APIC IRQ steering, so
++ * can ignore the clustered logical case and go straight to physical.)
++ */
++ if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
++#ifdef CONFIG_HOTPLUG_CPU
++ /* Don't use APIC shortcuts in CPU hotplug to avoid races */
++ genapic = &apic_physflat;
++#else
++ genapic = &apic_flat;
++#endif
++ } else
++ genapic = &apic_cluster;
++
++print:
++#else
++ /* hardcode to xen apic functions */
++ genapic = &apic_xen;
++#endif
++ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
++}
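++
++/*
++ * Worked example of the native (non-Xen, non-AMD) decision above,
++ * with hypothetical APIC IDs for illustration: four CPUs with IDs 0-3
++ * all fall in cluster 0, giving clusters == 1, max_cluster == 4 and
++ * cluster_cnt[0] == max_cluster, so apic_flat is chosen; sixteen CPUs
++ * spread evenly over clusters 0-3 give clusters == 4 and select
++ * apic_cluster instead.
++ */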
++
++/* Same for both flat and clustered. */
++
++#ifdef CONFIG_XEN
++extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
++#endif
++
++void send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#else
++ xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#endif
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/head64-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/head64-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/head64-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/head64-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,162 @@
++/*
++ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
++ *
++ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen.
++ */
++
++#include <linux/init.h>
++#include <linux/linkage.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++#include <asm/processor.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/bootsetup.h>
++#include <asm/setup.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/sections.h>
++
++unsigned long start_pfn;
++
++/* Don't add a printk in there. printk relies on the PDA which is not initialized
++ yet. */
++#if 0
++static void __init clear_bss(void)
++{
++ memset(__bss_start, 0,
++ (unsigned long) __bss_stop - (unsigned long) __bss_start);
++}
++#endif
++
++#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
++#define OLD_CL_MAGIC_ADDR 0x90020
++#define OLD_CL_MAGIC 0xA33F
++#define OLD_CL_BASE_ADDR 0x90000
++#define OLD_CL_OFFSET 0x90022
++
++extern char saved_command_line[];
++
++static void __init copy_bootdata(char *real_mode_data)
++{
++#ifndef CONFIG_XEN
++ int new_data;
++ char * command_line;
++
++ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
++ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
++ if (!new_data) {
++ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
++ printk("so old bootloader that it does not support commandline?!\n");
++ return;
++ }
++ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
++ printk("old bootloader convention, maybe loadlin?\n");
++ }
++ command_line = (char *) ((u64)(new_data));
++ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
++#else
++ int max_cmdline;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ saved_command_line[max_cmdline-1] = '\0';
++#endif
++ printk("Bootdata ok (command line is %s)\n", saved_command_line);
++}
++
++static void __init setup_boot_cpu_data(void)
++{
++ unsigned int dummy, eax;
++
++ /* get vendor info */
++ cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
++
++ /* get cpu type */
++ cpuid(1, &eax, &dummy, &dummy,
++ (unsigned int *) &boot_cpu_data.x86_capability);
++ boot_cpu_data.x86 = (eax >> 8) & 0xf;
++ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
++ boot_cpu_data.x86_mask = eax & 0xf;
++}
++
++#include <xen/interface/memory.h>
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init x86_64_start_kernel(char * real_mode_data)
++{
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ char *s;
++ int i;
++
++ setup_xen_features();
++
++ xen_start_info = (struct start_info *)real_mode_data;
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++ start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
++ xen_start_info->nr_pt_frames;
++
++ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ }
++ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++ machine_to_phys_order++;
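++	/*
++	 * Example: with machine_to_phys_nr_ents == 0x100000 this loop
++	 * leaves machine_to_phys_order == 20, the smallest order whose
++	 * power of two covers every entry in the M2P table.
++	 */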
++
++#if 0
++ for (i = 0; i < 256; i++)
++ set_intr_gate(i, early_idt_handler);
++ asm volatile("lidt %0" :: "m" (idt_descr));
++#endif
++
++ /*
++ * This must be called really, really early:
++ */
++ lockdep_init();
++
++ for (i = 0; i < NR_CPUS; i++)
++ cpu_pda(i) = &boot_cpu_pda[i];
++
++ pda_init(0);
++ copy_bootdata(real_mode_data);
++#ifdef CONFIG_SMP
++ cpu_set(0, cpu_online_map);
++#endif
++ s = strstr(saved_command_line, "earlyprintk=");
++ if (s != NULL)
++ setup_early_printk(strchr(s, '=') + 1);
++#ifdef CONFIG_NUMA
++ s = strstr(saved_command_line, "numa=");
++ if (s != NULL)
++ numa_setup(s+5);
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ if (strstr(saved_command_line, "disableapic"))
++ disable_apic = 1;
++#endif
++ /* You need early console to see that */
++ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
++ panic("Kernel too big for kernel mapping\n");
++
++ setup_boot_cpu_data();
++ start_kernel();
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/head-xen.S linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/head-xen.S
+--- linux-2.6.18.8/arch/x86_64/kernel/head-xen.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/head-xen.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,214 @@
++/*
++ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
++ *
++ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
++ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
++ *
++ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ */
++
++
++#include <linux/linkage.h>
++#include <linux/threads.h>
++#include <linux/init.h>
++#include <linux/elfnote.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/msr.h>
++#include <asm/cache.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/elfnote.h>
++
++ .section .bootstrap.text, "ax", @progbits
++ .code64
++ .globl startup_64
++startup_64:
++ movq $(init_thread_union+THREAD_SIZE-8),%rsp
++
++ /* rsi is pointer to startup info structure.
++ pass it to C */
++ movq %rsi,%rdi
++ pushq $0 # fake return address
++ jmp x86_64_start_kernel
++
++#ifdef CONFIG_ACPI_SLEEP
++.org 0xf00
++ .globl pGDT32
++pGDT32:
++ .word gdt_end-cpu_gdt_table-1
++ .long cpu_gdt_table-__START_KERNEL_map
++#endif
++ENTRY(stext)
++ENTRY(_stext)
++
++ $page = 0
++#define NEXT_PAGE(name) \
++ $page = $page + 1; \
++ .org $page * 0x1000; \
++ phys_##name = $page * 0x1000 + __PHYSICAL_START; \
++ENTRY(name)
++
++NEXT_PAGE(init_level4_pgt)
++ /* This gets initialized in x86_64_start_kernel */
++ .fill 512,8,0
++NEXT_PAGE(init_level4_user_pgt)
++ /*
++ * We update two pgd entries to make kernel and user pgd consistent
++ * at pgd_populate(). It can be used for kernel modules. So we place
++ * this page here for those cases to avoid memory corruption.
++ * We also use this page to establish the initial mapping for the
++ * vsyscall area.
++ */
++ .fill 512,8,0
++
++NEXT_PAGE(level3_kernel_pgt)
++ .fill 512,8,0
++
++ /*
++ * This is used for vsyscall area mapping as we have a different
++ * level4 page table for user.
++ */
++NEXT_PAGE(level3_user_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level2_kernel_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(hypercall_page)
++ CFI_STARTPROC
++ .rept 0x1000 / 0x20
++ .skip 1 /* push %rcx */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rcx,0
++ .skip 2 /* push %r11 */
++ CFI_ADJUST_CFA_OFFSET 8
++	CFI_REL_OFFSET	r11,0
++ .skip 5 /* mov $#,%eax */
++ .skip 2 /* syscall */
++ .skip 2 /* pop %r11 */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE r11
++ .skip 1 /* pop %rcx */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ .align 0x20,0 /* ret */
++ .endr
++ CFI_ENDPROC
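++
++/*
++ * Sketch of one 32-byte slot after the hypervisor fills the page in:
++ * for hypercall number N (at byte offset N*32) the code is roughly
++ *
++ *	push %rcx; push %r11; mov $N,%eax; syscall; pop %r11; pop %rcx; ret
++ *
++ * which is exactly the sequence the .skip/CFI annotations above
++ * describe; the concrete bytes are hypervisor-defined.
++ */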
++
++#undef NEXT_PAGE
++
++ .data
++/* Just dummy symbol to allow compilation. Not used in sleep path */
++#ifdef CONFIG_ACPI_SLEEP
++ .align PAGE_SIZE
++ENTRY(wakeup_level4_pgt)
++ .fill 512,8,0
++#endif
++
++ .data
++
++ .align 16
++ .globl cpu_gdt_descr
++cpu_gdt_descr:
++ .word gdt_end-cpu_gdt_table-1
++gdt:
++ .quad cpu_gdt_table
++#ifdef CONFIG_SMP
++ .rept NR_CPUS-1
++ .word 0
++ .quad 0
++ .endr
++#endif
++
++/* We need valid kernel segments for data and code in long mode too
++ * IRET will check the segment types kkeil 2000/10/28
++ * Also sysret mandates a special GDT layout
++ */
++
++ .section .data.page_aligned, "aw"
++ .align PAGE_SIZE
++
++/* The TLS descriptors are currently at a different place compared to i386.
++ Hopefully nobody expects them at a fixed place (Wine?) */
++
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0 /* unused */
++ .quad 0x00af9a000000ffff /* __KERNEL_CS */
++ .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00cffa000000ffff /* __USER32_CS */
++ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affa000000ffff /* __USER_CS */
++ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0 /* unused */
++gdt_end:
++ /* asm/segment.h:GDT_ENTRIES must match this */
++ /* This should be a multiple of the cache line size */
++ /* GDTs of other CPUs are now dynamically allocated */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++
++ .section .bss.page_aligned, "aw", @nobits
++ .align PAGE_SIZE
++ENTRY(empty_zero_page)
++ .skip PAGE_SIZE
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoh value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoh (((\value)>>4)&0x0fffffffffffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
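++
++/*
++ * A minimal user-space C sketch of what utoh emits at assembly time
++ * (illustration only, not part of the kernel build):
++ *
++ *	#include <stdio.h>
++ *
++ *	static void utoh(unsigned long value)
++ *	{
++ *		if (value >= 0x10)
++ *			utoh(value >> 4);	// high digits first
++ *		putchar(value % 16 < 10 ? '0' + value % 16
++ *					: 'A' + value % 16 - 10);
++ *	}
++ *
++ * so "utoh __START_KERNEL_map" produces the address as capital hex
++ * digits, most significant first, with no leading zeros.
++ */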
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoh __START_KERNEL_map
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoh __START_KERNEL_map
++ .ascii ",VIRT_ENTRY=0x"
++ utoh (__START_KERNEL_map + __PHYSICAL_START)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoh (phys_hypercall_page >> PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|supervisor_mode_kernel"
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, __START_KERNEL_map)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, __START_KERNEL_map)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/init_task.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/init_task.c
+--- linux-2.6.18.8/arch/x86_64/kernel/init_task.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/init_task.c 2008-08-21 11:36:07.000000000 +0200
+@@ -37,6 +37,8 @@ union thread_union init_thread_union
+ struct task_struct init_task = INIT_TASK(init_task);
+
+ EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+@@ -50,5 +52,6 @@ DEFINE_PER_CPU(struct tss_struct, init_t
+ * debugging, no special alignment required.
+ */
+ DEFINE_PER_CPU(struct orig_ist, orig_ist);
++#endif
+
+ #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/io_apic-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/io_apic-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/io_apic-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/io_apic-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2262 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/sysdev.h>
++#ifdef CONFIG_ACPI
++#include <acpi/acpi_bus.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/mach_apic.h>
++#include <asm/acpi.h>
++#include <asm/dma.h>
++#include <asm/nmi.h>
++
++#define __apicdebuginit __init
++
++int sis_apic_bug; /* not actually supported, dummy for compile */
++
++static int no_timer_check;
++
++int disable_timer_pin_1 __initdata;
++
++#ifndef CONFIG_XEN
++int timer_over_8254 __initdata = 0;
++
++/* Where if anywhere is the i8259 connect in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++#endif
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ short apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
++}
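++
++/*
++ * Note that a Xen guest never maps IO-APIC registers directly: every
++ * access above becomes a PHYSDEVOP_apic_read/PHYSDEVOP_apic_write
++ * hypercall, so the hypervisor remains the sole owner of the physical
++ * routing hardware.
++ */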
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#define clear_IO_APIC() ((void)0)
++
++#else
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
++{
++ unsigned long flags;
++ unsigned int dest;
++ cpumask_t tmp;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ dest = cpu_mask_to_apicid(mask);
++
++ /*
++ * Only the high 8 bits are valid.
++ */
++ dest = SET_APIC_LOGICAL_ID(dest);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __DO_ACTION(1, = dest, )
++ set_irq_info(irq, mask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#endif
++
++#endif /* !CONFIG_XEN */
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ BUG_ON(irq >= NR_IRQS);
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: ran out of irq_2_pin entries!");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
++
++#ifndef CONFIG_XEN
++#define __DO_ACTION(R, ACTION, FINAL) \
++ \
++{ \
++ int pin; \
++ struct irq_pin_list *entry = irq_2_pin + irq; \
++ \
++ BUG_ON(irq >= NR_IRQS); \
++ for (;;) { \
++ unsigned int reg; \
++ pin = entry->pin; \
++ if (pin == -1) \
++ break; \
++ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
++ reg ACTION; \
++ io_apic_modify(entry->apic, reg); \
++ if (!entry->next) \
++ break; \
++ entry = irq_2_pin + entry->next; \
++ } \
++ FINAL; \
++}
++
++#define DO_ACTION(name,R,ACTION, FINAL) \
++ \
++ static void name##_IO_APIC_irq (unsigned int irq) \
++ __DO_ACTION(R, ACTION, FINAL)
++
++DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
++ /* mask = 1 */
++DO_ACTION( __unmask, 0, &= 0xfffeffff, )
++ /* mask = 0 */
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#endif /* !CONFIG_XEN */
++
++static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++int ioapic_force;
++
++/* dummy parsing: see setup.c */
++
++static int __init disable_ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++static int __init enable_ioapic_setup(char *str)
++{
++ ioapic_force = 1;
++ skip_ioapic_setup = 0;
++ return 1;
++}
++
++__setup("noapic", disable_ioapic_setup);
++__setup("apic", enable_ioapic_setup);
++
++#ifndef CONFIG_XEN
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++#endif /* !CONFIG_XEN */
++
++#include <asm/pci-direct.h>
++#include <linux/pci_ids.h>
++#include <linux/pci.h>
++
++
++#ifdef CONFIG_ACPI
++
++static int nvidia_hpet_detected __initdata;
++
++static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
++{
++ nvidia_hpet_detected = 1;
++ return 0;
++}
++#endif
++
++/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
++ off. Check for an Nvidia or VIA PCI bridge and turn it off.
++ Use pci direct infrastructure because this runs before the PCI subsystem.
++
++ Can be overwritten with "apic"
++
++ And another hack to disable the IOMMU on VIA chipsets.
++
++ ... and others. Really should move this somewhere else.
++
++ Kludge-O-Rama. */
++void __init check_ioapic(void)
++{
++ int num,slot,func;
++ /* Poor man's PCI discovery */
++ for (num = 0; num < 32; num++) {
++ for (slot = 0; slot < 32; slot++) {
++ for (func = 0; func < 8; func++) {
++ u32 class;
++ u32 vendor;
++ u8 type;
++ class = read_pci_config(num,slot,func,
++ PCI_CLASS_REVISION);
++ if (class == 0xffffffff)
++ break;
++
++ if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
++ continue;
++
++ vendor = read_pci_config(num, slot, func,
++ PCI_VENDOR_ID);
++ vendor &= 0xffff;
++ switch (vendor) {
++ case PCI_VENDOR_ID_VIA:
++#ifdef CONFIG_IOMMU
++ if ((end_pfn > MAX_DMA32_PFN ||
++ force_iommu) &&
++ !iommu_aperture_allowed) {
++ printk(KERN_INFO
++ "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
++ iommu_aperture_disabled = 1;
++ }
++#endif
++ return;
++ case PCI_VENDOR_ID_NVIDIA:
++#ifdef CONFIG_ACPI
++ /*
++ * All timer overrides on Nvidia are
++ * wrong unless HPET is enabled.
++ */
++ nvidia_hpet_detected = 0;
++ acpi_table_parse(ACPI_HPET,
++ nvidia_hpet_check);
++ if (nvidia_hpet_detected == 0) {
++ acpi_skip_timer_override = 1;
++ printk(KERN_INFO "Nvidia board "
++ "detected. Ignoring ACPI "
++ "timer override.\n");
++ }
++#endif
++ /* RED-PEN skip them on mptables too? */
++ return;
++ case PCI_VENDOR_ID_ATI:
++
++ /* This should be actually default, but
++ for 2.6.16 let's do it for ATI only where
++ it's really needed. */
++#ifndef CONFIG_XEN
++ if (timer_over_8254 == 1) {
++ timer_over_8254 = 0;
++ printk(KERN_INFO
++ "ATI board detected. Disabling timer routing over 8254.\n");
++ }
++#endif
++ return;
++ }
++
++
++ /* No multi-function device? */
++ type = read_pci_config_byte(num,slot,func,
++ PCI_HEADER_TYPE);
++ if (!(type & 0x80))
++ break;
++ }
++ }
++ }
++}
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++#endif
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
++ bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ BUG_ON(best_guess >= NR_IRQS);
++ return best_guess;
++}
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
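++
++/*
++ * Example: for irq 10 this reads port 0x4d0 + (10 >> 3) = 0x4d1 and
++ * tests bit (10 & 7) = 2; the ELCR bits for IRQs 8-15 live in the
++ * second register.
++ */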
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int next_irq = 16;
++
++/*
++ * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
++ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
++ * from ACPI, which can reach 800 in large boxen.
++ *
++ * Compact the sparse GSI space into a sequential IRQ series and reuse
++ * vectors if possible.
++ */
++int gsi_irq_sharing(int gsi)
++{
++ int i, tries, vector;
++
++ BUG_ON(gsi >= NR_IRQ_VECTORS);
++
++ if (platform_legacy_irq(gsi))
++ return gsi;
++
++ if (gsi_2_irq[gsi] != 0xFF)
++ return (int)gsi_2_irq[gsi];
++
++ tries = NR_IRQS;
++ try_again:
++ vector = assign_irq_vector(gsi);
++
++ /*
++ * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
++ * use of vector and if found, return that IRQ. However, we never want
++ * to share legacy IRQs, which usually have a different trigger mode
++ * than PCI.
++ */
++ for (i = 0; i < NR_IRQS; i++)
++ if (IO_APIC_VECTOR(i) == vector)
++ break;
++ if (platform_legacy_irq(i)) {
++ if (--tries >= 0) {
++ IO_APIC_VECTOR(i) = 0;
++ goto try_again;
++ }
++ panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
++ }
++ if (i < NR_IRQS) {
++ gsi_2_irq[gsi] = i;
++ printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
++ gsi, vector, i);
++ return i;
++ }
++
++ i = next_irq++;
++ BUG_ON(i >= NR_IRQS);
++ gsi_2_irq[gsi] = i;
++ IO_APIC_VECTOR(i) = vector;
++ printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
++ gsi, vector, i);
++ return i;
++}
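++
++/*
++ * Example (hypothetical numbering): the first non-legacy GSI seen,
++ * say GSI 40, gets a fresh vector, is compacted to IRQ 16 (next_irq's
++ * starting value) and cached in gsi_2_irq[40]; a later call for
++ * GSI 40 returns 16 immediately without consuming another IRQ.
++ */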
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++ irq = gsi_irq_sharing(irq);
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++ BUG_ON(irq >= NR_IRQS);
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ BUG_ON(irq >= NR_IRQS);
++ return irq;
++}
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
++
++extern void (*interrupt[NR_IRQS])(void);
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif /* !CONFIG_XEN */
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/* !apic && */ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE," not connected.\n");
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Set up the 8259A-master output pin as broadcast to all
++ * CPUs.
++ */
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scene we have a 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++void __init UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __apicdebuginit print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk("\n");
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F) &&
++ (reg_01.bits.entries != 0x03)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ if (reg_01.bits.version >= 0x10) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++static __apicdebuginit void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void __apicdebuginit print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void __apicdebuginit print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++#ifndef CONFIG_XEN
++ int i8259_apic, i8259_pin;
++#endif
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++#ifndef CONFIG_XEN
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
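++			/* A redirection-table entry is 64 bits wide, exposed
++			 * as two 32-bit registers: 0x10+2*pin holds the low
++			 * half, 0x11+2*pin the high half. */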
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++	/* Look to see whether the MP table has reported the ExtINT */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++ /* Trust the MP table if nothing is setup in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++#endif
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC
++ * Put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * Function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#ifndef CONFIG_XEN
++static void __init setup_ioapic_ids_from_mpc (void)
++{
++ union IO_APIC_reg_00 reg_00;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++		printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE," ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++/*
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++#ifndef CONFIG_XEN
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
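++	/* (10 * 1000) / HZ milliseconds is ten ticks at HZ ticks/second. */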
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++	 * glue logic does not lock up after the first one or
++	 * two ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++
++ /* jiffies wrap? */
++ if (jiffies - t1 > 4)
++ return 1;
++ return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ move_irq(irq);
++ ack_APIC_irq();
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif // CONFIG_SMP
++#endif // CONFIG_PCI_MSI
++
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq,
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask LVT0 on all local APICs as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ printk(KERN_INFO "activating NMI Watchdog ...");
++
++ enable_NMI_through_LVT0(NULL);
++
++ printk(" done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
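++	/* Wait up to a second (100 x 10 ms); each observed RTC periodic-
++	 * interrupt flag shortens the remaining wait by ten iterations. */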
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ *
++ * FIXME: really need to revamp this for modern platforms only.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++	 * Subtle: code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
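++	/* pin1/apic1: where the MP table says the 8254 timer output enters
++	 * an IO-APIC. pin2/apic2: the IO-APIC pin fed by the 8259 ExtINT
++	 * output, kept as a fallback route. */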
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (!no_timer_check && timer_irq_works()) {
++ nmi_watchdog_default();
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
++ "connected to IO-APIC\n");
++ }
++
++ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
++ "through the 8259A ... ");
++ if (pin2 != -1) {
++ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
++ apic2, pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ nmi_watchdog_default();
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ apic_printk(APIC_VERBOSE," failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ return;
++ }
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ apic_printk(APIC_VERBOSE," failed.\n");
++
++ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ return;
++ }
++ apic_printk(APIC_VERBOSE," failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
++}
++#else
++#define check_timer() ((void)0)
++int timer_uses_ioapic_pin_0 = 0;
++#endif /* !CONFIG_XEN */
++
++static int __init notimercheck(char *s)
++{
++ no_timer_check = 1;
++ return 1;
++}
++__setup("no_timer_check", notimercheck);
++
++/*
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1<<2)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up the IO-APIC IRQ routing table.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif /* !CONFIG_XEN */
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++#define IO_APIC_MAX_ID 0xFE
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1; /* Disabled (masked) */
++
++ irq = gsi_irq_sharing(irq);
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
++ "IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
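++	/* Write the high dword first: the low dword carries the mask bit,
++	 * so the entry cannot fire until it is completely programmed. */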
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
++
++
++#ifndef CONFIG_XEN
++/*
++ * This function is currently only a helper for the i386 SMP boot process, where
++ * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
++ * so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif
++#endif /* !CONFIG_XEN */
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/ioport-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/ioport-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/ioport-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/ioport-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,100 @@
++/*
++ * linux/arch/x86_64/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value NEW_VALUE. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ int i;
++
++ if (new_value)
++ for (i = base; i < base + extent; i++)
++ __set_bit(i, bitmap);
++ else
++ for (i = base; i < base + extent; i++)
++ clear_bit(i, bitmap);
++}
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++	 * which is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ }
++
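++	/* A set bit in the I/O bitmap denies access (it was memset to 0xff
++	 * above), so granting ports means clearing bits - hence !turn_on. */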
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ */
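++/* (65536 ports at one bit each is 65536 / 8 = 8192 bytes per process.) */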
++
++asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
++{
++ unsigned int old_iopl = current->thread.iopl;
++ struct physdev_set_iopl set_iopl;
++
++ if (new_iopl > 3)
++ return -EINVAL;
++
++ /* Need "raw I/O" privileges for direct port access. */
++ if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /* Change our version of the privilege levels. */
++ current->thread.iopl = new_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/irq-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/irq-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/irq-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/irq-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,197 @@
++/*
++ * linux/arch/x86_64/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86_64-specific interrupt
++ * entry and irq statistics code. All the remaining irq logic is
++ * done by the generic kernel/irq/ code and in the
++ * x86_64-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/interrupt.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <asm/uaccess.h>
++#include <asm/io_apic.h>
++#include <asm/idle.h>
++
++atomic_t irq_err_count;
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++atomic_t irq_mis_count;
++#endif
++#endif
++
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++/*
++ * Probabilistic stack overflow check:
++ *
++ * Only check the stack in process context, because everything else
++ * runs on the big interrupt stacks. Checking reliably is too expensive,
++ * so we just check from interrupts.
++ */
++static inline void stack_overflow_check(struct pt_regs *regs)
++{
++ u64 curbase = (u64) current->thread_info;
++ static unsigned long warned = -60*HZ;
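++	/* warned starts one period in the past so the first warning is not
++	 * suppressed by the once-per-minute rate limit. */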
++
++ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
++ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
++ time_after(jiffies, warned + 60*HZ)) {
++ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
++ current->comm, curbase, regs->rsp);
++ show_stack(NULL,NULL);
++ warned = jiffies;
++ }
++}
++#endif
++
++/*
++ * Generic, controller-independent functions:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++
++ seq_printf(p, " %s", action->name);
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++#endif
++ }
++ return 0;
++}
++
++/*
++ * do_IRQ handles all normal device IRQs (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ unsigned irq = ~regs->orig_rax;
++
++ if (unlikely(irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ exit_idle();
++ irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ stack_overflow_check(regs);
++#endif
++ __do_IRQ(irq, regs);
++ irq_exit();
++
++ return 1;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
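++		/* any_online_cpu() returns NR_CPUS for an empty mask: this
++		 * IRQ's affinity no longer intersects the surviving CPUs,
++		 * so fall back to the full map. */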
++ if (any_online_cpu(mask) == NR_CPUS) {
++ /*printk("Breaking affinity for irq %i\n", irq);*/
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++}
++#endif
++
++extern void call_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ __u32 pending;
++ unsigned long flags;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++ pending = local_softirq_pending();
++ /* Switch to interrupt stack */
++ if (pending) {
++ call_softirq();
++ WARN_ON_ONCE(softirq_count());
++ }
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(do_softirq);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/ldt-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/ldt-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/ldt-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/ldt-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,282 @@
++/*
++ * linux/arch/x86_64/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2002 Andi Kleen
++ *
++ * This handles calls from both 32bit and 64bit mode.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/pgalloc.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ unsigned oldsize;
++
++ if (mincount <= (unsigned)pc->size)
++ return 0;
++ oldsize = pc->size;
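++	/* Round up to a multiple of 512 entries: at 8 bytes per descriptor
++	 * that keeps the LDT a whole number of 4 KiB pages. */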
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
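++	/* Publish in order: table contents before the pointer, the pointer
++	 * before the enlarged size, so that CPUs walking the LDT without a
++	 * lock never see a size covering uninitialised entries. */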
++ wmb();
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ memset(&mm->context, 0, sizeof(mm->context));
++ init_MUTEX(&mm->context.sem);
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ if (retval == 0) {
++ spin_lock(&mm_unpinned_lock);
++ list_add(&mm->context.unpinned, &mm_unpinned);
++ spin_unlock(&mm_unpinned_lock);
++ }
++ return retval;
++}
++
++/*
++ * Don't touch the LDT register - we're already in the next thread.
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++ if (!mm->context.pinned) {
++ spin_lock(&mm_unpinned_lock);
++ list_del(&mm->context.unpinned);
++ spin_unlock(&mm_unpinned_lock);
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ /* Arbitrary number */
++ /* x86-64 default LDT is all zeros */
++ if (bytecount > 128)
++ bytecount = 128;
++ if (clear_user(ptr, bytecount))
++ return -EFAULT;
++ return bytecount;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct task_struct *me = current;
++ struct mm_struct * mm = me->mm;
++ __u32 entry_1, entry_2, *lp;
++ unsigned long mach_lp;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, bytecount))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
++ mach_lp = arbitrary_virt_to_machine(lp);
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
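++	/* LDT pages are mapped read-only under Xen (see alloc_ldt), so the
++	 * descriptor is installed via hypercall, not by a direct write. */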
++ /* Install the new entry ... */
++install:
++ error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/machine_kexec.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/machine_kexec.c
+--- linux-2.6.18.8/arch/x86_64/kernel/machine_kexec.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/machine_kexec.c 2008-08-21 11:36:07.000000000 +0200
+@@ -15,6 +15,128 @@
+ #include <asm/mmu_context.h>
+ #include <asm/io.h>
+
++#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
++static u64 kexec_pgd[512] PAGE_ALIGNED;
++static u64 kexec_pud0[512] PAGE_ALIGNED;
++static u64 kexec_pmd0[512] PAGE_ALIGNED;
++static u64 kexec_pte0[512] PAGE_ALIGNED;
++static u64 kexec_pud1[512] PAGE_ALIGNED;
++static u64 kexec_pmd1[512] PAGE_ALIGNED;
++static u64 kexec_pte1[512] PAGE_ALIGNED;
++
++#ifdef CONFIG_XEN
++
++/* In the case of Xen, override the page-table accessor functions so that
++ * a regular identity-mapping page table can be created...
++ */
++
++#include <xen/interface/kexec.h>
++#include <xen/interface/memory.h>
++
++#define x__pmd(x) ((pmd_t) { (x) } )
++#define x__pud(x) ((pud_t) { (x) } )
++#define x__pgd(x) ((pgd_t) { (x) } )
++
++#define x_pmd_val(x) ((x).pmd)
++#define x_pud_val(x) ((x).pud)
++#define x_pgd_val(x) ((x).pgd)
++
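++/* Leaf PMD entries identity-map machine memory and are stored untranslated;
++ * PUD and PGD entries point at guest-allocated table pages, so their
++ * pseudo-physical addresses must be converted to machine addresses. */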
++static inline void x_set_pmd(pmd_t *dst, pmd_t val)
++{
++ x_pmd_val(*dst) = x_pmd_val(val);
++}
++
++static inline void x_set_pud(pud_t *dst, pud_t val)
++{
++ x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
++}
++
++static inline void x_pud_clear (pud_t *pud)
++{
++ x_pud_val(*pud) = 0;
++}
++
++static inline void x_set_pgd(pgd_t *dst, pgd_t val)
++{
++ x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
++}
++
++static inline void x_pgd_clear (pgd_t * pgd)
++{
++ x_pgd_val(*pgd) = 0;
++}
++
++#define X__PAGE_KERNEL_LARGE_EXEC \
++ _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
++#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
++
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ void *control_page;
++ void *table_page;
++
++ memset(xki->page_list, 0, sizeof(xki->page_list));
++
++ control_page = page_address(image->control_code_page) + PAGE_SIZE;
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ table_page = page_address(image->control_code_page);
++
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
++
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++ xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
++ xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++}
++
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus)
++{
++ int k;
++
++ /* The per-cpu crash note resources belong to the hypervisor resource */
++ for (k = 0; k < nr_phys_cpus; k++)
++ request_resource(hypervisor, phys_cpus + k);
++
++ return 0;
++}
++
++void machine_kexec_register_resources(struct resource *res) { ; }
++
++#else /* CONFIG_XEN */
++
++#define x__pmd(x) __pmd(x)
++#define x__pud(x) __pud(x)
++#define x__pgd(x) __pgd(x)
++
++#define x_set_pmd(x, y) set_pmd(x, y)
++#define x_set_pud(x, y) set_pud(x, y)
++#define x_set_pgd(x, y) set_pgd(x, y)
++
++#define x_pud_clear(x) pud_clear(x)
++#define x_pgd_clear(x) pgd_clear(x)
++
++#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
++#define X_KERNPG_TABLE _KERNPG_TABLE
++
++#endif /* CONFIG_XEN */
++
+ static void init_level2_page(pmd_t *level2p, unsigned long addr)
+ {
+ unsigned long end_addr;
+@@ -22,7 +144,7 @@ static void init_level2_page(pmd_t *leve
+ addr &= PAGE_MASK;
+ end_addr = addr + PUD_SIZE;
+ while (addr < end_addr) {
+- set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
++ x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
+ addr += PMD_SIZE;
+ }
+ }
+@@ -47,12 +169,12 @@ static int init_level3_page(struct kimag
+ }
+ level2p = (pmd_t *)page_address(page);
+ init_level2_page(level2p, addr);
+- set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
++ x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
+ addr += PUD_SIZE;
+ }
+ /* clear the unused entries */
+ while (addr < end_addr) {
+- pud_clear(level3p++);
++ x_pud_clear(level3p++);
+ addr += PUD_SIZE;
+ }
+ out:
+@@ -83,12 +205,12 @@ static int init_level4_page(struct kimag
+ if (result) {
+ goto out;
+ }
+- set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
++ x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
+ addr += PGDIR_SIZE;
+ }
+ /* clear the unused entries */
+ while (addr < end_addr) {
+- pgd_clear(level4p++);
++ x_pgd_clear(level4p++);
+ addr += PGDIR_SIZE;
+ }
+ out:
+@@ -99,77 +221,29 @@ out:
+ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ {
+ pgd_t *level4p;
+- level4p = (pgd_t *)__va(start_pgtable);
+- return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+-}
+-
+-static void set_idt(void *newidt, u16 limit)
+-{
+- struct desc_ptr curidt;
+-
+- /* x86-64 supports unaliged loads & stores */
+- curidt.size = limit;
+- curidt.address = (unsigned long)newidt;
++ unsigned long x_end_pfn = end_pfn;
+
+- __asm__ __volatile__ (
+- "lidtq %0\n"
+- : : "m" (curidt)
+- );
+-};
++#ifdef CONFIG_XEN
++ x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++#endif
+
+-
+-static void set_gdt(void *newgdt, u16 limit)
+-{
+- struct desc_ptr curgdt;
+-
+- /* x86-64 supports unaligned loads & stores */
+- curgdt.size = limit;
+- curgdt.address = (unsigned long)newgdt;
+-
+- __asm__ __volatile__ (
+- "lgdtq %0\n"
+- : : "m" (curgdt)
+- );
+-};
+-
+-static void load_segments(void)
+-{
+- __asm__ __volatile__ (
+- "\tmovl %0,%%ds\n"
+- "\tmovl %0,%%es\n"
+- "\tmovl %0,%%ss\n"
+- "\tmovl %0,%%fs\n"
+- "\tmovl %0,%%gs\n"
+- : : "a" (__KERNEL_DS) : "memory"
+- );
++ level4p = (pgd_t *)__va(start_pgtable);
++ return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
+ }
+
+-typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
+- unsigned long control_code_buffer,
+- unsigned long start_address,
+- unsigned long pgtable) ATTRIB_NORET;
+-
+-extern const unsigned char relocate_new_kernel[];
+-extern const unsigned long relocate_new_kernel_size;
+-
+ int machine_kexec_prepare(struct kimage *image)
+ {
+- unsigned long start_pgtable, control_code_buffer;
++ unsigned long start_pgtable;
+ int result;
+
+ /* Calculate the offsets */
+ start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+- control_code_buffer = start_pgtable + PAGE_SIZE;
+
+ /* Setup the identity mapped 64bit page table */
+ result = init_pgtable(image, start_pgtable);
+ if (result)
+ return result;
+
+- /* Place the code in the reboot code buffer */
+- memcpy(__va(control_code_buffer), relocate_new_kernel,
+- relocate_new_kernel_size);
+-
+ return 0;
+ }
+
+@@ -178,51 +252,43 @@ void machine_kexec_cleanup(struct kimage
+ return;
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+ NORET_TYPE void machine_kexec(struct kimage *image)
+ {
+- unsigned long page_list;
+- unsigned long control_code_buffer;
+- unsigned long start_pgtable;
+- relocate_new_kernel_t rnk;
++ unsigned long page_list[PAGES_NR];
++ void *control_page;
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+- /* Calculate the offsets */
+- page_list = image->head;
+- start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+- control_code_buffer = start_pgtable + PAGE_SIZE;
++ control_page = page_address(image->control_code_page) + PAGE_SIZE;
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ page_list[PA_CONTROL_PAGE] = __pa(control_page);
++ page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
++ page_list[PA_PGD] = __pa(kexec_pgd);
++ page_list[VA_PGD] = (unsigned long)kexec_pgd;
++ page_list[PA_PUD_0] = __pa(kexec_pud0);
++ page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
++ page_list[PA_PMD_0] = __pa(kexec_pmd0);
++ page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
++ page_list[PA_PTE_0] = __pa(kexec_pte0);
++ page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
++ page_list[PA_PUD_1] = __pa(kexec_pud1);
++ page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
++ page_list[PA_PMD_1] = __pa(kexec_pmd1);
++ page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
++ page_list[PA_PTE_1] = __pa(kexec_pte1);
++ page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
++
++ page_list[PA_TABLE_PAGE] =
++ (unsigned long)__pa(page_address(image->control_code_page));
+
+- /* Set the low half of the page table to my identity mapped
+- * page table for kexec. Leave the high half pointing at the
+- * kernel pages. Don't bother to flush the global pages
+- * as that will happen when I fully switch to my identity mapped
+- * page table anyway.
+- */
+- memcpy(__va(read_cr3()), __va(start_pgtable), PAGE_SIZE/2);
+- __flush_tlb();
+-
+-
+- /* The segment registers are funny things, they have both a
+- * visible and an invisible part. Whenever the visible part is
+- * set to a specific selector, the invisible part is loaded
+- * with from a table in memory. At no other time is the
+- * descriptor table in memory accessed.
+- *
+- * I take advantage of this here by force loading the
+- * segments, before I zap the gdt with an invalid value.
+- */
+- load_segments();
+- /* The gdt & idt are now invalid.
+- * If you want to load them you must set up your own idt & gdt.
+- */
+- set_gdt(phys_to_virt(0),0);
+- set_idt(phys_to_virt(0),0);
+- /* now call it */
+- rnk = (relocate_new_kernel_t) control_code_buffer;
+- (*rnk)(page_list, control_code_buffer, image->start, start_pgtable);
++ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
++ image->start);
+ }
++#endif
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/Makefile
+--- linux-2.6.18.8/arch/x86_64/kernel/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -21,11 +21,13 @@ obj-$(CONFIG_MICROCODE) += microcode.o
+ obj-$(CONFIG_X86_CPUID) += cpuid.o
+ obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o
+ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
++obj-$(CONFIG_X86_XEN_GENAPIC) += genapic.o genapic_xen.o
+ obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o \
+ genapic.o genapic_cluster.o genapic_flat.o
+ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+-obj-$(CONFIG_PM) += suspend.o
++obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend.o
++obj-$(CONFIG_ACPI_SLEEP) += suspend.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+@@ -55,3 +57,8 @@ i8237-y += ../../i386/kernel/i8237.o
+ msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
+ alternative-y += ../../i386/kernel/alternative.o
+
++time-$(CONFIG_XEN) += ../../i386/kernel/time.o
++pci-dma-$(CONFIG_XEN) += ../../i386/kernel/pci-dma.o
++
++disabled-obj-$(CONFIG_XEN) := i8259.o reboot.o smpboot.o trampoline.o
++%/head.o %/head.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/mpparse-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/mpparse-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/mpparse-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/mpparse-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1011 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/acpi.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++int acpi_found_madt;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++unsigned char apic_version [MAX_APICS];
++unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++
++static int mp_current_pci_id = 0;
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++int pic_mode;
++unsigned long mp_lapic_addr = 0;
++
++
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_id = -1U;
++/* Internal processor count */
++unsigned int num_processors __initdata = 0;
++
++unsigned disabled_cpus __initdata;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++
++/* ACPI MADT entry parsing functions */
++#ifdef CONFIG_ACPI
++extern struct acpi_boot_flags acpi_boot;
++#ifdef CONFIG_X86_LOCAL_APIC
++extern int acpi_parse_lapic (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_LOCAL_APIC*/
++#ifdef CONFIG_X86_IO_APIC
++extern int acpi_parse_ioapic (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
++
++#ifndef CONFIG_XEN
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int cpu;
++ unsigned char ver;
++ cpumask_t tmp_map;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
++ disabled_cpus++;
++ return;
++ }
++
++ printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
++ m->mpc_apicid,
++ (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
++ (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
++ m->mpc_apicver);
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_id = m->mpc_apicid;
++ }
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ num_processors++;
++ cpus_complement(tmp_map, cpu_present_map);
++ cpu = first_cpu(tmp_map);
++
++#if MAX_APICS < 255
++ if ((int)m->mpc_apicid > MAX_APICS) {
++ printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
++ m->mpc_apicid, MAX_APICS);
++ return;
++ }
++#endif
++ ver = m->mpc_apicver;
++
++ physid_set(m->mpc_apicid, phys_cpu_present_map);
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ /*
++ * bios_cpu_apicid is required to have processors listed
++		 * in the same order as logical cpu numbers. Hence the first
++		 * entry is the BSP, and so on.
++ */
++ cpu = 0;
++ }
++ bios_cpu_apicid[cpu] = m->mpc_apicid;
++ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
++
++ cpu_set(cpu, cpu_possible_map);
++ cpu_set(cpu, cpu_present_map);
++}
++#else
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
++
++ if (strncmp(str, "ISA", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, "EISA", 4) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, "PCI", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, "MCA", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else {
++ printk(KERN_ERR "Unknown bustype %s\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk("I/O APIC #%d Version %d at 0x%X.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
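
The Dprintk above decodes mpc_irqflag the way the MP spec lays it out: bits 1:0 are the polarity and bits 3:2 the trigger mode, with 0 meaning "conforms to the bus". A small sketch of that decoding (the value 13 is the one the ELCR fallback later in this file writes):

#include <stdio.h>

static const char *pol[]  = { "conforming", "active-high", "reserved", "active-low" };
static const char *trig[] = { "conforming", "edge", "reserved", "level" };

int main(void)
{
	unsigned int irqflag = 13;	/* 0b1101: level trigger, active-high polarity */

	printf("polarity=%s trigger=%s\n",
	       pol[irqflag & 3], trig[(irqflag >> 2) & 3]);
	return 0;
}
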
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++	 * will show us if this assumption is false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk("SMP mptable: bad signature [%c%c%c%c]!\n",
++ mpc->mpc_signature[0],
++ mpc->mpc_signature[1],
++ mpc->mpc_signature[2],
++ mpc->mpc_signature[3]);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk("SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(str,mpc->mpc_oem,8);
++ str[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",str);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
++
++ /* save the local APIC address, it might be non-default */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ }
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
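
ELCR_trigger() reads the chipset's Edge/Level Control Registers: IRQs 0-7 live in port 0x4d0 and IRQs 8-15 in port 0x4d1, one bit per line, a set bit meaning level-triggered. A sketch of just the port/bit arithmetic, outside the kernel:

#include <stdio.h>

int main(void)
{
	unsigned int irq;

	for (irq = 0; irq < 16; irq++)
		printf("IRQ %2u -> port 0x%x, bit %u\n",
		       irq, 0x4d0 + (irq >> 3), irq & 7);
	return 0;
}
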
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * If true, we have an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++ * processors, where MPS only supports physical.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ extern void __bad_mpf_size(void);
++ unsigned int *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ __bad_mpf_size();
++
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++ mpf_found = mpf;
++ return 1;
++ }
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
++
++void __init find_intel_smp (void)
++{
++ unsigned int address;
++
++ /*
++ * FIXME: Linux assumes you have 640K of base ram..
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E, calculate and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ */
++
++ address = *(unsigned short *)phys_to_virt(0x40E);
++ address <<= 4;
++ if (smp_scan_config(address, 0x1000))
++ return;
++
++ /* If we have come this far, we did not find an MP table */
++ printk(KERN_INFO "No mptable found.\n");
++}
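
smp_scan_config() walks each candidate region in 16-byte steps (bp is an unsigned int pointer, so bp += 4 advances 16 bytes), and the EBDA probe above turns the real-mode segment word at 0x40E into a physical address by shifting it left 4 bits. A sketch of that conversion with an illustrative segment value:

#include <stdio.h>

int main(void)
{
	unsigned short ebda_seg = 0x9fc0;	/* illustrative value read from 0x40E */
	unsigned long phys = (unsigned long)ebda_seg << 4;

	printf("EBDA at 0x%lx\n", phys);	/* 0x9fc00, just below 640K */
	return 0;
}
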
++
++/*
++ * - Intel MP Configuration Table
++ */
++void __init find_smp_config (void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++ find_intel_smp();
++#endif
++}
++
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_id == -1U)
++ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __cpuinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (id >= MAX_APICS) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_start;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_start)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ mp_ioapics[idx].mpc_apicid = id;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_start = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_start,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
++
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++ /*
++	 * Use the default configuration for IRQs 0-15, unless
++	 * overridden by (MADT) interrupt source override entries.
++ */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++ /* Do we already have a mapping for this IOAPIC pin */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++
++ return;
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi(u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++ /*
++	 * Mapping from Global System Interrupts, which
++ * represent all possible interrupts, to the IRQs
++ * assigned to actual devices.
++ */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
++ return gsi;
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++			 * avoid a problem where the 8254 timer (IRQ0) is set up
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
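
The pin_programmed bookkeeping packs one bit per IOAPIC pin into four 32-bit words, enough for pins 0..127 (MP_MAX_IOAPIC_PIN). A toy version of the test-and-set logic with a hypothetical pin number:

#include <stdio.h>

int main(void)
{
	unsigned int pin_programmed[4] = { 0, 0, 0, 0 };
	unsigned int pin = 42;			/* hypothetical IOAPIC pin */
	unsigned int idx = pin / 32, bit = pin % 32;

	if (pin_programmed[idx] & (1u << bit)) {
		printf("pin %u already programmed\n", pin);
	} else {
		pin_programmed[idx] |= 1u << bit;	/* first caller programs the pin */
		printf("programmed pin %u (word %u, bit %u)\n", pin, idx, bit);
	}
	return 0;
}
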
++
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/pci-swiotlb-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/pci-swiotlb-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/pci-swiotlb-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/pci-swiotlb-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,55 @@
++/* Glue code to lib/swiotlb.c */
++
++#include <linux/pci.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/dma-mapping.h>
++#include <asm/proto.h>
++#include <asm/swiotlb.h>
++#include <asm/dma.h>
++
++#if 0
++int swiotlb __read_mostly;
++EXPORT_SYMBOL(swiotlb);
++#endif
++
++struct dma_mapping_ops swiotlb_dma_ops = {
++#if 0
++ .mapping_error = swiotlb_dma_mapping_error,
++ .alloc_coherent = swiotlb_alloc_coherent,
++ .free_coherent = swiotlb_free_coherent,
++ .map_single = swiotlb_map_single,
++ .unmap_single = swiotlb_unmap_single,
++ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = swiotlb_sync_single_for_device,
++ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
++ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
++ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = swiotlb_sync_sg_for_device,
++ .map_sg = swiotlb_map_sg,
++ .unmap_sg = swiotlb_unmap_sg,
++ .dma_supported = NULL,
++#endif
++};
++
++void pci_swiotlb_init(void)
++{
++#if 0
++ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
++ if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
++ swiotlb = 1;
++ if (swiotlb_force)
++ swiotlb = 1;
++ if (swiotlb) {
++ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++ swiotlb_init();
++ dma_ops = &swiotlb_dma_ops;
++ }
++#else
++ swiotlb_init();
++ if (swiotlb) {
++ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
++ dma_ops = &swiotlb_dma_ops;
++ }
++#endif
++}
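
Under Xen the #else branch runs unconditionally: swiotlb_init() is always called, and if the bounce buffer came up, dma_ops is pointed at the swiotlb table so every mapping request goes through it. A toy model of that ops-table indirection (signatures simplified; this is not the real 2.6.18 dma_mapping_ops layout):

#include <stdio.h>

struct dma_mapping_ops {
	void (*map_single)(void *ptr, unsigned long size);
};

static void swiotlb_map_single(void *ptr, unsigned long size)
{
	printf("bounce-buffering %lu bytes at %p\n", size, ptr);
}

static struct dma_mapping_ops swiotlb_ops = {
	.map_single = swiotlb_map_single,
};

static struct dma_mapping_ops *dma_ops;

int main(void)
{
	char buf[64];

	dma_ops = &swiotlb_ops;		/* what pci_swiotlb_init() effectively does */
	dma_ops->map_single(buf, sizeof(buf));
	return 0;
}
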
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/process-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/process-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/process-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/process-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,848 @@
++/*
++ * linux/arch/x86-64/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ *
++ * X86-64 port
++ * Andi Kleen.
++ *
++ * CPU hotplug support - ashok.raj@intel.com
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling.
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/module.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/random.h>
++#include <linux/notifier.h>
++#include <linux/kprobes.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/mmu_context.h>
++#include <asm/pda.h>
++#include <asm/prctl.h>
++#include <asm/kdebug.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/hardirq.h>
++#include <asm/ia32.h>
++#include <asm/idle.h>
++
++#include <xen/cpu_hotplug.h>
++
++asmlinkage extern void ret_from_fork(void);
++
++unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Power-management idle function, if any.
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++
++void idle_notifier_register(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
++
++void idle_notifier_unregister(struct notifier_block *n)
++{
++ atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL(idle_notifier_unregister);
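
idle_notifier_register()/idle_notifier_unregister() are thin wrappers over an atomic notifier chain; enter_idle() and __exit_idle() below broadcast IDLE_START/IDLE_END to every registered block. A self-contained toy model of that chain (no locking, unlike the kernel's atomic_notifier_* helpers):

#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
};

static struct notifier_block *idle_chain;

static void toy_register(struct notifier_block *n)
{
	n->next = idle_chain;
	idle_chain = n;
}

static void toy_call_chain(unsigned long event)
{
	struct notifier_block *n;

	for (n = idle_chain; n; n = n->next)
		n->notifier_call(n, event, NULL);
}

static int report(struct notifier_block *nb, unsigned long ev, void *p)
{
	printf("idle event %lu\n", ev);
	return 0;
}

int main(void)
{
	struct notifier_block nb = { report, NULL };

	toy_register(&nb);
	toy_call_chain(0);	/* models IDLE_START */
	toy_call_chain(1);	/* models IDLE_END */
	return 0;
}
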
++
++enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
++static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
++
++void enter_idle(void)
++{
++ __get_cpu_var(idle_state) = CPU_IDLE;
++ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
++}
++
++static void __exit_idle(void)
++{
++ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
++ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
++}
++
++/* Called from interrupts to signify idle end */
++void exit_idle(void)
++{
++ if (current->pid | read_pda(irqcount))
++ return;
++ __exit_idle();
++}
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
++ asm volatile(
++ "2:"
++ "testl %0,%1;"
++ "rep; nop;"
++ "je 2b;"
++ : :
++ "i" (_TIF_NEED_RESCHED),
++ "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++ current_thread_info()->status |= TS_POLLING;
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++ rmb();
++ idle = xen_idle; /* no alternatives */
++ if (cpu_is_offline(smp_processor_id()))
++ play_dead();
++ enter_idle();
++ idle();
++ __exit_idle();
++ }
++
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) &&
++ !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++/* Prints also some state that isn't saved in the pt_regs */
++void __show_regs(struct pt_regs * regs)
++{
++ unsigned long fs, gs, shadowgs;
++ unsigned int fsindex,gsindex;
++ unsigned int ds,cs,es;
++
++ printk("\n");
++ print_modules();
++ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
++ current->pid, current->comm, print_tainted(),
++ system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
++ printk_address(regs->rip);
++ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
++ regs->eflags);
++ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++ regs->rax, regs->rbx, regs->rcx);
++ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++ regs->rdx, regs->rsi, regs->rdi);
++ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++ regs->rbp, regs->r8, regs->r9);
++ printk("R10: %016lx R11: %016lx R12: %016lx\n",
++ regs->r10, regs->r11, regs->r12);
++ printk("R13: %016lx R14: %016lx R15: %016lx\n",
++ regs->r13, regs->r14, regs->r15);
++
++ asm("mov %%ds,%0" : "=r" (ds));
++ asm("mov %%cs,%0" : "=r" (cs));
++ asm("mov %%es,%0" : "=r" (es));
++ asm("mov %%fs,%0" : "=r" (fsindex));
++ asm("mov %%gs,%0" : "=r" (gsindex));
++
++ rdmsrl(MSR_FS_BASE, fs);
++ rdmsrl(MSR_GS_BASE, gs);
++ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
++
++ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
++ fs,fsindex,gs,gsindex,shadowgs);
++ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
++
++}
++
++void show_regs(struct pt_regs *regs)
++{
++ printk("CPU %d:", smp_processor_id());
++ __show_regs(regs);
++ show_trace(NULL, regs, (void *)(regs + 1));
++}
++
++/*
++ * Free current thread data structures etc.
++ */
++void exit_thread(void)
++{
++ struct task_struct *me = current;
++ struct thread_struct *t = &me->thread;
++
++ if (me->thread.io_bitmap_ptr) {
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++#endif
++#ifdef CONFIG_XEN
++ struct physdev_set_iobitmap iobmp_op;
++ memset(&iobmp_op, 0, sizeof(iobmp_op));
++#endif
++
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ /*
++ * Careful, clear this in the TSS too:
++ */
++#ifndef CONFIG_X86_NO_TSS
++ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
++ put_cpu();
++#endif
++#ifdef CONFIG_XEN
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &iobmp_op));
++#endif
++ t->io_bitmap_max = 0;
++ }
++}
++
++void load_gs_index(unsigned gs)
++{
++ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs));
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++ struct thread_info *t = current_thread_info();
++
++ if (t->flags & _TIF_ABI_PENDING) {
++ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
++ if (t->flags & _TIF_IA32)
++ current_thread_info()->status |= TS_COMPAT;
++ }
++
++ tsk->thread.debugreg0 = 0;
++ tsk->thread.debugreg1 = 0;
++ tsk->thread.debugreg2 = 0;
++ tsk->thread.debugreg3 = 0;
++ tsk->thread.debugreg6 = 0;
++ tsk->thread.debugreg7 = 0;
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ if (dead_task->mm) {
++ if (dead_task->mm->context.size) {
++ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++ dead_task->comm,
++ dead_task->mm->context.ldt,
++ dead_task->mm->context.size);
++ BUG();
++ }
++ }
++}
++
++static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
++{
++ struct user_desc ud = {
++ .base_addr = addr,
++ .limit = 0xfffff,
++ .seg_32bit = 1,
++ .limit_in_pages = 1,
++ .useable = 1,
++ };
++ struct n_desc_struct *desc = (void *)t->thread.tls_array;
++ desc += tls;
++ desc->a = LDT_entry_a(&ud);
++ desc->b = LDT_entry_b(&ud);
++}
++
++static inline u32 read_32bit_tls(struct task_struct *t, int tls)
++{
++ struct desc_struct *desc = (void *)t->thread.tls_array;
++ desc += tls;
++ return desc->base0 |
++ (((u32)desc->base1) << 16) |
++ (((u32)desc->base2) << 24);
++}
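
read_32bit_tls() reassembles a descriptor base that the x86 descriptor format scatters across three fields: base0 (bits 15:0), base1 (bits 23:16) and base2 (bits 31:24). A sketch of the round trip with an arbitrary value:

#include <stdio.h>

int main(void)
{
	unsigned int base = 0xdeadbeef;
	unsigned short base0 = base & 0xffff;
	unsigned char base1 = (base >> 16) & 0xff;
	unsigned char base2 = (base >> 24) & 0xff;
	unsigned int again = base0 | ((unsigned int)base1 << 16) |
			     ((unsigned int)base2 << 24);

	printf("0x%x -> 0x%x\n", base, again);	/* identical */
	return 0;
}
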
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ int err;
++ struct pt_regs * childregs;
++ struct task_struct *me = current;
++
++ childregs = ((struct pt_regs *)
++ (THREAD_SIZE + task_stack_page(p))) - 1;
++ *childregs = *regs;
++
++ childregs->rax = 0;
++ childregs->rsp = rsp;
++ if (rsp == ~0UL)
++ childregs->rsp = (unsigned long)childregs;
++
++ p->thread.rsp = (unsigned long) childregs;
++ p->thread.rsp0 = (unsigned long) (childregs+1);
++ p->thread.userrsp = me->thread.userrsp;
++
++ set_tsk_thread_flag(p, TIF_FORK);
++
++ p->thread.fs = me->thread.fs;
++ p->thread.gs = me->thread.gs;
++
++ asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
++ asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
++ asm("mov %%es,%0" : "=m" (p->thread.es));
++ asm("mov %%ds,%0" : "=m" (p->thread.ds));
++
++ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++#ifdef CONFIG_IA32_EMULATION
++ if (test_thread_flag(TIF_IA32))
++ err = ia32_child_tls(p, childregs);
++ else
++#endif
++ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
++ if (err)
++ goto out;
++ }
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++static inline void __save_init_fpu( struct task_struct *tsk )
++{
++ asm volatile( "rex64 ; fxsave %0 ; fnclex"
++ : "=m" (tsk->thread.i387.fxsave));
++ tsk->thread_info->status &= ~TS_USEDFPU;
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * This could still be optimized:
++ * - fold all the options into a flag word and test it with a single test.
++ * - could test fs/gs bitsliced
++ *
++ * Kprobes not supported here. Set the probe on schedule instead.
++ */
++__kprobes struct task_struct *
++__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++#if CONFIG_XEN_COMPAT > 0x030002
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++#else
++ struct physdev_op _pdo[2], *pdo = _pdo;
++#define iopl_op pdo->u.set_iopl
++#define iobmp_op pdo->u.set_iobitmap
++#endif
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ * The AMD workaround requires it to be after DS reload, or
++ * after DS has been cleared, which we do in __prepare_arch_switch.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++
++ /*
++ * Reload esp0, LDT and the page table pointer:
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->rsp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ mcl->args[0] = virt_to_machine( \
++ &cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]); \
++ mcl->args[1] = next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
++ iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iopl;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iobitmap;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
++#endif
++ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++ BUG();
++
++ /*
++ * Switch DS and ES.
++ * This won't pick up thread selector changes, but I guess that is ok.
++ */
++ if (unlikely(next->es))
++ loadsegment(es, next->es);
++
++ if (unlikely(next->ds))
++ loadsegment(ds, next->ds);
++
++ /*
++ * Switch FS and GS.
++ */
++ if (unlikely(next->fsindex))
++ loadsegment(fs, next->fsindex);
++
++ if (next->fs)
++ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs));
++
++ if (unlikely(next->gsindex))
++ load_gs_index(next->gsindex);
++
++ if (next->gs)
++ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs));
++
++ /*
++ * Switch the PDA context.
++ */
++ prev->userrsp = read_pda(oldrsp);
++ write_pda(oldrsp, next->userrsp);
++ write_pda(pcurrent, next_p);
++ write_pda(kernelstack,
++ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++
++ /*
++ * Now maybe reload the debug registers
++ */
++ if (unlikely(next->debugreg7)) {
++ set_debugreg(next->debugreg0, 0);
++ set_debugreg(next->debugreg1, 1);
++ set_debugreg(next->debugreg2, 2);
++ set_debugreg(next->debugreg3, 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg6, 6);
++ set_debugreg(next->debugreg7, 7);
++ }
++
++ return prev_p;
++}
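
__switch_to() queues every privileged operation of the context switch (FPU handover, stack switch, TLS descriptor updates, iopl/iobitmap changes) into the on-stack _mcl array and submits them with a single HYPERVISOR_multicall_check(), trading several hypervisor traps for one. A toy model of that batch-then-issue pattern (the op codes and the issue function are stand-ins, not real hypercalls):

#include <stdio.h>

struct mc_entry {
	int op;
	unsigned long arg;
};

static void issue_batch(const struct mc_entry *mc, int n)
{
	int i;

	/* one "trap" flushes all queued operations */
	for (i = 0; i < n; i++)
		printf("op %d arg %#lx\n", mc[i].op, mc[i].arg);
}

int main(void)
{
	struct mc_entry _mcl[8], *mcl = _mcl;

	mcl->op = 1; mcl->arg = 0xdead0; mcl++;	/* e.g. stack switch */
	mcl->op = 2; mcl->arg = 1;       mcl++;	/* e.g. fpu taskswitch */
	issue_batch(_mcl, mcl - _mcl);
	return 0;
}
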
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage
++long sys_execve(char __user *name, char __user * __user *argv,
++ char __user * __user *envp, struct pt_regs regs)
++{
++ long error;
++ char * filename;
++
++ filename = getname(name);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ return error;
++ error = do_execve(filename, argv, envp, &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ }
++ putname(filename);
++ return error;
++}
++
++void set_personality_64bit(void)
++{
++ /* inherit personality from parent */
++
++ /* Make sure to be in 64bit mode */
++ clear_thread_flag(TIF_IA32);
++
++ /* TBD: overwrites user setup. Should have two bits.
++ But 64bit processes have always behaved this way,
++ so it's not too bad. The main problem is just that
++	   32bit children are affected again. */
++ current->personality &= ~READ_IMPLIES_EXEC;
++}
++
++asmlinkage long sys_fork(struct pt_regs *regs)
++{
++ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++}
++
++asmlinkage long
++sys_clone(unsigned long clone_flags, unsigned long newsp,
++ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++{
++ if (!newsp)
++ newsp = regs->rsp;
++ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage long sys_vfork(struct pt_regs *regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++ NULL, NULL);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long stack;
++ u64 fp,rip;
++ int count = 0;
++
++ if (!p || p == current || p->state==TASK_RUNNING)
++ return 0;
++ stack = (unsigned long)task_stack_page(p);
++ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++ return 0;
++ fp = *(u64 *)(p->thread.rsp);
++ do {
++ if (fp < (unsigned long)stack ||
++ fp > (unsigned long)stack+THREAD_SIZE)
++ return 0;
++ rip = *(u64 *)(fp+8);
++ if (!in_sched_functions(rip))
++ return rip;
++ fp = *(u64 *)fp;
++ } while (count++ < 16);
++ return 0;
++}
++
++long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
++{
++ int ret = 0;
++ int doit = task == current;
++ int cpu;
++
++ switch (code) {
++ case ARCH_SET_GS:
++ if (addr >= TASK_SIZE_OF(task))
++ return -EPERM;
++ cpu = get_cpu();
++ /* handle small bases via the GDT because that's faster to
++ switch. */
++ if (addr <= 0xffffffff) {
++ set_32bit_tls(task, GS_TLS, addr);
++ if (doit) {
++ load_TLS(&task->thread, cpu);
++ load_gs_index(GS_TLS_SEL);
++ }
++ task->thread.gsindex = GS_TLS_SEL;
++ task->thread.gs = 0;
++ } else {
++ task->thread.gsindex = 0;
++ task->thread.gs = addr;
++ if (doit) {
++ load_gs_index(0);
++ ret = HYPERVISOR_set_segment_base(
++ SEGBASE_GS_USER, addr);
++ }
++ }
++ put_cpu();
++ break;
++ case ARCH_SET_FS:
++ /* Not strictly needed for fs, but do it for symmetry
++ with gs */
++ if (addr >= TASK_SIZE_OF(task))
++ return -EPERM;
++ cpu = get_cpu();
++ /* handle small bases via the GDT because that's faster to
++ switch. */
++ if (addr <= 0xffffffff) {
++ set_32bit_tls(task, FS_TLS, addr);
++ if (doit) {
++ load_TLS(&task->thread, cpu);
++ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
++ }
++ task->thread.fsindex = FS_TLS_SEL;
++ task->thread.fs = 0;
++ } else {
++ task->thread.fsindex = 0;
++ task->thread.fs = addr;
++ if (doit) {
++ /* set the selector to 0 to not confuse
++ __switch_to */
++ asm volatile("movl %0,%%fs" :: "r" (0));
++ ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
++ addr);
++ }
++ }
++ put_cpu();
++ break;
++ case ARCH_GET_FS: {
++ unsigned long base;
++ if (task->thread.fsindex == FS_TLS_SEL)
++ base = read_32bit_tls(task, FS_TLS);
++ else if (doit)
++ rdmsrl(MSR_FS_BASE, base);
++ else
++ base = task->thread.fs;
++ ret = put_user(base, (unsigned long __user *)addr);
++ break;
++ }
++ case ARCH_GET_GS: {
++ unsigned long base;
++ unsigned gsindex;
++ if (task->thread.gsindex == GS_TLS_SEL)
++ base = read_32bit_tls(task, GS_TLS);
++ else if (doit) {
++ asm("movl %%gs,%0" : "=r" (gsindex));
++ if (gsindex)
++ rdmsrl(MSR_KERNEL_GS_BASE, base);
++ else
++ base = task->thread.gs;
++ }
++ else
++ base = task->thread.gs;
++ ret = put_user(base, (unsigned long __user *)addr);
++ break;
++ }
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++long sys_arch_prctl(int code, unsigned long addr)
++{
++ return do_arch_prctl(current, code, addr);
++}
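
From user space this syscall is reached via arch_prctl(2); the glibc of that era did not wrap it, so callers went through syscall(2). A hedged sketch for x86-64 Linux, using the constants from <asm/prctl.h>:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_GET_FS, ARCH_SET_FS, ... */

int main(void)
{
	unsigned long fsbase;

	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
		printf("fs base = %#lx\n", fsbase);	/* typically the TLS block */
	return 0;
}
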
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs *pp, ptregs;
++
++ pp = task_pt_regs(tsk);
++
++ ptregs = *pp;
++ ptregs.cs &= 0xffff;
++ ptregs.ss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/relocate_kernel.S linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/relocate_kernel.S
+--- linux-2.6.18.8/arch/x86_64/kernel/relocate_kernel.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/relocate_kernel.S 2008-08-21 11:36:07.000000000 +0200
+@@ -7,31 +7,195 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/page.h>
++#include <asm/kexec.h>
+
+- /*
+- * Must be relocatable PIC code callable as a C function, that once
+- * it starts can not use the previous processes stack.
+- */
+- .globl relocate_new_kernel
++/*
++ * Must be relocatable PIC code callable as a C function
++ */
++
++#define PTR(x) (x << 3)
++#define PAGE_ALIGNED (1 << PAGE_SHIFT)
++#define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
++
++ .text
++ .align PAGE_ALIGNED
+ .code64
++ .globl relocate_kernel
++relocate_kernel:
++ /* %rdi indirection_page
++ * %rsi page_list
++ * %rdx start address
++ */
++
++ /* map the control page at its virtual address */
++
++ movq $0x0000ff8000000000, %r10 /* mask */
++ mov $(39 - 3), %cl /* bits to shift */
++ movq PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PGD)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_PUD_0)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ shrq $9, %r10
++ sub $9, %cl
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PUD_0)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_PMD_0)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ shrq $9, %r10
++ sub $9, %cl
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PMD_0)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_PTE_0)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ shrq $9, %r10
++ sub $9, %cl
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PTE_0)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ /* identity map the control page at its physical address */
++
++ movq $0x0000ff8000000000, %r10 /* mask */
++ mov $(39 - 3), %cl /* bits to shift */
++ movq PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PGD)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_PUD_1)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ shrq $9, %r10
++ sub $9, %cl
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PUD_1)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_PMD_1)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ shrq $9, %r10
++ sub $9, %cl
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PMD_1)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_PTE_1)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
++ shrq $9, %r10
++ sub $9, %cl
++
++ movq %r11, %r9
++ andq %r10, %r9
++ shrq %cl, %r9
++
++ movq PTR(VA_PTE_1)(%rsi), %r8
++ addq %r8, %r9
++ movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
++ orq $PAGE_ATTR, %r8
++ movq %r8, (%r9)
++
+ relocate_new_kernel:
+- /* %rdi page_list
+- * %rsi reboot_code_buffer
++ /* %rdi indirection_page
++ * %rsi page_list
+ * %rdx start address
+- * %rcx page_table
+- * %r8 arg5
+- * %r9 arg6
+ */
+
+ /* zero out flags, and disable interrupts */
+ pushq $0
+ popfq
+
+- /* set a new stack at the bottom of our page... */
+- lea 4096(%rsi), %rsp
+-
+- /* store the parameters back on the stack */
+- pushq %rdx /* store the start address */
++ /* get physical address of control page now */
++ /* this is impossible after page table switch */
++ movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
++
++ /* get physical address of page table now too */
++ movq PTR(PA_TABLE_PAGE)(%rsi), %rcx
++
++ /* switch to new set of page tables */
++ movq PTR(PA_PGD)(%rsi), %r9
++ movq %r9, %cr3
++
++ /* setup idt */
++ movq %r8, %rax
++ addq $(idt_80 - relocate_kernel), %rax
++ lidtq (%rax)
++
++ /* setup gdt */
++ movq %r8, %rax
++ addq $(gdt - relocate_kernel), %rax
++ movq %r8, %r9
++ addq $((gdt_80 - relocate_kernel) + 2), %r9
++ movq %rax, (%r9)
++
++ movq %r8, %rax
++ addq $(gdt_80 - relocate_kernel), %rax
++ lgdtq (%rax)
++
++ /* setup data segment registers */
++ xorl %eax, %eax
++ movl %eax, %ds
++ movl %eax, %es
++ movl %eax, %fs
++ movl %eax, %gs
++ movl %eax, %ss
++
++ /* setup a new stack at the end of the physical control page */
++ lea 4096(%r8), %rsp
++
++ /* load new code segment and jump to identity mapped page */
++ movq %r8, %rax
++ addq $(identity_mapped - relocate_kernel), %rax
++ pushq $(gdt_cs - gdt)
++ pushq %rax
++ lretq
++
++identity_mapped:
++ /* store the start address on the stack */
++ pushq %rdx
+
+ /* Set cr0 to a known state:
+ * 31 1 == Paging enabled
+@@ -134,10 +298,19 @@ relocate_new_kernel:
+ xorq %r13, %r13
+ xorq %r14, %r14
+ xorq %r15, %r15
+-
+ ret
+-relocate_new_kernel_end:
+
+- .globl relocate_new_kernel_size
+-relocate_new_kernel_size:
+- .quad relocate_new_kernel_end - relocate_new_kernel
++ .align 16
++gdt:
++ .quad 0x0000000000000000 /* NULL descriptor */
++gdt_cs:
++ .quad 0x00af9a000000ffff
++gdt_end:
++
++gdt_80:
++ .word gdt_end - gdt - 1 /* limit */
++ .quad 0 /* base - filled in by code above */
++
++idt_80:
++ .word 0 /* limit */
++ .quad 0 /* base */
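
The page-table walk at the top of relocate_kernel masks bits 47:39 of the virtual address (mask 0x0000ff8000000000) and shifts right by 39 - 3, which yields the PGD index already multiplied by 8, i.e. the byte offset of the entry within the page; each lower level then repeats the pattern via the shrq $9 / sub $9 pairs, narrowing the window by 9 bits per level. A sketch of that arithmetic in C:

#include <stdio.h>

int main(void)
{
	unsigned long va = 0xffffffff80200000UL;	/* illustrative kernel address */
	unsigned long mask = 0x0000ff8000000000UL;	/* bits 47:39 */
	int cl = 39 - 3;

	/* the masked shift yields index * 8, the byte offset of the PGD entry */
	printf("pgd entry offset = %#lx (index %lu)\n",
	       (va & mask) >> cl, ((va & mask) >> cl) / 8);	/* 0xff8, 511 */
	return 0;
}
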
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/setup64-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/setup64-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/setup64-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/setup64-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,367 @@
++/*
++ * X86-64 specific CPU setup.
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
++ * See setup.c for older changelog.
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ *
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/bootmem.h>
++#include <linux/bitops.h>
++#include <linux/module.h>
++#include <asm/bootsetup.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/mmu_context.h>
++#include <asm/smp.h>
++#include <asm/i387.h>
++#include <asm/percpu.h>
++#include <asm/proto.h>
++#include <asm/sections.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
++
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
++struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
++
++#ifndef CONFIG_X86_NO_IDT
++struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
++#endif
++
++char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++
++unsigned long __supported_pte_mask __read_mostly = ~0UL;
++EXPORT_SYMBOL(__supported_pte_mask);
++static int do_not_nx __cpuinitdata = 0;
++
++/* noexec=on|off
++Control non-executable mappings for 64bit processes.
++
++on	Enable (default)
++off Disable
++*/
++int __init nonx_setup(char *str)
++{
++ if (!strncmp(str, "on", 2)) {
++ __supported_pte_mask |= _PAGE_NX;
++ do_not_nx = 0;
++ } else if (!strncmp(str, "off", 3)) {
++ do_not_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++ return 1;
++}
++__setup("noexec=", nonx_setup); /* parsed early actually */
++
++int force_personality32 = 0;
++
++/* noexec32=on|off
++Control non-executable heap for 32bit processes.
++To control the stack too use noexec=off
++
++on PROT_READ does not imply PROT_EXEC for 32bit processes
++off PROT_READ implies PROT_EXEC (default)
++*/
++static int __init nonx32_setup(char *str)
++{
++ if (!strcmp(str, "on"))
++ force_personality32 &= ~READ_IMPLIES_EXEC;
++ else if (!strcmp(str, "off"))
++ force_personality32 |= READ_IMPLIES_EXEC;
++ return 1;
++}
++__setup("noexec32=", nonx32_setup);
++
++/*
++ * Great future plan:
++ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
++ * Always point %gs to its beginning
++ */
++void __init setup_per_cpu_areas(void)
++{
++ int i;
++ unsigned long size;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ prefill_possible_map();
++#endif
++
++ /* Copy section for each CPU (we discard the original) */
++ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
++#ifdef CONFIG_MODULES
++ if (size < PERCPU_ENOUGH_ROOM)
++ size = PERCPU_ENOUGH_ROOM;
++#endif
++
++ for_each_cpu_mask (i, cpu_possible_map) {
++ char *ptr;
++
++ if (!NODE_DATA(cpu_to_node(i))) {
++ printk("cpu with no node %d, num_online_nodes %d\n",
++ i, num_online_nodes());
++ ptr = alloc_bootmem(size);
++ } else {
++ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++ }
++ if (!ptr)
++ panic("Cannot allocate cpu data for CPU %d\n", i);
++ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
++ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++ }
++}
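
setup_per_cpu_areas() gives each possible CPU its own copy of the master per-cpu section and records copy-minus-master in data_offset; a per-CPU access then adds that offset to the variable's section address. A toy model of the scheme (the pointer subtraction mirrors the kernel's ptr - __per_cpu_start trick and is strictly out-of-range arithmetic in ISO C; fine here as a model):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char master[32] = "per-cpu template";

int main(void)
{
	long data_offset[2];
	int cpu;

	for (cpu = 0; cpu < 2; cpu++) {
		char *copy = malloc(sizeof(master));

		memcpy(copy, master, sizeof(master));
		data_offset[cpu] = copy - master;	/* what data_offset records */
	}
	for (cpu = 0; cpu < 2; cpu++)
		printf("cpu%d reads \"%s\" at offset %+ld\n",
		       cpu, master + data_offset[cpu], data_offset[cpu]);
	return 0;
}
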
++
++#ifdef CONFIG_XEN
++static void switch_pt(void)
++{
++ xen_pt_switch(__pa_symbol(init_level4_pgt));
++ xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
++}
++
++static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) /
++ sizeof (struct desc_struct)))
++ BUG();
++}
++#else
++static void switch_pt(void)
++{
++ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++}
++
++static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
++{
++ asm volatile("lgdt %0" :: "m" (*gdt_descr));
++ asm volatile("lidt %0" :: "m" (idt_descr));
++}
++#endif
++
++void pda_init(int cpu)
++{
++ struct x8664_pda *pda = cpu_pda(cpu);
++
++	/* Set up data that may be needed by __get_free_pages early */
++ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
++#ifndef CONFIG_XEN
++ wrmsrl(MSR_GS_BASE, pda);
++#else
++ if (HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
++ (unsigned long)pda))
++ BUG();
++#endif
++ pda->cpunumber = cpu;
++ pda->irqcount = -1;
++ pda->kernelstack =
++ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
++ pda->active_mm = &init_mm;
++ pda->mmu_state = 0;
++
++ if (cpu == 0) {
++#ifdef CONFIG_XEN
++ xen_init_pt();
++#endif
++ /* others are initialized in smpboot.c */
++ pda->pcurrent = &init_task;
++ pda->irqstackptr = boot_cpu_stack;
++ } else {
++ pda->irqstackptr = (char *)
++ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
++ if (!pda->irqstackptr)
++ panic("cannot allocate irqstack for cpu %d", cpu);
++ }
++
++ switch_pt();
++
++ pda->irqstackptr += IRQSTACKSIZE-64;
++}
++
++#ifndef CONFIG_X86_NO_TSS
++char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
++__attribute__((section(".bss.page_aligned")));
++#endif
++
++/* May not be marked __init: used by software suspend */
++void syscall_init(void)
++{
++#ifndef CONFIG_XEN
++ /*
++ * LSTAR and STAR live in a somewhat strange symbiosis.
++ * They both write to the same internal register. STAR allows setting CS/DS,
++ * but only a 32bit target; LSTAR sets the 64bit rip.
++ */
++ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
++ wrmsrl(MSR_LSTAR, system_call);
++
++ /* Flags to clear on syscall */
++ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
++#endif
++#ifdef CONFIG_IA32_EMULATION
++ syscall32_cpu_init ();
++#endif
++}
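++/*
++ * Layout example for the MSR_STAR write above: syscall loads CS from
++ * bits 47:32 (__KERNEL_CS here), while sysret loads CS from bits 63:48
++ * plus 16 for a 64bit return, and SS from the same field plus 8 --
++ * which is why __USER32_CS goes into the top word.
++ */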
++
++void __cpuinit check_efer(void)
++{
++ unsigned long efer;
++
++ rdmsrl(MSR_EFER, efer);
++ if (!(efer & EFER_NX) || do_not_nx) {
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
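++/*
++ * Example: on a CPU without the NX feature, EFER.NX reads back as 0 and
++ * check_efer() strips _PAGE_NX just as "noexec=off" would; leaving bit
++ * 63 set in a PTE on such a CPU would trigger a reserved-bit fault.
++ */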
++
++unsigned long kernel_eflags;
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ * A lot of state is already set up in PDA init.
++ */
++void __cpuinit cpu_init (void)
++{
++ int cpu = stack_smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
++ unsigned long v;
++ char *estacks = NULL;
++ unsigned i;
++#endif
++ struct task_struct *me;
++
++ /* CPU 0 is initialised in head64.c */
++ if (cpu != 0) {
++ pda_init(cpu);
++ zap_low_mappings(cpu);
++ }
++#ifndef CONFIG_X86_NO_TSS
++ else
++ estacks = boot_exception_stacks;
++#endif
++
++ me = current;
++
++ if (cpu_test_and_set(cpu, cpu_initialized))
++ panic("CPU#%d already initialized!\n", cpu);
++
++ printk("Initializing CPU#%d\n", cpu);
++
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++#ifndef CONFIG_XEN
++ if (cpu)
++ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
++#endif
++
++ cpu_gdt_descr[cpu].size = GDT_SIZE;
++ cpu_gdt_init(&cpu_gdt_descr[cpu]);
++
++ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
++ syscall_init();
++
++ wrmsrl(MSR_FS_BASE, 0);
++ wrmsrl(MSR_KERNEL_GS_BASE, 0);
++ barrier();
++
++ check_efer();
++
++#ifndef CONFIG_X86_NO_TSS
++ /*
++ * set up and load the per-CPU TSS
++ */
++ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++ if (cpu) {
++ static const unsigned int order[N_EXCEPTION_STACKS] = {
++ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
++ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
++ };
++
++ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
++ if (!estacks)
++ panic("Cannot allocate exception stack %ld %d\n",
++ v, cpu);
++ }
++ switch (v + 1) {
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ case DEBUG_STACK:
++ cpu_pda(cpu)->debugstack = (unsigned long)estacks;
++ estacks += DEBUG_STKSZ;
++ break;
++#endif
++ default:
++ estacks += EXCEPTION_STKSZ;
++ break;
++ }
++ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++ }
++
++ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++ /*
++ * <= is required because the CPU will access up to
++ * 8 bits beyond the end of the IO permission bitmap.
++ */
++ for (i = 0; i <= IO_BITMAP_LONGS; i++)
++ t->io_bitmap[i] = ~0UL;
++#endif
++
++ atomic_inc(&init_mm.mm_count);
++ me->active_mm = &init_mm;
++ if (me->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, me);
++
++#ifndef CONFIG_X86_NO_TSS
++ set_tss_desc(cpu, t);
++#endif
++#ifndef CONFIG_XEN
++ load_TR_desc();
++#endif
++ load_LDT(&init_mm.context);
++
++ /*
++ * Clear all 6 debug registers:
++ */
++
++ set_debugreg(0UL, 0);
++ set_debugreg(0UL, 1);
++ set_debugreg(0UL, 2);
++ set_debugreg(0UL, 3);
++ set_debugreg(0UL, 6);
++ set_debugreg(0UL, 7);
++
++ fpu_init();
++
++ raw_local_save_flags(kernel_eflags);
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/setup.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/setup.c
+--- linux-2.6.18.8/arch/x86_64/kernel/setup.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/setup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -122,7 +122,9 @@ struct resource standard_io_resources[]
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "timer1", .start = 0x50, .end = 0x53,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+- { .name = "keyboard", .start = 0x60, .end = 0x6f,
++ { .name = "keyboard", .start = 0x60, .end = 0x60,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "keyboard", .start = 0x64, .end = 0x64,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/setup-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/setup-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/setup-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/setup-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1654 @@
++/*
++ * linux/arch/x86-64/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Nov 2001 Dave Jones <davej@suse.de>
++ * Forked from i386 setup code.
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/ptrace.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <linux/console.h>
++#include <linux/seq_file.h>
++#include <linux/crash_dump.h>
++#include <linux/root_dev.h>
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/kallsyms.h>
++#include <linux/edd.h>
++#include <linux/mmzone.h>
++#include <linux/kexec.h>
++#include <linux/cpufreq.h>
++#include <linux/dmi.h>
++#include <linux/dma-mapping.h>
++#include <linux/ctype.h>
++
++#include <asm/mtrr.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/msr.h>
++#include <asm/desc.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/dma.h>
++#include <asm/mpspec.h>
++#include <asm/mmu_context.h>
++#include <asm/bootsetup.h>
++#include <asm/proto.h>
++#include <asm/setup.h>
++#include <asm/mach_apic.h>
++#include <asm/numa.h>
++#include <asm/sections.h>
++#include <asm/dmi.h>
++#ifdef CONFIG_XEN
++#include <linux/percpu.h>
++#include <xen/interface/physdev.h>
++#include "setup_arch_pre.h"
++#include <asm/hypervisor.h>
++#include <xen/interface/nmi.h>
++#include <xen/features.h>
++#include <xen/firmware.h>
++#include <xen/xencons.h>
++#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
++#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
++#include <asm/mach-xen/setup_arch_post.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++extern unsigned long start_pfn;
++extern struct edid_info edid_info;
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++#endif
++
++/*
++ * Machine setup..
++ */
++
++struct cpuinfo_x86 boot_cpu_data __read_mostly;
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++int acpi_disabled;
++EXPORT_SYMBOL(acpi_disabled);
++#ifdef CONFIG_ACPI
++extern int __initdata acpi_ht;
++extern acpi_interrupt_flags acpi_sci_flags;
++int __initdata acpi_force = 0;
++#endif
++
++int acpi_numa __initdata;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++unsigned long saved_video_mode;
++
++/*
++ * Early DMI memory
++ */
++int dmi_alloc_index;
++char dmi_alloc_data[DMI_MAX_DATA];
++
++/*
++ * Setup options
++ */
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern int root_mountflags;
++
++char command_line[COMMAND_LINE_SIZE];
++
++struct resource standard_io_resources[] = {
++ { .name = "dma1", .start = 0x00, .end = 0x1f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "pic1", .start = 0x20, .end = 0x21,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "timer0", .start = 0x40, .end = 0x43,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "timer1", .start = 0x50, .end = 0x53,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "keyboard", .start = 0x60, .end = 0x60,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "keyboard", .start = 0x64, .end = 0x64,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "pic2", .start = 0xa0, .end = 0xa1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "dma2", .start = 0xc0, .end = 0xdf,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "fpu", .start = 0xf0, .end = 0xff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
++};
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++
++struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_RAM,
++};
++struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_RAM,
++};
++
++#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource adapter_rom_resources[] = {
++ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM }
++};
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_RAM,
++};
++
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
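++/*
++ * Example: a valid option ROM's bytes must sum to 0 mod 256, so a two
++ * byte ROM of { 0x55, 0xab } passes (0x55 + 0xab == 0x100, low byte 0)
++ * while { 0x55, 0xaa } does not.
++ */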
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/* Check for full argument with no trailing characters */
++static int fullarg(char *p, char *arg)
++{
++ int l = strlen(arg);
++ return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++}
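++/* Example: fullarg("acpi=off quiet", "acpi=off") matches, while
++ fullarg("acpi=offx", "acpi=off") does not, because 'x' is neither
++ NUL nor whitespace. */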
++
++static __init void parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = COMMAND_LINE;
++ int len = 0;
++ int userdef = 0;
++
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++
++#ifdef CONFIG_SMP
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter init */
++ if (fullarg(from,"acpi=off"))
++ disable_acpi();
++
++ if (fullarg(from, "acpi=force")) {
++ /* add later when we do DMI horrors: */
++ acpi_force = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=ht just means: do ACPI MADT parsing
++ at bootup, but don't enable the full ACPI interpreter */
++ if (fullarg(from, "acpi=ht")) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++ else if (fullarg(from, "pci=noacpi"))
++ acpi_disable_pci();
++ else if (fullarg(from, "acpi=noirq"))
++ acpi_noirq_set();
++
++ else if (fullarg(from, "acpi_sci=edge"))
++ acpi_sci_flags.trigger = 1;
++ else if (fullarg(from, "acpi_sci=level"))
++ acpi_sci_flags.trigger = 3;
++ else if (fullarg(from, "acpi_sci=high"))
++ acpi_sci_flags.polarity = 1;
++ else if (fullarg(from, "acpi_sci=low"))
++ acpi_sci_flags.polarity = 3;
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (fullarg(from, "acpi=strict")) {
++ acpi_strict = 1;
++ }
++#ifdef CONFIG_X86_IO_APIC
++ else if (fullarg(from, "acpi_skip_timer_override"))
++ acpi_skip_timer_override = 1;
++#endif
++#endif
++
++#ifndef CONFIG_XEN
++ if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
++ clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++ disable_apic = 1;
++ }
++
++ if (fullarg(from, "noapic"))
++ skip_ioapic_setup = 1;
++
++ if (fullarg(from,"apic")) {
++ skip_ioapic_setup = 0;
++ ioapic_force = 1;
++ }
++#endif
++
++ if (!memcmp(from, "mem=", 4))
++ parse_memopt(from+4, &from);
++
++ if (!memcmp(from, "memmap=", 7)) {
++ /* exactmap option is for user-defined memory */
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ saved_max_pfn = e820_end_of_ram();
++#endif
++ from += 8+7;
++ end_pfn_map = 0;
++ e820.nr_map = 0;
++ userdef = 1;
++ }
++ else {
++ parse_memmapopt(from+7, &from);
++ userdef = 1;
++ }
++ }
++
++#ifdef CONFIG_NUMA
++ if (!memcmp(from, "numa=", 5))
++ numa_setup(from+5);
++#endif
++
++ if (!memcmp(from,"iommu=",6)) {
++ iommu_setup(from+6);
++ }
++
++ if (fullarg(from,"oops=panic"))
++ panic_on_oops = 1;
++
++ if (!memcmp(from, "noexec=", 7))
++ nonx_setup(from + 7);
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++ * that Linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
++ }
++#endif
++
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel. This option will be passed
++ * by kexec loader to the capture kernel.
++ */
++ else if(!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
++ else if (!memcmp(from, "additional_cpus=", 16))
++ setup_additional_cpus(from+16);
++#endif
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ e820_print_map("user");
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++}
++
++#ifndef CONFIG_NUMA
++static void __init
++contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long bootmap_size, bootmap;
++
++ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
++ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
++ if (bootmap == -1L)
++ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++#ifdef CONFIG_XEN
++ e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
++#else
++ e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
++#endif
++ reserve_bootmem(bootmap, bootmap_size);
++}
++#endif
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++#ifndef CONFIG_XEN
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#endif
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++#ifndef CONFIG_XEN
++#define EBDA_ADDR_POINTER 0x40E
++
++unsigned __initdata ebda_addr;
++unsigned __initdata ebda_size;
++
++static void discover_ebda(void)
++{
++ /*
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E
++ */
++ ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
++ ebda_addr <<= 4;
++
++ ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
++
++ /* Round EBDA up to pages */
++ if (ebda_size == 0)
++ ebda_size = 1;
++ ebda_size <<= 10;
++ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
++ if (ebda_size > 64*1024)
++ ebda_size = 64*1024;
++}
++#else
++#define discover_ebda() ((void)0)
++#endif
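++/*
++ * Worked example for discover_ebda(): a BIOS storing segment 0x9fc0 at
++ * 0x40E gives ebda_addr = 0x9fc0 << 4 = 0x9fc00; an EBDA size word of 1
++ * (in KB) becomes 1 << 10 = 0x400 bytes, which together with the 0xc00
++ * sub-page offset rounds up to exactly one page, well under the 64K cap.
++ */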
++
++void __init setup_arch(char **cmdline_p)
++{
++#ifdef CONFIG_XEN
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
++ screen_info = SCREEN_INFO;
++
++ if (is_initial_xendomain()) {
++ const struct dom0_vga_console_info *info =
++ (void *)((char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++
++ dom0_init_screen_info(info,
++ xen_start_info->console.dom0.info_size);
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++ copy_edid();
++
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables));
++
++ ARCH_SETUP
++#else
++ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
++ screen_info = SCREEN_INFO;
++ edid_info = EDID_INFO;
++#endif /* !CONFIG_XEN */
++ saved_video_mode = SAVED_VIDEO_MODE;
++ bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++ setup_memory_region();
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) &_text;
++ init_mm.end_code = (unsigned long) &_etext;
++ init_mm.end_data = (unsigned long) &_edata;
++ init_mm.brk = (unsigned long) &_end;
++
++ code_resource.start = virt_to_phys(&_text);
++ code_resource.end = virt_to_phys(&_etext)-1;
++ data_resource.start = virt_to_phys(&_etext);
++ data_resource.end = virt_to_phys(&_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++ early_identify_cpu(&boot_cpu_data);
++
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ end_pfn = e820_end_of_ram();
++ num_physpages = end_pfn; /* for pfn_valid */
++
++ check_efer();
++
++ discover_ebda();
++
++ init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_ACPI_NUMA
++ /*
++ * Parse SRAT to discover nodes.
++ */
++ acpi_numa_init();
++#endif
++
++#ifdef CONFIG_NUMA
++ numa_initmem_init(0, end_pfn);
++#else
++ contig_initmem_init(0, end_pfn);
++#endif
++
++#ifdef CONFIG_XEN
++ /*
++ * Reserve kernel, physmap, start info, initial page tables, and
++ * direct mapping.
++ */
++ reserve_bootmem_generic(__pa_symbol(&_text),
++ (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
++#else
++ /* Reserve direct mapping */
++ reserve_bootmem_generic(table_start << PAGE_SHIFT,
++ (table_end - table_start) << PAGE_SHIFT);
++
++ /* reserve kernel */
++ reserve_bootmem_generic(__pa_symbol(&_text),
++ __pa_symbol(&_end) - __pa_symbol(&_text));
++
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem_generic(0, PAGE_SIZE);
++
++ /* reserve ebda region */
++ if (ebda_addr)
++ reserve_bootmem_generic(ebda_addr, ebda_size);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
++
++ /* Reserve SMP trampoline */
++ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
++#endif
++#endif
++
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#ifdef CONFIG_XEN
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ } else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ (unsigned long)(INITRD_START + INITRD_SIZE),
++ (unsigned long)(end_pfn << PAGE_SHIFT));
++ initrd_start = 0;
++ }
++ }
++#endif
++#else /* CONFIG_XEN */
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (LOADER_TYPE && INITRD_START) {
++ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++ initrd_start =
++ INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
++ initrd_end = initrd_start+INITRD_SIZE;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ (unsigned long)(INITRD_START + INITRD_SIZE),
++ (unsigned long)(end_pfn << PAGE_SHIFT));
++ initrd_start = 0;
++ }
++ }
++#endif
++#endif /* !CONFIG_XEN */
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end) {
++ reserve_bootmem_generic(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++ }
++#endif
++#endif
++
++ paging_init();
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++#ifdef CONFIG_XEN
++ {
++ int i, j, k, fpp;
++ unsigned long p2m_pages;
++
++ p2m_pages = end_pfn;
++ if (xen_start_info->nr_pages > end_pfn) {
++ /*
++ * the end_pfn was shrunk (probably by mem= or highmem=
++ * kernel parameter); shrink reservation with the HV
++ */
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ unsigned int difference;
++ int ret;
++
++ difference = xen_start_info->nr_pages - end_pfn;
++
++ set_xen_guest_handle(reservation.extent_start,
++ ((unsigned long *)xen_start_info->mfn_list) + end_pfn);
++ reservation.nr_extents = difference;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON (ret != difference);
++ }
++ else if (end_pfn > xen_start_info->nr_pages)
++ p2m_pages = xen_start_info->nr_pages;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Make sure we have a large enough P->M table. */
++ phys_to_machine_mapping = alloc_bootmem_pages(
++ end_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ end_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ p2m_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++ * Initialise the two-level list: pfn_to_mfn_frame_list names
++ * the frames that make up the p2m table, and
++ * pfn_to_mfn_frame_list_list names those list frames in turn.
++ * Used by save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=fpp);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
++
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
++ }
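++ /*
++ * Capacity sketch for the two-level p2m list built above: with 4K
++ * pages fpp = 512, so up to 512 list frames of 512 slots each name
++ * the p2m frames, and each p2m frame covers 512 pfns -- 512^3 pages,
++ * i.e. up to 512GB of guest memory.
++ */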
++
++ if (!is_initial_xendomain()) {
++ acpi_disabled = 1;
++#ifdef CONFIG_ACPI
++ acpi_ht = 0;
++#endif
++ }
++#endif
++
++#ifndef CONFIG_XEN
++ check_ioapic();
++#endif
++
++ zap_low_mappings(0);
++
++ /*
++ * set this early, so we don't allocate cpu0
++ * if the MADT list doesn't list the BSP first.
++ * mpparse.c/MP_processor_info() allocates logical cpu numbers.
++ */
++ cpu_set(0, cpu_present_map);
++#ifdef CONFIG_ACPI
++ /*
++ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
++ * Call this early for SRAT node setup.
++ */
++ acpi_boot_table_init();
++
++ /*
++ * Read APIC and some other early information from ACPI tables.
++ */
++ acpi_boot_init();
++#endif
++
++ init_cpu_to_node();
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * get boot-time SMP configuration:
++ */
++ if (smp_found_config)
++ get_smp_config();
++#ifndef CONFIG_XEN
++ init_apic_mappings();
++#endif
++#endif
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
++ prefill_possible_map();
++#endif
++
++ /*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++ probe_roms();
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
++#else
++ e820_reserve_resources(e820.map, e820.nr_map);
++#endif
++
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ {
++ unsigned i;
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ }
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++#else
++ e820_setup_gap(e820.map, e820.nr_map);
++#endif
++
++#ifdef CONFIG_XEN
++ {
++ struct physdev_set_iopl set_iopl;
++
++ set_iopl.iopl = 1;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ }
++#else /* CONFIG_XEN */
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++
++#endif /* !CONFIG_XEN */
++}
++
++#ifdef CONFIG_XEN
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++#endif /* CONFIG_XEN */
++
++
++static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++
++ if (c->extended_cpuid_level < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++ return 1;
++}
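++/*
++ * Example: leaves 0x80000002..0x80000004 each return 16 bytes of the
++ * 48 byte brand string in eax..edx, so the twelve u32 stores above fill
++ * x86_model_id[] in order and the terminating NUL lands at index 48.
++ */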
++
++
++static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, eax, ebx, ecx, edx;
++
++ n = c->extended_cpuid_level;
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++ c->x86_cache_size=(ecx>>24)+(edx>>24);
++ /* On K8 L1 TLB is inclusive, so don't count it */
++ c->x86_tlbsize = 0;
++ }
++
++ if (n >= 0x80000006) {
++ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
++ ecx = cpuid_ecx(0x80000006);
++ c->x86_cache_size = ecx >> 16;
++ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ c->x86_cache_size, ecx & 0xFF);
++ }
++
++ if (n >= 0x80000007)
++ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
++ if (n >= 0x80000008) {
++ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
++ c->x86_virt_bits = (eax >> 8) & 0xff;
++ c->x86_phys_bits = eax & 0xff;
++ }
++}
++
++#ifdef CONFIG_NUMA
++static int nearby_node(int apicid)
++{
++ int i;
++ for (i = apicid - 1; i >= 0; i--) {
++ int node = apicid_to_node[i];
++ if (node != NUMA_NO_NODE && node_online(node))
++ return node;
++ }
++ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
++ int node = apicid_to_node[i];
++ if (node != NUMA_NO_NODE && node_online(node))
++ return node;
++ }
++ return first_node(node_online_map); /* Shouldn't happen */
++}
++#endif
++
++/*
++ * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
++ * Assumes number of cores is a power of two.
++ */
++static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned bits;
++#ifdef CONFIG_NUMA
++ int cpu = smp_processor_id();
++ int node = 0;
++ unsigned apicid = hard_smp_processor_id();
++#endif
++ unsigned ecx = cpuid_ecx(0x80000008);
++
++ c->x86_max_cores = (ecx & 0xff) + 1;
++
++ /* CPU telling us the core id bits shift? */
++ bits = (ecx >> 12) & 0xF;
++
++ /* Otherwise recompute */
++ if (bits == 0) {
++ while ((1 << bits) < c->x86_max_cores)
++ bits++;
++ }
++
++ /* Low order bits define the core id (index of core in socket) */
++ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
++ /* Convert the APIC ID into the socket ID */
++ c->phys_proc_id = phys_pkg_id(bits);
++
++#ifdef CONFIG_NUMA
++ node = c->phys_proc_id;
++ if (apicid_to_node[apicid] != NUMA_NO_NODE)
++ node = apicid_to_node[apicid];
++ if (!node_online(node)) {
++ /* Two possibilities here:
++ - The CPU is missing memory and no node was created.
++ In that case try picking one from a nearby CPU
++ - The APIC IDs differ from the HyperTransport node IDs
++ which the K8 northbridge parsing fills in.
++ Assume they are all increased by a constant offset,
++ but in the same order as the HT nodeids.
++ If that doesn't result in a usable node fall back to the
++ path for the previous case. */
++ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
++ if (ht_nodeid >= 0 &&
++ apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
++ node = apicid_to_node[ht_nodeid];
++ /* Pick a nearby node */
++ if (!node_online(node))
++ node = nearby_node(apicid);
++ }
++ numa_set_node(cpu, node);
++
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++#endif
++}
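++/*
++ * Worked example for the APIC id split above: on a dual core K8,
++ * cpuid 0x80000008 ecx[7:0] is 1, so x86_max_cores = 2 and bits = 1;
++ * an initial APIC id of 3 then decodes to cpu_core_id = 3 & 1 = 1 on
++ * socket phys_proc_id = 3 >> 1 = 1.
++ */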
++
++static void __init init_amd(struct cpuinfo_x86 *c)
++{
++ unsigned level;
++
++#ifdef CONFIG_SMP
++ unsigned long value;
++
++ /*
++ * Disable TLB flush filter by setting HWCR.FFDIS on K8
++ * bit 6 of msr C001_0015
++ *
++ * Errata 63 for SH-B3 steppings
++ * Errata 122 for all steppings (F+ have it disabled by default)
++ */
++ if (c->x86 == 15) {
++ rdmsrl(MSR_K8_HWCR, value);
++ value |= 1 << 6;
++ wrmsrl(MSR_K8_HWCR, value);
++ }
++#endif
++
++ /* Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
++ 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
++ clear_bit(0*32+31, &c->x86_capability);
++
++ /* On C+ stepping K8 rep microcode works well for copy/memset */
++ level = cpuid_eax(1);
++ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
++ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++
++ /* Enable workaround for FXSAVE leak */
++ if (c->x86 >= 6)
++ set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++
++ level = get_model_name(c);
++ if (!level) {
++ switch (c->x86) {
++ case 15:
++ /* Should distinguish Models here, but this is only
++ a fallback anyway. */
++ strcpy(c->x86_model_id, "Hammer");
++ break;
++ }
++ }
++ display_cacheinfo(c);
++
++ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
++ if (c->x86_power & (1<<8))
++ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++
++ /* Multi core CPU? */
++ if (c->extended_cpuid_level >= 0x80000008)
++ amd_detect_cmp(c);
++
++ /* Fix cpuid4 emulation for more */
++ num_cache_leaves = 3;
++}
++
++static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++ if (!cpu_has(c, X86_FEATURE_HT))
++ return;
++ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ goto out;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++ } else if (smp_num_siblings > 1 ) {
++
++ if (smp_num_siblings > NR_CPUS) {
++ printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id(index_msb);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++ index_msb = get_count_order(smp_num_siblings);
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id(index_msb) &
++ ((1 << core_bits) - 1);
++ }
++out:
++ if ((c->x86_max_cores * smp_num_siblings) > 1) {
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++ }
++
++#endif
++}
++
++/*
++ * find out the number of processor cores on the die
++ */
++static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++{
++ unsigned int eax, t;
++
++ if (c->cpuid_level < 4)
++ return 1;
++
++ cpuid_count(4, 0, &eax, &t, &t, &t);
++
++ if (eax & 0x1f)
++ return ((eax >> 26) + 1);
++ else
++ return 1;
++}
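++/*
++ * Example: cpuid leaf 4, subleaf 0 reports "addressable core ids - 1"
++ * in eax[31:26]; a dual core part returns 1 there, so this yields 2,
++ * while eax == 0 (no cache information at all) falls back to 1 core.
++ */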
++
++static void srat_detect_node(void)
++{
++#ifdef CONFIG_NUMA
++ unsigned node;
++ int cpu = smp_processor_id();
++ int apicid = hard_smp_processor_id();
++
++ /* For now, don't do the funky fallback heuristics that the AMD
++ version employs. */
++ node = apicid_to_node[apicid];
++ if (node == NUMA_NO_NODE)
++ node = first_node(node_online_map);
++ numa_set_node(cpu, node);
++
++ if (acpi_numa > 0)
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++}
++
++static void __cpuinit init_intel(struct cpuinfo_x86 *c)
++{
++ /* Cache sizes */
++ unsigned n;
++
++ init_intel_cacheinfo(c);
++ if (c->cpuid_level > 9 ) {
++ unsigned eax = cpuid_eax(10);
++ /* Check for version and the number of counters */
++ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
++ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
++ }
++
++ n = c->extended_cpuid_level;
++ if (n >= 0x80000008) {
++ unsigned eax = cpuid_eax(0x80000008);
++ c->x86_virt_bits = (eax >> 8) & 0xff;
++ c->x86_phys_bits = eax & 0xff;
++ /* CPUID workaround for Intel 0F34 CPU */
++ if (c->x86_vendor == X86_VENDOR_INTEL &&
++ c->x86 == 0xF && c->x86_model == 0x3 &&
++ c->x86_mask == 0x4)
++ c->x86_phys_bits = 36;
++ }
++
++ if (c->x86 == 15)
++ c->x86_cache_alignment = c->x86_clflush_size * 2;
++ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++ (c->x86 == 0x6 && c->x86_model >= 0x0e))
++ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ c->x86_max_cores = intel_num_cpu_cores(c);
++
++ srat_detect_node();
++}
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++{
++ char *v = c->x86_vendor_id;
++
++ if (!strcmp(v, "AuthenticAMD"))
++ c->x86_vendor = X86_VENDOR_AMD;
++ else if (!strcmp(v, "GenuineIntel"))
++ c->x86_vendor = X86_VENDOR_INTEL;
++ else
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++}
++
++struct cpu_model_info {
++ int vendor;
++ int family;
++ char *model_names[16];
++};
++
++/* Do some early cpuid on the boot CPU to get the parameters that are
++ needed before check_bugs. Everything advanced is in identify_cpu
++ below. */
++void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++{
++ u32 tfms;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_clflush_size = 64;
++ c->x86_cache_alignment = c->x86_clflush_size;
++ c->x86_max_cores = 1;
++ c->extended_cpuid_level = 0;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ /* Get vendor name */
++ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
++ (unsigned int *)&c->x86_vendor_id[0],
++ (unsigned int *)&c->x86_vendor_id[8],
++ (unsigned int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c);
++
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if (c->cpuid_level >= 0x00000001) {
++ __u32 misc;
++ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
++ &c->x86_capability[0]);
++ c->x86 = (tfms >> 8) & 0xf;
++ c->x86_model = (tfms >> 4) & 0xf;
++ c->x86_mask = tfms & 0xf;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ if (c->x86_capability[0] & (1<<19))
++ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++#ifdef CONFIG_SMP
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
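++/*
++ * Decode example for the cpuid(1) fields above: tfms = 0x00220f12 has
++ * base family 0xf, so the extended family (tfms >> 20 & 0xff = 2) is
++ * added, giving x86 = 0x11; the extended model nibble prepends to the
++ * base model (x86_model = 0x21) and the low nibble is the stepping
++ * (x86_mask = 2).
++ */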
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++ u32 xlvl;
++
++ early_identify_cpu(c);
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ c->extended_cpuid_level = xlvl;
++ if ((xlvl & 0xffff0000) == 0x80000000) {
++ if (xlvl >= 0x80000001) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if (xlvl >= 0x80000004)
++ get_model_name(c); /* Default name */
++ }
++
++ /* Transmeta-defined flags: level 0x80860001 */
++ xlvl = cpuid_eax(0x80860000);
++ if ((xlvl & 0xffff0000) == 0x80860000) {
++ /* Don't set x86_cpuid_level here for now, to avoid confusion. */
++ if (xlvl >= 0x80860001)
++ c->x86_capability[2] = cpuid_edx(0x80860001);
++ }
++
++ c->apicid = phys_pkg_id(0);
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ switch (c->x86_vendor) {
++ case X86_VENDOR_AMD:
++ init_amd(c);
++ break;
++
++ case X86_VENDOR_INTEL:
++ init_intel(c);
++ break;
++
++ case X86_VENDOR_UNKNOWN:
++ default:
++ display_cacheinfo(c);
++ break;
++ }
++
++ select_idle_routine(c);
++ detect_ht(c);
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if (c != &boot_cpu_data) {
++ /* AND the already accumulated flags with these */
++ for (i = 0 ; i < NCAPINTS ; i++)
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++#ifdef CONFIG_X86_MCE
++ mcheck_init(c);
++#endif
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++#ifdef CONFIG_NUMA
++ numa_add_cpu(smp_processor_id());
++#endif
++}
++
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ if (c->x86_model_id[0])
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++/*
++ * Get CPU information for use by the procfs.
++ */
++
++static int show_cpuinfo(struct seq_file *m, void *v)
++{
++ struct cpuinfo_x86 *c = v;
++
++ /*
++ * These flag bits must match the definitions in <asm/cpufeature.h>.
++ * NULL means this bit is undefined or reserved; either way it doesn't
++ * have meaning as far as Linux is concerned. Note that it's important
++ * to realize there is a difference between this table and CPUID -- if
++ * applications want to get the raw CPUID data, they should access
++ * /dev/cpu/<cpu_nr>/cpuid instead.
++ */
++ static char *x86_cap_flags[] = {
++ /* Intel-defined */
++ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++
++ /* AMD-defined */
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
++ NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++
++ /* Transmeta-defined */
++ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* Other (Linux-defined) */
++ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
++ "constant_tsc", NULL, NULL,
++ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* Intel-defined (#2) */
++ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
++ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* VIA/Cyrix/Centaur-defined */
++ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* AMD-defined (#2) */
++ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ };
++ static char *x86_power_flags[] = {
++ "ts", /* temperature sensor */
++ "fid", /* frequency id control */
++ "vid", /* voltage id control */
++ "ttp", /* thermal trip */
++ "tm",
++ "stc",
++ NULL,
++ /* nothing */ /* constant_tsc - moved to flags */
++ };
++
++
++#ifdef CONFIG_SMP
++ if (!cpu_online(c-cpu_data))
++ return 0;
++#endif
++
++ seq_printf(m,"processor\t: %u\n"
++ "vendor_id\t: %s\n"
++ "cpu family\t: %d\n"
++ "model\t\t: %d\n"
++ "model name\t: %s\n",
++ (unsigned)(c-cpu_data),
++ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++ c->x86,
++ (int)c->x86_model,
++ c->x86_model_id[0] ? c->x86_model_id : "unknown");
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++ else
++ seq_printf(m, "stepping\t: unknown\n");
++
++ if (cpu_has(c,X86_FEATURE_TSC)) {
++ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
++ if (!freq)
++ freq = cpu_khz;
++ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
++ freq / 1000, (freq % 1000));
++ }
++
++ /* Cache size */
++ if (c->x86_cache_size >= 0)
++ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++
++#ifdef CONFIG_SMP
++ if (smp_num_siblings * c->x86_max_cores > 1) {
++ int cpu = c - cpu_data;
++ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
++ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
++ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
++ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
++ }
++#endif
++
++ seq_printf(m,
++ "fpu\t\t: yes\n"
++ "fpu_exception\t: yes\n"
++ "cpuid level\t: %d\n"
++ "wp\t\t: yes\n"
++ "flags\t\t:",
++ c->cpuid_level);
++
++ {
++ int i;
++ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
++ if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
++ seq_printf(m, " %s", x86_cap_flags[i]);
++ }
++
++ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
++ c->loops_per_jiffy/(500000/HZ),
++ (c->loops_per_jiffy/(5000/HZ)) % 100);
++
++ if (c->x86_tlbsize > 0)
++ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
++ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
++ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++
++ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
++ c->x86_phys_bits, c->x86_virt_bits);
++
++ seq_printf(m, "power management:");
++ {
++ unsigned i;
++ for (i = 0; i < 32; i++)
++ if (c->x86_power & (1 << i)) {
++ if (i < ARRAY_SIZE(x86_power_flags) &&
++ x86_power_flags[i])
++ seq_printf(m, "%s%s",
++ x86_power_flags[i][0]?" ":"",
++ x86_power_flags[i]);
++ else
++ seq_printf(m, " [%d]", i);
++ }
++ }
++
++ seq_printf(m, "\n\n");
++
++ return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ ++*pos;
++ return c_start(m, pos);
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++struct seq_operations cpuinfo_op = {
++ .start =c_start,
++ .next = c_next,
++ .stop = c_stop,
++ .show = show_cpuinfo,
++};
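++/*
++ * Iteration sketch: the seq_file core calls c_start() with *pos == 0,
++ * show_cpuinfo() for each &cpu_data[*pos], then c_next() until c_start()
++ * returns NULL at NR_CPUS -- so /proc/cpuinfo walks every CPU slot and
++ * show_cpuinfo() skips the offline ones via its cpu_online() check.
++ */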
++
++#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
++#include <linux/platform_device.h>
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++#endif
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/signal.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/signal.c
+--- linux-2.6.18.8/arch/x86_64/kernel/signal.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/signal.c 2008-08-21 11:36:07.000000000 +0200
+@@ -38,37 +38,6 @@ int ia32_setup_frame(int sig, struct k_s
+ sigset_t *set, struct pt_regs * regs);
+
+ asmlinkage long
+-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+-{
+- sigset_t saveset, newset;
+-
+- /* XXX: Don't preclude handling different sized sigset_t's. */
+- if (sigsetsize != sizeof(sigset_t))
+- return -EINVAL;
+-
+- if (copy_from_user(&newset, unewset, sizeof(newset)))
+- return -EFAULT;
+- sigdelsetmask(&newset, ~_BLOCKABLE);
+-
+- spin_lock_irq(&current->sighand->siglock);
+- saveset = current->blocked;
+- current->blocked = newset;
+- recalc_sigpending();
+- spin_unlock_irq(&current->sighand->siglock);
+-#ifdef DEBUG_SIG
+- printk("rt_sigsuspend savset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
+- saveset, newset, regs, regs->rip);
+-#endif
+- regs->rax = -EINTR;
+- while (1) {
+- current->state = TASK_INTERRUPTIBLE;
+- schedule();
+- if (do_signal(regs, &saveset))
+- return -EINTR;
+- }
+-}
+-
+-asmlinkage long
+ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ struct pt_regs *regs)
+ {
+@@ -341,11 +310,11 @@ static int setup_rt_frame(int sig, struc
+ current->comm, current->pid, frame, regs->rip, frame->pretcode);
+ #endif
+
+- return 1;
++ return 0;
+
+ give_sigsegv:
+ force_sigsegv(sig, current);
+- return 0;
++ return -EFAULT;
+ }
+
+ /*
+@@ -408,7 +377,7 @@ handle_signal(unsigned long sig, siginfo
+ #endif
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+- if (ret) {
++ if (ret == 0) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+@@ -425,11 +394,12 @@ handle_signal(unsigned long sig, siginfo
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+-int do_signal(struct pt_regs *regs, sigset_t *oldset)
++static void do_signal(struct pt_regs *regs)
+ {
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
++ sigset_t *oldset;
+
+ /*
+ * We want the common case to go fast, which
+@@ -438,9 +408,11 @@ int do_signal(struct pt_regs *regs, sigs
+ * if so.
+ */
+ if (!user_mode(regs))
+- return 1;
++ return;
+
+- if (!oldset)
++ if (test_thread_flag(TIF_RESTORE_SIGMASK))
++ oldset = &current->saved_sigmask;
++ else
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+@@ -454,30 +426,46 @@ int do_signal(struct pt_regs *regs, sigs
+ set_debugreg(current->thread.debugreg7, 7);
+
+ /* Whee! Actually deliver the signal. */
+- return handle_signal(signr, &info, &ka, oldset, regs);
++ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
++ /* a signal was successfully delivered; the saved
++ * sigmask will have been stored in the signal frame,
++ * and will be restored by sigreturn, so we can simply
++ * clear the TIF_RESTORE_SIGMASK flag */
++ clear_thread_flag(TIF_RESTORE_SIGMASK);
++ }
++ return;
+ }
+
+ /* Did we come from a system call? */
+ if ((long)regs->orig_rax >= 0) {
+ /* Restart the system call - no handlers present */
+ long res = regs->rax;
+- if (res == -ERESTARTNOHAND ||
+- res == -ERESTARTSYS ||
+- res == -ERESTARTNOINTR) {
++ switch (res) {
++ case -ERESTARTNOHAND:
++ case -ERESTARTSYS:
++ case -ERESTARTNOINTR:
+ regs->rax = regs->orig_rax;
+ regs->rip -= 2;
+- }
+- if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
++ break;
++ case -ERESTART_RESTARTBLOCK:
+ regs->rax = test_thread_flag(TIF_IA32) ?
+ __NR_ia32_restart_syscall :
+ __NR_restart_syscall;
+ regs->rip -= 2;
++ break;
+ }
+ }
+- return 0;
++
++ /* if there's no signal to deliver, we just put the saved sigmask
++ back. */
++ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
++ clear_thread_flag(TIF_RESTORE_SIGMASK);
++ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
++ }
+ }
+
+-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_flags)
++void
++do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
+ {
+ #ifdef DEBUG_SIG
+ printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
+@@ -491,8 +479,8 @@ void do_notify_resume(struct pt_regs *re
+ }
+
+ /* deal with pending signal delivery */
+- if (thread_info_flags & _TIF_SIGPENDING)
+- do_signal(regs,oldset);
++ if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
++ do_signal(regs);
+ }
+
+ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/smp-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/smp-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/smp-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/smp-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,575 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ * (c) 2002,2003 Andi Kleen, SuSE Labs.
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/smp.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/interrupt.h>
++
++#include <asm/mtrr.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/mach_apic.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/apicdef.h>
++#include <asm/idle.h>
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway.)
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ *
++ * More scalable flush, from Andi Kleen
++ *
++ * To avoid global state use 8 different call vectors.
++ * Each CPU uses a specific vector to trigger flushes on other
++ * CPUs. Depending on the received vector the target CPUs look into
++ * the right per cpu variable for the flush data.
++ *
++ * With more than 8 CPUs they are hashed to the 8 available
++ * vectors. The limited global vector space forces us to this right now.
++ * In future when interrupts are split into per CPU domains this could be
++ * fixed, at the cost of triggering multiple IPIs in some cases.
++ */
++
++union smp_flush_state {
++ struct {
++ cpumask_t flush_cpumask;
++ struct mm_struct *flush_mm;
++ unsigned long flush_va;
++#define FLUSH_ALL -1ULL
++ spinlock_t tlbstate_lock;
++ };
++ char pad[SMP_CACHE_BYTES];
++} ____cacheline_aligned;
++
++/* State is put into the per CPU data section, but padded
++ to a full cache line because other CPUs can access it and we don't
++ want false sharing in the per cpu data segment. */
++static DEFINE_PER_CPU(union smp_flush_state, flush_state);
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ */
++static inline void leave_mm(unsigned long cpu)
++{
++ if (read_pda(mmu_state) == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu mmu_state to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ * cpu active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu mmu_state to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu mmu_state is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ *
++ * Interrupts are disabled.
++ */
++
++asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
++{
++ int cpu;
++ int sender;
++ union smp_flush_state *f;
++
++ cpu = smp_processor_id();
++ /*
++ * orig_rax contains the negated interrupt vector.
++ * Use that to determine where the sender put the data.
++ */
++ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
++ f = &per_cpu(flush_state, sender);
++
++ if (!cpu_isset(cpu, f->flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++ * multiple CPUs is retried _only_ on the erroring CPUs
++ * it's staying as a return.
++ *
++ * BUG();
++ */
++
++ if (f->flush_mm == read_pda(active_mm)) {
++ if (read_pda(mmu_state) == TLBSTATE_OK) {
++ if (f->flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(f->flush_va);
++ } else
++ leave_mm(cpu);
++ }
++out:
++ ack_APIC_irq();
++ cpu_clear(cpu, f->flush_cpumask);
++}
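For reference, the two halves of that mapping are symmetric: the initiating CPU picks a vector slot by hashing its own number, and the handler above recovers the same slot from the negated vector in orig_rax. A standalone sketch of the pairing, using the same constants this file already uses:

    /* Sender side (see flush_tlb_others below): slot = cpu mod 8. */
    static int sender_slot(int initiating_cpu)
    {
            return initiating_cpu % NUM_INVALIDATE_TLB_VECTORS;
    }

    /* Receiver side (see smp_invalidate_interrupt above): the CPU
     * pushed the negated vector, so invert and rebase it. */
    static int slot_from_orig_rax(long orig_rax)
    {
            return ~orig_rax - INVALIDATE_TLB_VECTOR_START;
    }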
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ int sender;
++ union smp_flush_state *f;
++
++ /* Caller has disabled preemption */
++ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
++ f = &per_cpu(flush_state, sender);
++
++ /* Could avoid this lock when
++ num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
++ probably not worth checking this for a cache-hot lock. */
++ spin_lock(&f->tlbstate_lock);
++
++ f->flush_mm = mm;
++ f->flush_va = va;
++ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
++
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
++
++ while (!cpus_empty(f->flush_cpumask))
++ cpu_relax();
++
++ f->flush_mm = NULL;
++ f->flush_va = 0;
++ spin_unlock(&f->tlbstate_lock);
++}
++
++int __cpuinit init_smp_flush(void)
++{
++ int i;
++ for_each_cpu_mask(i, cpu_possible_map) {
++ spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
++ }
++ return 0;
++}
++
++core_initcall(init_smp_flush);
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_current_task);
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_mm);
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if(current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (read_pda(mmu_state) == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++
++void smp_send_reschedule(int cpu)
++{
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++static struct call_data_struct * call_data;
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++/*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ *
++ * cpu is a standard Linux logical CPU number.
++ */
++static void
++__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = 1;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * smp_call_function_single - Run a function on another CPU
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: Currently unused.
++ * @wait: If true, wait until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>,
++ * is executing it, or has already executed it.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ /* prevent preemption and reschedule on another processor */
++ int me = get_cpu();
++ if (cpu == me) {
++ WARN_ON(1);
++ put_cpu();
++ return -EBUSY;
++ }
++ spin_lock_bh(&call_lock);
++ __smp_call_function_single(cpu, func, info, nonatomic, wait);
++ spin_unlock_bh(&call_lock);
++ put_cpu();
++ return 0;
++}
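A hypothetical caller, to show the contract: the callback runs on the target CPU in interrupt context, so it must be fast and non-blocking, and with wait=1 the initiator spins until it has finished. drain_local_stats() below is an invented example, not part of this patch:

    static void drain_local_stats(void *info)
    {
            /* Runs on 'target' in interrupt context: no sleeping here. */
    }

    static void example(int target)
    {
            smp_call_function_single(target, drain_local_stats, NULL,
                                     /* nonatomic */ 0, /* wait */ 1);
    }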
++
++/*
++ * this function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
++ */
++static void __smp_call_function (void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = num_online_cpus()-1;
++
++ if (!cpus)
++ return;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * smp_call_function - run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other
++ * CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute func, are executing it, or have executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ * Actually there are a few legal cases, like panic.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ spin_lock(&call_lock);
++ __smp_call_function(func,info,nonatomic,wait);
++ spin_unlock(&call_lock);
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
++
++void smp_stop_cpu(void)
++{
++ unsigned long flags;
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_save(flags);
++ disable_all_local_evtchn();
++ local_irq_restore(flags);
++}
++
++static void smp_really_stop_cpu(void *dummy)
++{
++ smp_stop_cpu();
++ for (;;)
++ halt();
++}
++
++void smp_send_stop(void)
++{
++ int nolock = 0;
++#ifndef CONFIG_XEN
++ if (reboot_force)
++ return;
++#endif
++ /* Don't deadlock on the call lock in panic */
++ if (!spin_trylock(&call_lock)) {
++ /* ignore locking because we have panicked anyway */
++ nolock = 1;
++ }
++ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++ if (!nolock)
++ spin_unlock(&call_lock);
++
++ local_irq_disable();
++ disable_all_local_evtchn();
++ local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++#ifndef CONFIG_XEN
++asmlinkage void smp_reschedule_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++#endif
++{
++#ifndef CONFIG_XEN
++ ack_APIC_irq();
++#else
++ return IRQ_HANDLED;
++#endif
++}
++
++#ifndef CONFIG_XEN
++asmlinkage void smp_call_function_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_call_function_interrupt(void)
++#endif
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++#ifndef CONFIG_XEN
++ ack_APIC_irq();
++#endif
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ exit_idle();
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++#ifdef CONFIG_XEN
++ return IRQ_HANDLED;
++#endif
++}
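The started/finished counters implement a two-phase rendezvous between the initiator and the targets; sketched side by side, matching the code above and in __smp_call_function:

    /*
     *  initiator                            each target CPU
     *  ---------                            ---------------
     *  call_data = &data; wmb();
     *  send IPI  ----------------------->   read call_data->func/info
     *  spin until started == cpus  <-----   mb(); atomic_inc(&started)
     *  (from here 'data' may leave          func(info)
     *   scope unless wait was set)
     *  spin until finished == cpus <-----   mb(); atomic_inc(&finished)
     */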
++
++int safe_smp_processor_id(void)
++{
++#ifdef CONFIG_XEN
++ return smp_processor_id();
++#else
++ unsigned apicid, i;
++
++ if (disable_apic)
++ return 0;
++
++ apicid = hard_smp_processor_id();
++ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
++ return apicid;
++
++ for (i = 0; i < NR_CPUS; ++i) {
++ if (x86_cpu_to_apicid[i] == apicid)
++ return i;
++ }
++
++ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
++ * or called too early. Either way, we must be CPU 0. */
++ if (x86_cpu_to_apicid[0] == BAD_APICID)
++ return 0;
++
++ return 0; /* Should not happen */
++#endif
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/suspend.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/suspend.c
+--- linux-2.6.18.8/arch/x86_64/kernel/suspend.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/suspend.c 2008-08-21 11:36:07.000000000 +0200
+@@ -114,12 +114,14 @@ void restore_processor_state(void)
+
+ void fix_processor_context(void)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ int cpu = smp_processor_id();
+ struct tss_struct *t = &per_cpu(init_tss, cpu);
+
+ set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has the concept of a busy TSS or some similar stupidity. */
+
+ cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
++#endif
+
+ syscall_init(); /* This sets MSR_*STAR and related */
+ load_TR_desc(); /* This does ltr */
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/traps-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/traps-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/traps-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/traps-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1173 @@
++/*
++ * linux/arch/x86-64/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'entry.S'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/nmi.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/kdebug.h>
++#include <asm/processor.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/pgalloc.h>
++#include <asm/pda.h>
++#include <asm/proto.h>
++#include <asm/nmi.h>
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void double_fault(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void reserved(void);
++asmlinkage void alignment_check(void);
++asmlinkage void machine_check(void);
++asmlinkage void spurious_interrupt_bug(void);
++
++ATOMIC_NOTIFIER_HEAD(die_chain);
++EXPORT_SYMBOL(die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline void conditional_sti(struct pt_regs *regs)
++{
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++}
++
++static inline void preempt_conditional_sti(struct pt_regs *regs)
++{
++ preempt_disable();
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++}
++
++static inline void preempt_conditional_cli(struct pt_regs *regs)
++{
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_disable();
++ /* Make sure to not schedule here because we could be running
++ on an exception stack. */
++ preempt_enable_no_resched();
++}
++
++static int kstack_depth_to_print = 12;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++
++#ifdef CONFIG_KALLSYMS
++# include <linux/kallsyms.h>
++void printk_address(unsigned long address)
++{
++ unsigned long offset = 0, symsize;
++ const char *symname;
++ char *modname;
++ char *delim = ":";
++ char namebuf[128];
++
++ symname = kallsyms_lookup(address, &symsize, &offset,
++ &modname, namebuf);
++ if (!symname) {
++ printk(" [<%016lx>]\n", address);
++ return;
++ }
++ if (!modname)
++ modname = delim = "";
++ printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
++ address, delim, modname, delim, symname, offset, symsize);
++}
++#else
++void printk_address(unsigned long address)
++{
++ printk(" [<%016lx>]\n", address);
++}
++#endif
++
++static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
++ unsigned *usedp, const char **idp)
++{
++#ifndef CONFIG_X86_NO_TSS
++ static char ids[][8] = {
++ [DEBUG_STACK - 1] = "#DB",
++ [NMI_STACK - 1] = "NMI",
++ [DOUBLEFAULT_STACK - 1] = "#DF",
++ [STACKFAULT_STACK - 1] = "#SS",
++ [MCE_STACK - 1] = "#MC",
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
++#endif
++ };
++ unsigned k;
++
++ /*
++ * Iterate over all exception stacks, and figure out whether
++ * 'stack' is in one of them:
++ */
++ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
++ unsigned long end;
++
++ /*
++ * set 'end' to the end of the exception stack.
++ */
++ switch (k + 1) {
++ /*
++ * TODO: this block is not needed, I think, because
++ * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
++ * properly too.
++ */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ case DEBUG_STACK:
++ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
++ break;
++#endif
++ default:
++ end = per_cpu(orig_ist, cpu).ist[k];
++ break;
++ }
++ /*
++ * Is 'stack' above this exception frame's end?
++ * If yes then skip to the next frame.
++ */
++ if (stack >= end)
++ continue;
++ /*
++ * Is 'stack' above this exception frame's start address?
++ * If yes then we found the right frame.
++ */
++ if (stack >= end - EXCEPTION_STKSZ) {
++ /*
++ * Make sure we only iterate through an exception
++ * stack once. If it comes up for the second time
++ * then there's something wrong going on - just
++ * break out and return NULL:
++ */
++ if (*usedp & (1U << k))
++ break;
++ *usedp |= 1U << k;
++ *idp = ids[k];
++ return (unsigned long *)end;
++ }
++ /*
++ * If this is a debug stack, and if it has a larger size than
++ * the usual exception stacks, then 'stack' might still
++ * be within the lower portion of the debug stack:
++ */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
++ unsigned j = N_EXCEPTION_STACKS - 1;
++
++ /*
++ * Black magic. A large debug stack is composed of
++ * multiple exception stack entries, which we
++ * iterate through now. Don't look:
++ */
++ do {
++ ++j;
++ end -= EXCEPTION_STKSZ;
++ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
++ } while (stack < end - EXCEPTION_STKSZ);
++ if (*usedp & (1U << j))
++ break;
++ *usedp |= 1U << j;
++ *idp = ids[j];
++ return (unsigned long *)end;
++ }
++#endif
++ }
++#endif
++ return NULL;
++}
++
++static int show_trace_unwind(struct unwind_frame_info *info, void *context)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ printk_address(UNW_PC(info));
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
++/*
++ * x86-64 can have up to three kernel stacks:
++ * process stack
++ * interrupt stack
++ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
++ */
++
++void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++{
++ const unsigned cpu = safe_smp_processor_id();
++ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
++ unsigned used = 0;
++
++ printk("\nCall Trace:\n");
++
++ if (!tsk)
++ tsk = current;
++
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, tsk, regs) == 0)
++ unw_ret = show_trace_unwind(&info, NULL);
++ } else if (tsk == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
++ else {
++ if (unwind_init_blocked(&info, tsk) == 0)
++ unw_ret = show_trace_unwind(&info, NULL);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if ((long)UNW_SP(&info) < 0) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (unsigned long *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ /*
++ * Print function call entries within a stack. 'cond' is the
++ * "end of stackframe" condition, that the 'stack++'
++ * iteration will eventually trigger.
++ */
++#define HANDLE_STACK(cond) \
++ do while (cond) { \
++ unsigned long addr = *stack++; \
++ if (kernel_text_address(addr)) { \
++ /* \
++ * If the address is either in the text segment of the \
++ * kernel, or in the region which contains vmalloc'ed \
++ * memory, it *may* be the address of a calling \
++ * routine; if so, print it so that someone tracing \
++ * down the cause of the crash will be able to figure \
++ * out the call path that was taken. \
++ */ \
++ printk_address(addr); \
++ } \
++ } while (0)
++
++ /*
++ * Print function call entries in all stacks, starting at the
++ * current stack address. If the stacks consist of nested
++ * exceptions, follow the linkage from one stack to the next.
++ */
++ for ( ; ; ) {
++ const char *id;
++ unsigned long *estack_end;
++ estack_end = in_exception_stack(cpu, (unsigned long)stack,
++ &used, &id);
++
++ if (estack_end) {
++ printk(" <%s>", id);
++ HANDLE_STACK (stack < estack_end);
++ printk(" <EOE>");
++ /*
++ * We link to the next stack via the
++ * second-to-last pointer (index -2 to end) in the
++ * exception stack:
++ */
++ stack = (unsigned long *) estack_end[-2];
++ continue;
++ }
++ if (irqstack_end) {
++ unsigned long *irqstack;
++ irqstack = irqstack_end -
++ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
++
++ if (stack >= irqstack && stack < irqstack_end) {
++ printk(" <IRQ>");
++ HANDLE_STACK (stack < irqstack_end);
++ /*
++ * We link to the next stack (which would be
++ * the process stack normally) the last
++ * pointer (index -1 to end) in the IRQ stack:
++ */
++ stack = (unsigned long *) (irqstack_end[-1]);
++ irqstack_end = NULL;
++ printk(" <EOI>");
++ continue;
++ }
++ }
++ break;
++ }
++
++ /*
++ * This prints the process stack:
++ */
++ HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++#undef HANDLE_STACK
++
++ printk("\n");
++}
++
++static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++{
++ unsigned long *stack;
++ int i;
++ const int cpu = safe_smp_processor_id();
++ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
++ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
++
++ // debugging aid: "show_stack(NULL, NULL);" prints the
++ // back trace for this cpu.
++
++ if (rsp == NULL) {
++ if (tsk)
++ rsp = (unsigned long *)tsk->thread.rsp;
++ else
++ rsp = (unsigned long *)&rsp;
++ }
++
++ stack = rsp;
++ for(i=0; i < kstack_depth_to_print; i++) {
++ if (stack >= irqstack && stack <= irqstack_end) {
++ if (stack == irqstack_end) {
++ stack = (unsigned long *) (irqstack_end[-1]);
++ printk(" <EOI> ");
++ }
++ } else {
++ if (((long) stack & (THREAD_SIZE-1)) == 0)
++ break;
++ }
++ if (i && ((i % 4) == 0))
++ printk("\n");
++ printk(" %016lx", *stack++);
++ touch_nmi_watchdog();
++ }
++ show_trace(tsk, regs, rsp);
++}
++
++void show_stack(struct task_struct *tsk, unsigned long * rsp)
++{
++ _show_stack(tsk, NULL, rsp);
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long dummy;
++ show_trace(NULL, NULL, &dummy);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = !user_mode(regs);
++ unsigned long rsp;
++ const int cpu = safe_smp_processor_id();
++ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
++
++ rsp = regs->rsp;
++
++ printk("CPU %d ", cpu);
++ __show_regs(regs);
++ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
++ cur->comm, cur->pid, task_thread_info(cur), cur);
++
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault.
++ */
++ if (in_kernel) {
++
++ printk("Stack: ");
++ _show_stack(NULL, regs, (unsigned long*)rsp);
++
++ printk("\nCode: ");
++ if (regs->rip < PAGE_OFFSET)
++ goto bad;
++
++ for (i=0; i<20; i++) {
++ unsigned char c;
++ if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
++bad:
++ printk(" Bad RIP value.");
++ break;
++ }
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
++void handle_BUG(struct pt_regs *regs)
++{
++ struct bug_frame f;
++ long len;
++ const char *prefix = "";
++
++ if (user_mode(regs))
++ return;
++ if (__copy_from_user(&f, (const void __user *) regs->rip,
++ sizeof(struct bug_frame)))
++ return;
++ if (f.filename >= 0 ||
++ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
++ return;
++ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
++ if (len < 0 || len >= PATH_MAX)
++ f.filename = (int)(long)"unmapped filename";
++ else if (len > 50) {
++ f.filename += len - 50;
++ prefix = "...";
++ }
++ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
++ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
++}
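The reads above imply the rough shape of the record that BUG() plants inline at the fault site; a sketch of just the members handle_BUG() touches (the authoritative layout lives in this tree's include/asm-x86_64/bug.h):

    struct bug_frame_sketch {
            unsigned char ud2[2];  /* 0x0f 0x0b: the trapping instruction */
            /* instruction-encoding filler omitted in this sketch */
            signed int filename;   /* sign-extended kernel pointer, so < 0 */
            unsigned short line;
    } __attribute__((packed));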
++
++#ifdef CONFIG_BUG
++void out_of_line_bug(void)
++{
++ BUG();
++}
++EXPORT_SYMBOL(out_of_line_bug);
++#endif
++
++static DEFINE_SPINLOCK(die_lock);
++static int die_owner = -1;
++static unsigned int die_nest_count;
++
++unsigned __kprobes long oops_begin(void)
++{
++ int cpu = safe_smp_processor_id();
++ unsigned long flags;
++
++ /* racy, but better than risking deadlock. */
++ local_irq_save(flags);
++ if (!spin_trylock(&die_lock)) {
++ if (cpu == die_owner)
++ /* nested oops. should stop eventually */;
++ else
++ spin_lock(&die_lock);
++ }
++ die_nest_count++;
++ die_owner = cpu;
++ console_verbose();
++ bust_spinlocks(1);
++ return flags;
++}
++
++void __kprobes oops_end(unsigned long flags)
++{
++ die_owner = -1;
++ bust_spinlocks(0);
++ die_nest_count--;
++ if (die_nest_count)
++ /* We still own the lock */
++ local_irq_restore(flags);
++ else
++ /* Nest count reaches zero, release the lock. */
++ spin_unlock_irqrestore(&die_lock, flags);
++ if (panic_on_oops)
++ panic("Fatal exception");
++}
++
++void __kprobes __die(const char * str, struct pt_regs * regs, long err)
++{
++ static int die_counter;
++ printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++ printk("SMP ");
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ printk("DEBUG_PAGEALLOC");
++#endif
++ printk("\n");
++ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ printk(KERN_ALERT "RIP ");
++ printk_address(regs->rip);
++ printk(" RSP <%016lx>\n", regs->rsp);
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++}
++
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ unsigned long flags = oops_begin();
++
++ handle_BUG(regs);
++ __die(str, regs, err);
++ oops_end(flags);
++ do_exit(SIGSEGV);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++void __kprobes die_nmi(char *str, struct pt_regs *regs)
++{
++ unsigned long flags = oops_begin();
++
++ /*
++ * We are in trouble anyway, lets at least try
++ * to get a message out.
++ */
++ printk(str, safe_smp_processor_id());
++ show_registers(regs);
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++ if (panic_on_timeout || panic_on_oops)
++ panic("nmi watchdog");
++ printk("console shuts up ...\n");
++ oops_end(flags);
++ nmi_exit();
++ local_irq_enable();
++ do_exit(SIGSEGV);
++}
++#endif
++
++static void __kprobes do_trap(int trapnr, int signr, char *str,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (user_mode(regs)) {
++ if (exception_trace && unhandled_signal(tsk, signr))
++ printk(KERN_INFO
++ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
++ tsk->comm, tsk->pid, str,
++ regs->rip, regs->rsp, error_code);
++
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++
++ /* kernel trap */
++ {
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup)
++ regs->rip = fixup->fixup;
++ else
++ die(str, regs, error_code);
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ conditional_sti(regs); \
++ do_trap(trapnr, signr, str, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ conditional_sti(regs); \
++ do_trap(trapnr, signr, str, regs, error_code, &info); \
++}
++
++DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
++DO_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
++DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR(18, SIGSEGV, "reserved", reserved)
++
++/* Runs on IST stack */
++asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
++ 12, SIGBUS) == NOTIFY_STOP)
++ return;
++ preempt_conditional_sti(regs);
++ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
++ preempt_conditional_cli(regs);
++}
++
++asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
++{
++ static const char str[] = "double fault";
++ struct task_struct *tsk = current;
++
++ /* Return value not checked because a double fault cannot be ignored */
++ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 8;
++
++ /* This is always a kernel trap and never fixable (and thus must
++ never return). */
++ for (;;)
++ die(str, regs, error_code);
++}
++
++asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ struct task_struct *tsk = current;
++
++ conditional_sti(regs);
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 13;
++
++ if (user_mode(regs)) {
++ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
++ printk(KERN_INFO
++ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
++ tsk->comm, tsk->pid,
++ regs->rip, regs->rsp, error_code);
++
++ force_sig(SIGSEGV, tsk);
++ return;
++ }
++
++ /* kernel gp */
++ {
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return;
++ }
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
++ printk("You probably have a hardware problem with your RAM chips\n");
++
++#if 0 /* XEN */
++ /* Clear and disable the memory parity error line. */
++ reason = (reason & 0xf) | 4;
++ outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++#if 0 /* XEN */
++ /* Re-enable the IOCK line, wait for a few seconds */
++ reason = (reason & 0xf) | 8;
++ outb(reason, 0x61);
++ mdelay(2000);
++ reason &= ~8;
++ outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++/* Runs on IST stack. This code must keep interrupts off all the time.
++ Nested NMIs are prevented by the CPU. */
++asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
++{
++ unsigned char reason = 0;
++ int cpu;
++
++ cpu = smp_processor_id();
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!cpu)
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog > 0) {
++ nmi_watchdog_tick(regs,reason);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++
++ /* AK: following checks seem to be broken on modern chipsets. FIXME */
++
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
++ return;
++ }
++ preempt_conditional_sti(regs);
++ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++ preempt_conditional_cli(regs);
++}
++
++/* Help handler running on IST stack to switch back to user stack
++ for scheduling or signal handling. The actual stack switch is done in
++ entry.S */
++asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
++{
++ struct pt_regs *regs = eregs;
++ /* Did already sync */
++ if (eregs == (struct pt_regs *)eregs->rsp)
++ ;
++ /* Exception from user space */
++ else if (user_mode(eregs))
++ regs = task_pt_regs(current);
++ /* Exception from kernel and interrupts are enabled. Move to
++ kernel process stack. */
++ else if (eregs->eflags & X86_EFLAGS_IF)
++ regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
++ if (eregs != regs)
++ *regs = *eregs;
++ return regs;
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_debug(struct pt_regs * regs,
++ unsigned long error_code)
++{
++ unsigned long condition;
++ struct task_struct *tsk = current;
++ siginfo_t info;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++
++ preempt_conditional_sti(regs);
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg7) {
++ goto clear_dr7;
++ }
++ }
++
++ tsk->thread.debugreg6 = condition;
++
++ /* Mask out spurious TF errors due to lazy TF clearing */
++ if (condition & DR_STEP) {
++ /*
++ * The TF error should be masked out only if the current
++ * process is not traced and if the TRAP flag has been set
++ * previously by a tracing process (condition detected by
++ * the PT_DTRACE flag); remember that the i386 TRAP flag
++ * can be modified by the process itself in user mode,
++ * allowing programs to debug themselves without the ptrace()
++ * interface.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ /*
++ * Was the TF flag set by a debugger? If so, clear it now,
++ * so that register information is correct.
++ */
++ if (tsk->ptrace & PT_DTRACE) {
++ regs->eflags &= ~TF_MASK;
++ tsk->ptrace &= ~PT_DTRACE;
++ }
++ }
++
++ /* Ok, finally something we can handle */
++ tsk->thread.trap_no = 1;
++ tsk->thread.error_code = error_code;
++ info.si_signo = SIGTRAP;
++ info.si_errno = 0;
++ info.si_code = TRAP_BRKPT;
++ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
++ force_sig_info(SIGTRAP, &info, tsk);
++
++clear_dr7:
++ set_debugreg(0UL, 7);
++ preempt_conditional_cli(regs);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ preempt_conditional_cli(regs);
++}
++
++static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
++{
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return 1;
++ }
++ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
++ /* Illegal floating point operation in the kernel */
++ current->thread.trap_no = trapnr;
++ die(str, regs, 0);
++ return 0;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of asynchronous
++ * IRQ13 delivery.
++ */
++asmlinkage void do_coprocessor_error(struct pt_regs *regs)
++{
++ void __user *rip = (void __user *)(regs->rip);
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ conditional_sti(regs);
++ if (!user_mode(regs) &&
++ kernel_math_error(regs, "kernel x87 math error", 16))
++ return;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = rip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++ * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
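A worked instance of the decode above, with illustrative values: the default x87 control word (0x037f) masks every exception, so nothing traps; if an application unmasks zero-divide by clearing ZM (bit 2), a 1.0/0.0 then sets ZE in the status word and the expression isolates it:

    /* cwd = 0x037b  (default 0x037f with ZM, bit 2, cleared)
     * swd has ZE (bit 2) set after the divide
     * swd & ~cwd & 0x3f == 0x004  ->  info.si_code = FPE_FLTDIV
     */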
++
++asmlinkage void bad_intr(void)
++{
++ printk("bad interrupt");
++}
++
++asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
++{
++ void __user *rip = (void __user *)(regs->rip);
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ conditional_sti(regs);
++ if (!user_mode(regs) &&
++ kernel_math_error(regs, "kernel simd math error", 19))
++ return;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = rip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
++{
++}
++
++#if 0
++asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
++{
++}
++#endif
++
++asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
++{
++}
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ */
++asmlinkage void math_state_restore(void)
++{
++ struct task_struct *me = current;
++ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
++
++ if (!used_math())
++ init_fpu(me);
++ restore_fpu_checking(&me->thread.i387.fxsave);
++ task_thread_info(me)->status |= TS_USEDFPU;
++}
++
++
++/*
++ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
++ * specify <dpl>|4 in the second field.
++ */
++static trap_info_t __cpuinitdata trap_table[] = {
++ { 0, 0|4, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3|4, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0|4, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0|4, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
++ { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
++ { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++#ifdef CONFIG_IA32_EMULATION
++ { IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall},
++#endif
++ { 0, 0, 0, 0 }
++};
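Decoding the second column above: bits 0-1 hold the privilege level a guest needs to invoke the gate, and bit 2 is the "events masked on entry" flag the comment mentions. Laid out as a sketch:

    /*  flags  dpl  events masked?   example
     *  0|4     0        yes         page_fault (kernel-only)
     *  3|4     3        yes         int3, overflow (user-invokable)
     *  3       3        no          ia32_syscall (events stay enabled)
     */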
++
++void __init trap_init(void)
++{
++ int ret;
++
++ ret = HYPERVISOR_set_trap_table(trap_table);
++ if (ret)
++ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
++void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
++{
++ const trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++
++/* Actual parsing is done early in setup.c. */
++static int __init oops_dummy(char *s)
++{
++ panic_on_oops = 1;
++ return 1;
++}
++__setup("oops=", oops_dummy);
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s,NULL,0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++ else if (strcmp(s, "new") == 0)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/vmlinux.lds.S linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/vmlinux.lds.S
+--- linux-2.6.18.8/arch/x86_64/kernel/vmlinux.lds.S 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/vmlinux.lds.S 2008-08-21 11:36:07.000000000 +0200
+@@ -13,6 +13,13 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86
+ OUTPUT_ARCH(i386:x86-64)
+ ENTRY(phys_startup_64)
+ jiffies_64 = jiffies;
++PHDRS {
++ text PT_LOAD FLAGS(5); /* R_E */
++ data PT_LOAD FLAGS(7); /* RWE */
++ user PT_LOAD FLAGS(7); /* RWE */
++ data.init PT_LOAD FLAGS(7); /* RWE */
++ note PT_NOTE FLAGS(4); /* R__ */
++}
+ SECTIONS
+ {
+ . = __START_KERNEL;
+@@ -31,7 +38,7 @@ SECTIONS
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- } = 0x9090
++ } :text = 0x9090
+ /* out-of-line lock text */
+ .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
+
+@@ -57,17 +64,10 @@ SECTIONS
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
+ *(.data)
+ CONSTRUCTORS
+- }
++ } :data
+
+ _edata = .; /* End of data section */
+
+- __bss_start = .; /* BSS */
+- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+- *(.bss.page_aligned)
+- *(.bss)
+- }
+- __bss_stop = .;
+-
+ . = ALIGN(PAGE_SIZE);
+ . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+@@ -89,7 +89,7 @@ SECTIONS
+ #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
+
+ . = VSYSCALL_ADDR;
+- .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
++ .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
+ __vsyscall_0 = VSYSCALL_VIRT_ADDR;
+
+ . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+@@ -132,7 +132,7 @@ SECTIONS
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
+ *(.data.init_task)
+- }
++ }:data.init
+
+ . = ALIGN(4096);
+ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+@@ -222,6 +222,14 @@ SECTIONS
+ . = ALIGN(4096);
+ __nosave_end = .;
+
++ __bss_start = .; /* BSS */
++ . = ALIGN(4096);
++ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
++ *(.bss.page_aligned)
++ *(.bss)
++ }
++ __bss_stop = .;
++
+ _end = . ;
+
+ /* Sections to be discarded */
+@@ -235,4 +243,6 @@ SECTIONS
+ STABS_DEBUG
+
+ DWARF_DEBUG
++
++ NOTES
+ }
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/vsyscall-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/vsyscall-xen.c
+--- linux-2.6.18.8/arch/x86_64/kernel/vsyscall-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/vsyscall-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,227 @@
++/*
++ * linux/arch/x86_64/kernel/vsyscall.c
++ *
++ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
++ * Copyright 2003 Andi Kleen, SuSE Labs.
++ *
++ * Thanks to hpa@transmeta.com for some useful hints.
++ * Special thanks to Ingo Molnar for his early experience with
++ * a different vsyscall implementation for Linux/IA32 and for the name.
++ *
++ * vsyscall 0 is located at -10Mbyte, vsyscall 1 is located
++ * at virtual address -10Mbyte+1024bytes etc... There are at most 4
++ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
++ * jumping out of line if necessary. We cannot add more with this
++ * mechanism because older kernels won't return -ENOSYS.
++ * If we want more than four we need a vDSO.
++ *
++ * Note: the concept clashes with user mode linux. If you use UML and
++ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
++ */
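Concretely, the slots described above sit 1024 bytes apart starting at the fixmapped -10Mbyte address; a sketch of the resulting map (exact constants come from asm/vsyscall.h):

    /* VSYSCALL_ADDR(0) == 0xffffffffff600000   vgettimeofday()
     * VSYSCALL_ADDR(1) == 0xffffffffff600400   vtime()
     * VSYSCALL_ADDR(2) == 0xffffffffff600800   venosys_0()
     * VSYSCALL_ADDR(3) == 0xffffffffff600c00   venosys_1()
     */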
++
++#include <linux/time.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/seqlock.h>
++#include <linux/jiffies.h>
++#include <linux/sysctl.h>
++
++#include <asm/vsyscall.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/fixmap.h>
++#include <asm/errno.h>
++#include <asm/io.h>
++
++#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++
++int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
++seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++
++#include <asm/unistd.h>
++
++static __always_inline void timeval_normalize(struct timeval * tv)
++{
++ time_t __sec;
++
++ __sec = tv->tv_usec / 1000000;
++ if (__sec) {
++ tv->tv_usec %= 1000000;
++ tv->tv_sec += __sec;
++ }
++}
++
++static __always_inline void do_vgettimeofday(struct timeval * tv)
++{
++ long sequence, t;
++ unsigned long sec, usec;
++
++ do {
++ sequence = read_seqbegin(&__xtime_lock);
++
++ sec = __xtime.tv_sec;
++ usec = (__xtime.tv_nsec / 1000) +
++ (__jiffies - __wall_jiffies) * (1000000 / HZ);
++
++ if (__vxtime.mode != VXTIME_HPET) {
++ t = get_cycles_sync();
++ if (t < __vxtime.last_tsc)
++ t = __vxtime.last_tsc;
++ usec += ((t - __vxtime.last_tsc) *
++ __vxtime.tsc_quot) >> 32;
++ /* See comment in x86_64 do_gettimeofday. */
++ } else {
++ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
++ __vxtime.last) * __vxtime.quot) >> 32;
++ }
++ } while (read_seqretry(&__xtime_lock, sequence));
++
++ tv->tv_sec = sec + usec / 1000000;
++ tv->tv_usec = usec % 1000000;
++}
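The extrapolation step is a 32.32 fixed-point multiply: tsc_quot holds microseconds-per-tick scaled by 2^32. A worked example with round numbers, assuming a hypothetical 2.048 GHz TSC (2048 ticks per microsecond):

    /* quot  = (1ULL << 32) / 2048        == 2097152
     * delta = 4096 ticks since last_tsc
     * usec += (4096ULL * 2097152) >> 32  == 2   (microseconds)
     */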
++
++/* RED-PEN: may want to re-add seq locking, but then the variable should be write-once. */
++static __always_inline void do_get_tz(struct timezone * tz)
++{
++ *tz = __sys_tz;
++}
++
++static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ int ret;
++ asm volatile("vsysc2: syscall"
++ : "=a" (ret)
++ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
++ return ret;
++}
++
++static __always_inline long time_syscall(long *t)
++{
++ long secs;
++ asm volatile("vsysc1: syscall"
++ : "=a" (secs)
++ : "0" (__NR_time),"D" (t) : __syscall_clobber);
++ return secs;
++}
++
++int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
++{
++ if (!__sysctl_vsyscall)
++ return gettimeofday(tv,tz);
++ if (tv)
++ do_vgettimeofday(tv);
++ if (tz)
++ do_get_tz(tz);
++ return 0;
++}
++
++/* This will break when the xtime seconds get inaccurate, but that is
++ * unlikely */
++time_t __vsyscall(1) vtime(time_t *t)
++{
++ if (!__sysctl_vsyscall)
++ return time_syscall(t);
++ else if (t)
++ *t = __xtime.tv_sec;
++ return __xtime.tv_sec;
++}
++
++long __vsyscall(2) venosys_0(void)
++{
++ return -ENOSYS;
++}
++
++long __vsyscall(3) venosys_1(void)
++{
++ return -ENOSYS;
++}
++
++#ifdef CONFIG_SYSCTL
++
++#define SYSCALL 0x050f
++#define NOP2 0x9090
++
++/*
++ * NOP out syscall in vsyscall page when not needed.
++ */
++static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ extern u16 vsysc1, vsysc2;
++ u16 *map1, *map2;
++ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
++ if (!write)
++ return ret;
++ /* gcc has some trouble with __va(__pa()), so just do it this
++ way. */
++ map1 = ioremap(__pa_symbol(&vsysc1), 2);
++ if (!map1)
++ return -ENOMEM;
++ map2 = ioremap(__pa_symbol(&vsysc2), 2);
++ if (!map2) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ if (!sysctl_vsyscall) {
++ *map1 = SYSCALL;
++ *map2 = SYSCALL;
++ } else {
++ *map1 = NOP2;
++ *map2 = NOP2;
++ }
++ iounmap(map2);
++out:
++ iounmap(map1);
++ return ret;
++}
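The two 16-bit constants are x86 opcode pairs stored little-endian: 0x050f is the two-byte "syscall" instruction (0f 05) and 0x9090 is two NOPs. So the handler keeps a live syscall at the vsysc1/vsysc2 fallback sites only while the fast in-page path is switched off:

    /* sysctl_vsyscall == 0: patch in 0f 05, the fallback really enters
     *                       the kernel via "syscall"
     * sysctl_vsyscall == 1: patch in 90 90, the sites are dead code
     *                       because the C fast path returns first */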
++
++static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
++ void __user *oldval, size_t __user *oldlenp,
++ void __user *newval, size_t newlen,
++ void **context)
++{
++ return -ENOSYS;
++}
++
++static ctl_table kernel_table2[] = {
++ { .ctl_name = 99, .procname = "vsyscall64",
++ .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
++ .strategy = vsyscall_sysctl_nostrat,
++ .proc_handler = vsyscall_sysctl_change },
++ { 0, }
++};
++
++static ctl_table kernel_root_table2[] = {
++ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
++ .child = kernel_table2 },
++ { 0 },
++};
++
++#endif
++
++static void __init map_vsyscall(void)
++{
++ extern char __vsyscall_0;
++ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++
++static int __init vsyscall_init(void)
++{
++ BUG_ON(((unsigned long) &vgettimeofday !=
++ VSYSCALL_ADDR(__NR_vgettimeofday)));
++ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
++ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
++ map_vsyscall();
++#ifdef CONFIG_XEN
++ sysctl_vsyscall = 0; /* disable vgettimeofday() */
++#endif
++#ifdef CONFIG_SYSCTL
++ register_sysctl_table(kernel_root_table2, 0);
++#endif
++ return 0;
++}
++
++__initcall(vsyscall_init);
+diff -rpuN linux-2.6.18.8/arch/x86_64/kernel/xen_entry.S linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/xen_entry.S
+--- linux-2.6.18.8/arch/x86_64/kernel/xen_entry.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/kernel/xen_entry.S 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,36 @@
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
++//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
++#define preempt_disable(reg)
++#define preempt_enable(reg)
++#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
++ movq %gs:pda_cpunumber,reg ; \
++ shl $32, reg ; \
++ shr $32-sizeof_vcpu_shift,reg ; \
++ addq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
++#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
++#else
++#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)
++#define XEN_PUT_VCPU_INFO_fixup
++#endif
++
++#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
++#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
++ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
++ XEN_PUT_VCPU_INFO(reg)
++#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
++ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
++ XEN_PUT_VCPU_INFO(reg)
++#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
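In C terms, XEN_GET_VCPU_INFO computes the address of this CPU's vcpu_info slot inside the shared info page: the shl/shr pair zero-extends the 32-bit CPU number and multiplies it by 2^sizeof_vcpu_shift = 64 in one step. A sketch of the equivalent pointer arithmetic (vcpu_info_ptr is an illustrative name, not part of the patch):

    static inline void *vcpu_info_ptr(void *shared_info, unsigned int cpu)
    {
            /* (cpu << 32) >> (32 - 6)  ==  (unsigned long)cpu << 6 */
            return (char *)shared_info + ((unsigned long)cpu << 6);
    }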
+diff -rpuN linux-2.6.18.8/arch/x86_64/lib/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/lib/Makefile
+--- linux-2.6.18.8/arch/x86_64/lib/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/lib/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -10,3 +10,4 @@ lib-y := csum-partial.o csum-copy.o csum
+ usercopy.o getuser.o putuser.o \
+ thunk.o clear_page.o copy_page.o bitstr.o bitops.o
+ lib-y += memcpy.o memmove.o memset.o copy_user.o
++lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
+diff -rpuN linux-2.6.18.8/arch/x86_64/lib/scrub.c linux-2.6.18-xen-3.3.0/arch/x86_64/lib/scrub.c
+--- linux-2.6.18.8/arch/x86_64/lib/scrub.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/lib/scrub.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1 @@
++#include "../../i386/lib/scrub.c"
+diff -rpuN linux-2.6.18.8/arch/x86_64/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/Makefile
+--- linux-2.6.18.8/arch/x86_64/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -71,9 +71,22 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_
+
+ boot := arch/x86_64/boot
+
+-PHONY += bzImage bzlilo install archmrproper \
++PHONY += bzImage bzlilo vmlinuz install archmrproper \
+ fdimage fdimage144 fdimage288 isoimage archclean
+
++ifdef CONFIG_XEN
++CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
++ -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++LDFLAGS_vmlinux := -e startup_64
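++# Xen guests start directly in 64-bit mode, so vmlinux is linked with
++# startup_64 as its entry point and packaged via the "vmlinuz" target
++# below instead of the real-mode bzImage boot path.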
++#Default target when executing "make"
++all: vmlinuz
++
++BOOTIMAGE := $(boot)/vmlinuz
++KBUILD_IMAGE := $(BOOTIMAGE)
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
++else
+ #Default target when executing "make"
+ all: bzImage
+
+@@ -91,6 +104,7 @@ bzdisk: vmlinux
+
+ fdimage fdimage144 fdimage288 isoimage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
++endif
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+diff -rpuN linux-2.6.18.8/arch/x86_64/mm/fault-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/mm/fault-xen.c
+--- linux-2.6.18.8/arch/x86_64/mm/fault-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/mm/fault-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,724 @@
++/*
++ * linux/arch/x86-64/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm/kdebug.h>
++#include <asm-generic/sections.h>
++
++/* Page fault error code bits */
++#define PF_PROT (1<<0) /* or no page found */
++#define PF_WRITE (1<<1)
++#define PF_USER (1<<2)
++#define PF_RSVD (1<<3)
++#define PF_INSTR (1<<4)
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++
++/* Hook to register for page fault notifications */
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
++
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++ if (yes) {
++ oops_in_progress = 1;
++ } else {
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++ }
++}
++
++/* Sometimes the CPU reports invalid exceptions on prefetch.
++ Check that here and ignore.
++ Opcode checker based on code by Richard Brunner */
++static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ unsigned char *instr;
++ int scan_more = 1;
++ int prefetch = 0;
++ unsigned char *max_instr;
++
++	/* If it was an exec fault, ignore */
++ if (error_code & PF_INSTR)
++ return 0;
++
++ instr = (unsigned char *)convert_rip_to_linear(current, regs);
++ max_instr = instr + 15;
++
++ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
++ return 0;
++
++ while (scan_more && instr < max_instr) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (__get_user(opcode, instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86
++ prefixes. In long mode, the CPU will signal
++ invalid opcode if some of these prefixes are
++ present so we will never get here anyway */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x40:
++		/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes.
++ Need to figure out under what instruction mode the
++ instruction was issued ... */
++ /* Could check the LDT for lm, but for now it's good
++ enough to assume that long mode only uses well known
++ segments or kernel. */
++ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (__get_user(opcode, instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static int bad_address(void *p)
++{
++ unsigned long dummy;
++ return __get_user(dummy, (unsigned long *)p);
++}
++
++void dump_pagetable(unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ if (bad_address(pgd)) goto bad;
++ printk("PGD %lx ", pgd_val(*pgd));
++ if (!pgd_present(*pgd)) goto ret;
++
++ pud = pud_offset(pgd, address);
++ if (bad_address(pud)) goto bad;
++ printk("PUD %lx ", pud_val(*pud));
++ if (!pud_present(*pud)) goto ret;
++
++ pmd = pmd_offset(pud, address);
++ if (bad_address(pmd)) goto bad;
++ printk("PMD %lx ", pmd_val(*pmd));
++ if (!pmd_present(*pmd)) goto ret;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (bad_address(pte)) goto bad;
++ printk("PTE %lx", pte_val(*pte));
++ret:
++ printk("\n");
++ return;
++bad:
++ printk("BAD\n");
++}
++
++static const char errata93_warning[] =
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++ BIOS SMM functions are required to use a specific workaround
++ to avoid corruption of the 64bit RIP register on C stepping K8.
++   Many BIOSes that weren't tested properly miss this.
++ The OS sees this as a page fault with the upper 32bits of RIP cleared.
++ Try to work around it here.
++ Note we only handle faults in kernel here. */
++
++static int is_errata93(struct pt_regs *regs, unsigned long address)
++{
++ static int warned;
++ if (address != regs->rip)
++ return 0;
++ if ((address >> 32) != 0)
++ return 0;
++ address |= 0xffffffffUL << 32;
++ if ((address >= (u64)_stext && address <= (u64)_etext) ||
++ (address >= MODULES_VADDR && address <= MODULES_END)) {
++ if (!warned) {
++ printk(errata93_warning);
++ warned = 1;
++ }
++ regs->rip = address;
++ return 1;
++ }
++ return 0;
++}
++
++int unhandled_signal(struct task_struct *tsk, int sig)
++{
++ if (tsk->pid == 1)
++ return 1;
++ if (tsk->ptrace & PT_PTRACED)
++ return 0;
++ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
++ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
++}
++
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++ unsigned long error_code)
++{
++ unsigned long flags = oops_begin();
++ struct task_struct *tsk;
++
++ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++ current->comm, address);
++ dump_pagetable(address);
++ tsk = current;
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ __die("Bad pagetable", regs, error_code);
++ oops_end(flags);
++ do_exit(SIGKILL);
++}
++
++/*
++ * Handle a fault on the vmalloc area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++ pgd_t *pgd, *pgd_ref;
++ pud_t *pud, *pud_ref;
++ pmd_t *pmd, *pmd_ref;
++ pte_t *pte, *pte_ref;
++
++ /* Copy kernel mappings over when needed. This can also
++	   happen within a race in page table update. In the latter
++ case just flush. */
++
++ /* On Xen the line below does not always work. Needs investigating! */
++ /*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
++ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ pgd_ref = pgd_offset_k(address);
++ if (pgd_none(*pgd_ref))
++ return -1;
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++
++ /* Below here mismatches are bugs because these lower tables
++ are shared */
++
++ pud = pud_offset(pgd, address);
++ pud_ref = pud_offset(pgd_ref, address);
++ if (pud_none(*pud_ref))
++ return -1;
++ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++ BUG();
++ pmd = pmd_offset(pud, address);
++ pmd_ref = pmd_offset(pud_ref, address);
++ if (pmd_none(*pmd_ref))
++ return -1;
++ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++ BUG();
++ pte_ref = pte_offset_kernel(pmd_ref, address);
++ if (!pte_present(*pte_ref))
++ return -1;
++ pte = pte_offset_kernel(pmd, address);
++ /* Don't use pte_page here, because the mappings can point
++ outside mem_map, and the NUMA hash lookup cannot handle
++ that. */
++ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++ BUG();
++ return 0;
++}
++
++int page_fault_trace = 0;
++int exception_trace = 1;
++
++
++#define MEM_VERBOSE 1
++
++#ifdef MEM_VERBOSE
++#define MEM_LOG(_f, _a...) \
++ printk("fault.c:[%d]-> " _f "\n", \
++ __LINE__ , ## _a )
++#else
++#define MEM_LOG(_f, _a...) ((void)0)
++#endif
++
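++/* A fault is spurious when the page tables already grant the attempted
++ * access and the trap came from a stale TLB entry, e.g. after a lazy
++ * R/O -> R/W upgrade of a kernel mapping; such faults are safe to ignore. */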
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area are never spurious. */
++ if ((address >= HYPERVISOR_VIRT_START) &&
++ (address < HYPERVISOR_VIRT_END))
++ return 0;
++#endif
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & (PF_RSVD|PF_USER))
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & PF_WRITE) && !pte_write(*pte))
++ return 0;
++ if ((error_code & PF_INSTR) && (__pte_val(*pte) & _PAGE_NX))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ const struct exception_table_entry *fixup;
++ int write;
++ unsigned long flags;
++ siginfo_t info;
++
++ if (!user_mode(regs))
++ error_code &= ~PF_USER; /* means kernel */
++
++ tsk = current;
++ mm = tsk->mm;
++ prefetchw(&mm->mmap_sem);
++
++ /* get the address */
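++	/* A Xen guest cannot read %cr2 directly; the hypervisor mirrors the
++	 * faulting address into vcpu_info->arch.cr2 instead. */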
++ address = current_vcpu_info()->arch.cr2;
++
++ info.si_code = SEGV_MAPERR;
++
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE64)) {
++ /*
++ * Don't check for the module range here: its PML4
++ * is always initialized because it's shared with the main
++ * kernel text. Only vmalloc may need PML4 syncups.
++ */
++ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++ ((address >= VMALLOC_START && address < VMALLOC_END))) {
++ if (vmalloc_fault(address) >= 0)
++ return;
++ }
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ if (likely(regs->eflags & X86_EFLAGS_IF))
++ local_irq_enable();
++
++ if (unlikely(page_fault_trace))
++ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
++ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
++
++ if (unlikely(error_code & PF_RSVD))
++ pgtable_bad(address, regs, error_code);
++
++ /*
++ * If we're in an interrupt or have no user
++ * context, we must not take the fault..
++ */
++ if (unlikely(in_atomic() || !mm))
++ goto bad_area_nosemaphore;
++
++ again:
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++	 * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & PF_USER) == 0 &&
++ !search_exception_tables(regs->rip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (likely(vma->vm_start <= address))
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++	if (error_code & PF_USER) {
++ /* Allow userspace just enough access below the stack pointer
++ * to let the 'enter' instruction work.
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ info.si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & (PF_PROT|PF_WRITE)) {
++ default: /* 3: write, present */
++ /* fall through */
++ case PF_WRITE: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case PF_PROT: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ default:
++ goto out_of_memory;
++ }
++
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & PF_USER) {
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++		/* Work around K8 erratum #100: K8 in compat mode
++ occasionally jumps to illegal addresses >4GB. We
++ catch this here in the page fault handler because
++ these addresses are not reachable. Just detect this
++ case and return. Any code segment in LDT is
++ compatibility mode. */
++ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++ (address >> 32))
++ return;
++
++ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
++ printk(
++ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
++ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
++ tsk->comm, tsk->pid, address, regs->rip,
++ regs->rsp, error_code);
++ }
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ info.si_signo = SIGSEGV;
++ info.si_errno = 0;
++ /* info.si_code has been set above */
++ info.si_addr = (void __user *)address;
++ force_sig_info(SIGSEGV, &info, tsk);
++ return;
++ }
++
++no_context:
++
++ /* Are we prepared to handle this kernel fault? */
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return;
++ }
++
++ /*
++ * Hall of shame of CPU/BIOS bugs.
++ */
++
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ if (is_errata93(regs, address))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ flags = oops_begin();
++
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++ else
++ printk(KERN_ALERT "Unable to handle kernel paging request");
++ printk(" at %016lx RIP: \n" KERN_ALERT,address);
++ printk_address(regs->rip);
++ dump_pagetable(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ __die("Oops", regs, error_code);
++ /* Executive summary in case the body of the oops scrolled away */
++ printk(KERN_EMERG "CR2: %016lx\n", address);
++ oops_end(flags);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (current->pid == 1) {
++ yield();
++ goto again;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & PF_USER)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & PF_USER))
++ goto no_context;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ info.si_signo = SIGBUS;
++ info.si_errno = 0;
++ info.si_code = BUS_ADRERR;
++ info.si_addr = (void __user *)address;
++ force_sig_info(SIGBUS, &info, tsk);
++ return;
++}
++
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++void vmalloc_sync_all(void)
++{
++ /* Note that races in the updates of insync and start aren't
++ problematic:
++ insync can only get set bits added, and updates to start are only
++ improving performance (without affecting correctness if undone). */
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++ static unsigned long start = VMALLOC_START & PGDIR_MASK;
++ unsigned long address;
++
++ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
++ if (!test_bit(pgd_index(address), insync)) {
++ const pgd_t *pgd_ref = pgd_offset_k(address);
++ struct page *page;
++
++ if (pgd_none(*pgd_ref))
++ continue;
++ spin_lock(&pgd_lock);
++ for (page = pgd_list; page;
++ page = (struct page *)page->index) {
++ pgd_t *pgd;
++ pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++ }
++ spin_unlock(&pgd_lock);
++ set_bit(pgd_index(address), insync);
++ }
++ if (address == start)
++ start = address + PGDIR_SIZE;
++ }
++ /* Check that there is no need to do the same for the modules area. */
++ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
++ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
++ (__START_KERNEL & PGDIR_MASK)));
++}
++
++static int __init enable_pagefaulttrace(char *str)
++{
++ page_fault_trace = 1;
++ return 1;
++}
++__setup("pagefaulttrace", enable_pagefaulttrace);
+diff -rpuN linux-2.6.18.8/arch/x86_64/mm/init-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/mm/init-xen.c
+--- linux-2.6.18.8/arch/x86_64/mm/init-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/mm/init-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1213 @@
++/*
++ * linux/arch/x86_64/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/proc_fs.h>
++#include <linux/pci.h>
++#include <linux/poison.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++#include <linux/memory_hotplug.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/sections.h>
++
++#include <xen/features.h>
++
++#ifndef Dprintk
++#define Dprintk(x...)
++#endif
++
++struct dma_mapping_ops* dma_ops;
++EXPORT_SYMBOL(dma_ops);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++unsigned int __kernel_page_user;
++EXPORT_SYMBOL(__kernel_page_user);
++#endif
++
++int after_bootmem;
++
++extern unsigned long *contiguous_bitmap;
++
++static unsigned long dma_reserve __initdata;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++extern unsigned long start_pfn;
++
++/*
++ * Use this until direct mapping is established, i.e. before __va() is
++ * available in init_memory_mapping().
++ */
++
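++/* The macro follows one page-table level by hand: it masks the entry down
++ * to a machine address, translates the MFN to a PFN, and rebases the
++ * result into the __START_KERNEL_map alias so it can be dereferenced. */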
++#define addr_to_page(addr, page) \
++ (addr) &= PHYSICAL_PAGE_MASK; \
++ (page) = ((unsigned long *) ((unsigned long) \
++ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
++ __START_KERNEL_map)))
++
++static void __meminit early_make_page_readonly(void *va, unsigned int feature)
++{
++ unsigned long addr, _va = (unsigned long)va;
++ pte_t pte, *ptep;
++ unsigned long *page = (unsigned long *) init_level4_pgt;
++
++ BUG_ON(after_bootmem);
++
++ if (xen_feature(feature))
++ return;
++
++ addr = (unsigned long) page[pgd_index(_va)];
++ addr_to_page(addr, page);
++
++ addr = page[pud_index(_va)];
++ addr_to_page(addr, page);
++
++ addr = page[pmd_index(_va)];
++ addr_to_page(addr, page);
++
++ ptep = (pte_t *) &page[pte_index(_va)];
++
++ pte.pte = ptep->pte & ~_PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(_va, pte, 0))
++ BUG();
++}
++
++static void __make_page_readonly(void *va)
++{
++ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++ unsigned long addr = (unsigned long) va;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ ptep = pte_offset_kernel(pmd, addr);
++
++ pte.pte = ptep->pte & ~_PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_l1_entry_update(ptep, pte); /* fallback */
++
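++	/* Xen refuses writable mappings of page-table pages. The frame
++	 * backing a vmalloc address is also reachable through the direct
++	 * mapping, so recurse to make that alias read-only as well. */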
++ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++ __make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++static void __make_page_writable(void *va)
++{
++ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++ unsigned long addr = (unsigned long) va;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ ptep = pte_offset_kernel(pmd, addr);
++
++ pte.pte = ptep->pte | _PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_l1_entry_update(ptep, pte); /* fallback */
++
++ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++ __make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ if (!xen_feature(feature))
++ __make_page_readonly(va);
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ if (!xen_feature(feature))
++ __make_page_writable(va);
++}
++
++void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ __make_page_readonly(va);
++ va = (void*)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ __make_page_writable(va);
++ va = (void*)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++/*
++ * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
++ * in physical space so we can cache the location of the first one and move
++ * around without checking the pgd every time.
++ */
++
++void show_mem(void)
++{
++ long i, total = 0, reserved = 0;
++ long shared = 0, cached = 0;
++ pg_data_t *pgdat;
++ struct page *page;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++
++ for_each_online_pgdat(pgdat) {
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pfn_to_page(pgdat->node_start_pfn + i);
++ total++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ }
++ printk(KERN_INFO "%lu pages of RAM\n", total);
++ printk(KERN_INFO "%lu reserved pages\n",reserved);
++ printk(KERN_INFO "%lu pages shared\n",shared);
++ printk(KERN_INFO "%lu pages swap cached\n",cached);
++}
++
++
++static __init void *spp_getpage(void)
++{
++ void *ptr;
++ if (after_bootmem)
++ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
++ else if (start_pfn < table_end) {
++ ptr = __va(start_pfn << PAGE_SHIFT);
++ start_pfn++;
++ memset(ptr, 0, PAGE_SIZE);
++ } else
++ ptr = alloc_bootmem_pages(PAGE_SIZE);
++ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
++ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
++
++ Dprintk("spp_getpage %p\n", ptr);
++ return ptr;
++}
++
++#define pgd_offset_u(address) (__user_pgd(init_level4_pgt) + pgd_index(address))
++#define pud_offset_u(address) (level3_user_pgt + pud_index(address))
++
++static __init void set_pte_phys(unsigned long vaddr,
++ unsigned long phys, pgprot_t prot, int user_mode)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++
++ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
++ if (pgd_none(*pgd)) {
++ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++ return;
++ }
++ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
++ if (pud_none(*pud)) {
++ pmd = (pmd_t *) spp_getpage();
++ make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++ if (pmd != pmd_offset(pud, 0)) {
++ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++ return;
++ }
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ pte = (pte_t *) spp_getpage();
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++ if (pte != pte_offset_kernel(pmd, 0)) {
++ printk("PAGETABLE BUG #02!\n");
++ return;
++ }
++ }
++ if (pgprot_val(prot))
++ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
++ else
++ new_pte = __pte(0);
++
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (!pte_none(*pte) &&
++ __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
++ pte_ERROR(*pte);
++ set_pte(pte, new_pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static __init void set_pte_phys_ma(unsigned long vaddr,
++ unsigned long phys, pgprot_t prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++
++ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++ pgd = pgd_offset_k(vaddr);
++ if (pgd_none(*pgd)) {
++ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++
++ pmd = (pmd_t *) spp_getpage();
++ make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++ if (pmd != pmd_offset(pud, 0)) {
++ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++ return;
++ }
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ pte = (pte_t *) spp_getpage();
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++ if (pte != pte_offset_kernel(pmd, 0)) {
++ printk("PAGETABLE BUG #02!\n");
++ return;
++ }
++ }
++ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
++
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (!pte_none(*pte) &&
++#ifdef CONFIG_ACPI
++ /* __acpi_map_table() fails to properly call clear_fixmap() */
++ (vaddr < __fix_to_virt(FIX_ACPI_END) ||
++ vaddr > __fix_to_virt(FIX_ACPI_BEGIN)) &&
++#endif
++ __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
++ pte_ERROR(*pte);
++ set_pte(pte, new_pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++/* NOTE: this is meant to be run only at boot */
++void __init
++__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++ unsigned long address = __fix_to_virt(idx);
++
++ if (idx >= __end_of_fixed_addresses) {
++ printk("Invalid __set_fixmap\n");
++ return;
++ }
++ switch (idx) {
++ case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
++ set_pte_phys(address, phys, prot, 0);
++ set_pte_phys(address, phys, prot, 1);
++ break;
++ default:
++ set_pte_phys_ma(address, phys, prot);
++ break;
++ }
++}
++
++unsigned long __initdata table_start, table_end;
++
++static __meminit void *alloc_static_page(unsigned long *phys)
++{
++ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
++
++ if (after_bootmem) {
++ void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
++
++ *phys = __pa(adr);
++ return adr;
++ }
++
++ *phys = start_pfn << PAGE_SHIFT;
++ start_pfn++;
++ memset((void *)va, 0, PAGE_SIZE);
++ return (void *)va;
++}
++
++#define PTE_SIZE PAGE_SIZE
++
++static inline int make_readonly(unsigned long paddr)
++{
++ extern char __vsyscall_0;
++ int readonly = 0;
++
++ /* Make new page tables read-only. */
++ if (!xen_feature(XENFEAT_writable_page_tables)
++ && (paddr >= (table_start << PAGE_SHIFT))
++ && (paddr < (table_end << PAGE_SHIFT)))
++ readonly = 1;
++ /* Make old page tables read-only. */
++ if (!xen_feature(XENFEAT_writable_page_tables)
++ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
++ && (paddr < (start_pfn << PAGE_SHIFT)))
++ readonly = 1;
++
++ /*
++ * No need for writable mapping of kernel image. This also ensures that
++ * page and descriptor tables embedded inside don't have writable
++ * mappings. Exclude the vsyscall area here, allowing alternative
++ * instruction patching to work.
++ */
++ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
++ && !(paddr >= __pa_symbol(&__vsyscall_0)
++ && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
++ readonly = 1;
++
++ return readonly;
++}
++
++#ifndef CONFIG_XEN
++/* Must run before zap_low_mappings */
++__init void *early_ioremap(unsigned long addr, unsigned long size)
++{
++ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
++
++ /* actually usually some more */
++ if (size >= LARGE_PAGE_SIZE) {
++ printk("SMBIOS area too long %lu\n", size);
++ return NULL;
++ }
++ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++ map += LARGE_PAGE_SIZE;
++ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++ __flush_tlb();
++ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
++}
++
++/* To avoid virtual aliases later */
++__init void early_iounmap(void *addr, unsigned long size)
++{
++ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
++ printk("early_iounmap: bad address %p\n", addr);
++ set_pmd(temp_mappings[0].pmd, __pmd(0));
++ set_pmd(temp_mappings[1].pmd, __pmd(0));
++ __flush_tlb();
++}
++#endif
++
++static void __meminit
++phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
++{
++ int i, k;
++
++ for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
++ unsigned long pte_phys;
++ pte_t *pte, *pte_save;
++
++ if (address >= end)
++ break;
++ pte = alloc_static_page(&pte_phys);
++ pte_save = pte;
++ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
++ unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
++
++ if (address >= (after_bootmem
++ ? end
++ : xen_start_info->nr_pages << PAGE_SHIFT))
++ pteval = 0;
++ else if (make_readonly(address))
++ pteval &= ~_PAGE_RW;
++ set_pte(pte, __pte(pteval & __supported_pte_mask));
++ }
++ if (!after_bootmem) {
++ early_make_page_readonly(pte_save, XENFEAT_writable_page_tables);
++ *pmd = __pmd(pte_phys | _KERNPG_TABLE);
++ } else {
++ make_page_readonly(pte_save, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
++ }
++ }
++}
++
++static void __meminit
++phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
++{
++ pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
++
++ if (pmd_none(*pmd)) {
++ spin_lock(&init_mm.page_table_lock);
++ phys_pmd_init(pmd, address, end);
++ spin_unlock(&init_mm.page_table_lock);
++ __flush_tlb_all();
++ }
++}
++
++static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++{
++ long i = pud_index(address);
++
++ pud = pud + i;
++
++ if (after_bootmem && pud_val(*pud)) {
++ phys_pmd_update(pud, address, end);
++ return;
++ }
++
++ for (; i < PTRS_PER_PUD; pud++, i++) {
++ unsigned long paddr, pmd_phys;
++ pmd_t *pmd;
++
++ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
++ if (paddr >= end)
++ break;
++
++ pmd = alloc_static_page(&pmd_phys);
++
++ spin_lock(&init_mm.page_table_lock);
++ *pud = __pud(pmd_phys | _KERNPG_TABLE);
++ phys_pmd_init(pmd, paddr, end);
++ spin_unlock(&init_mm.page_table_lock);
++
++ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ }
++ __flush_tlb();
++}
++
++void __init xen_init_pt(void)
++{
++ unsigned long addr, *page;
++
++ /* Find the initial pte page that was built for us. */
++ page = (unsigned long *)xen_start_info->pt_base;
++ addr = page[pgd_index(__START_KERNEL_map)];
++ addr_to_page(addr, page);
++ addr = page[pud_index(__START_KERNEL_map)];
++ addr_to_page(addr, page);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ /* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
++ in kernel PTEs. We check that here. */
++ if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
++ unsigned long *pg;
++ pte_t pte;
++
++ /* Mess with the initial mapping of page 0. It's not needed. */
++ BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
++ addr = page[pmd_index(__START_KERNEL_map)];
++ addr_to_page(addr, pg);
++ pte.pte = pg[pte_index(__START_KERNEL_map)];
++ BUG_ON(!(pte.pte & _PAGE_PRESENT));
++
++ /* If _PAGE_USER isn't set, we obviously do not need it. */
++ if (pte.pte & _PAGE_USER) {
++ /* _PAGE_USER is needed, but is it set implicitly? */
++ pte.pte &= ~_PAGE_USER;
++ if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
++ pte, 0) != 0) ||
++ !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
++ /* We need to explicitly specify _PAGE_USER. */
++ __kernel_page_user = _PAGE_USER;
++ }
++ }
++#endif
++
++ /* Construct mapping of initial pte page in our own directories. */
++ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
++ __pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
++ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
++ __pud(__pa_symbol(level2_kernel_pgt) | _PAGE_TABLE);
++ memcpy(level2_kernel_pgt, page, PAGE_SIZE);
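++	/* level2_kernel_pgt is now a copy of the domain builder's kernel
++	 * mappings, so switching to init_level4_pgt keeps the kernel image
++	 * and the initial page tables mapped. */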
++
++ __user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
++
++ early_make_page_readonly(init_level4_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(__user_pgd(init_level4_pgt),
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level3_kernel_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level3_user_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level2_kernel_pgt,
++ XENFEAT_writable_page_tables);
++
++ if (!xen_feature(XENFEAT_writable_page_tables)) {
++ xen_pgd_pin(__pa_symbol(init_level4_pgt));
++ xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt)));
++ }
++}
++
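++/* Rework the initial mapping left by the domain builder: drop the low-1MB
++ * alias, make sure the kernel image plus 'tables_space' bytes of soon-to-be
++ * allocated page tables are covered, and strip any mappings beyond that. */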
++static void __init extend_init_mapping(unsigned long tables_space)
++{
++ unsigned long va = __START_KERNEL_map;
++ unsigned long phys, addr, *pte_page;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++ unsigned long *page = (unsigned long *)init_level4_pgt;
++
++ addr = page[pgd_index(va)];
++ addr_to_page(addr, page);
++ addr = page[pud_index(va)];
++ addr_to_page(addr, page);
++
++ /* Kill mapping of low 1MB. */
++ while (va < (unsigned long)&_text) {
++ if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
++ BUG();
++ va += PAGE_SIZE;
++ }
++
++ /* Ensure init mappings cover kernel text/data and initial tables. */
++ while (va < (__START_KERNEL_map
++ + (start_pfn << PAGE_SHIFT)
++ + tables_space)) {
++ pmd = (pmd_t *)&page[pmd_index(va)];
++ if (pmd_none(*pmd)) {
++ pte_page = alloc_static_page(&phys);
++ early_make_page_readonly(
++ pte_page, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
++ } else {
++ addr = page[pmd_index(va)];
++ addr_to_page(addr, pte_page);
++ }
++ pte = (pte_t *)&pte_page[pte_index(va)];
++ if (pte_none(*pte)) {
++ new_pte = pfn_pte(
++ (va - __START_KERNEL_map) >> PAGE_SHIFT,
++ __pgprot(_KERNPG_TABLE));
++ xen_l1_entry_update(pte, new_pte);
++ }
++ va += PAGE_SIZE;
++ }
++
++ /* Finally, blow away any spurious initial mappings. */
++ while (1) {
++ pmd = (pmd_t *)&page[pmd_index(va)];
++ if (pmd_none(*pmd))
++ break;
++ if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
++ BUG();
++ va += PAGE_SIZE;
++ }
++}
++
++static void __init find_early_table_space(unsigned long end)
++{
++ unsigned long puds, pmds, ptes, tables;
++
++ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
++ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
++ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
++
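++	/* Eight bytes per entry at each level. Unlike native x86-64, this
++	 * mapping is built from 4k pages only, so pte pages for the whole
++	 * range have to be reserved as well. */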
++ tables = round_up(puds * 8, PAGE_SIZE) +
++ round_up(pmds * 8, PAGE_SIZE) +
++ round_up(ptes * 8, PAGE_SIZE);
++
++ extend_init_mapping(tables);
++
++ table_start = start_pfn;
++ table_end = table_start + (tables>>PAGE_SHIFT);
++
++ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
++ end, table_start << PAGE_SHIFT,
++ (table_start << PAGE_SHIFT) + tables);
++}
++
++static void xen_finish_init_mapping(void)
++{
++ unsigned long i, start, end;
++
++ /* Re-vector virtual addresses pointing into the initial
++ mapping to the just-established permanent ones. */
++ xen_start_info = __va(__pa(xen_start_info));
++ xen_start_info->pt_base = (unsigned long)
++ __va(__pa(xen_start_info->pt_base));
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping =
++ __va(__pa(xen_start_info->mfn_list));
++ xen_start_info->mfn_list = (unsigned long)
++ phys_to_machine_mapping;
++ }
++ if (xen_start_info->mod_start)
++ xen_start_info->mod_start = (unsigned long)
++ __va(__pa(xen_start_info->mod_start));
++
++ /* Destroy the Xen-created mappings beyond the kernel image as
++ * well as the temporary mappings created above. Prevents
++ * overlap with modules area (if init mapping is very big).
++ */
++ start = PAGE_ALIGN((unsigned long)_end);
++ end = __START_KERNEL_map + (table_end << PAGE_SHIFT);
++ for (; start < end; start += PAGE_SIZE)
++ if (HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0))
++ BUG();
++
++ /* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
++ table_end = ~0UL;
++
++ /*
++ * Prefetch pte's for the bt_ioremap() area. It gets used before the
++ * boot-time allocator is online, so allocate-on-demand would fail.
++ */
++ for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
++ __set_fixmap(i, 0, __pgprot(0));
++
++ /* Switch to the real shared_info page, and clear the dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Set up mapping of lowest 1MB of physical memory. */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_mfn(empty_zero_page)
++ << PAGE_SHIFT,
++ PAGE_KERNEL_RO);
++
++ /* Disable the 'start_pfn' allocator. */
++ table_end = start_pfn;
++}
++
++/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
++ This runs before bootmem is initialized and gets pages directly from the
++ physical memory. To access them they are temporarily mapped. */
++void __meminit init_memory_mapping(unsigned long start, unsigned long end)
++{
++ unsigned long next;
++
++ Dprintk("init_memory_mapping\n");
++
++ /*
++ * Find space for the kernel direct mapping tables.
++ * Later we should allocate these tables in the local node of the memory
++ * mapped. Unfortunately this is done currently before the nodes are
++ * discovered.
++ */
++ if (!after_bootmem)
++ find_early_table_space(end);
++
++ start = (unsigned long)__va(start);
++ end = (unsigned long)__va(end);
++
++ for (; start < end; start = next) {
++ unsigned long pud_phys;
++ pgd_t *pgd = pgd_offset_k(start);
++ pud_t *pud;
++
++ if (after_bootmem)
++ pud = pud_offset(pgd, start & PGDIR_MASK);
++ else
++ pud = alloc_static_page(&pud_phys);
++ next = start + PGDIR_SIZE;
++ if (next > end)
++ next = end;
++ phys_pud_init(pud, __pa(start), __pa(next));
++ if (!after_bootmem) {
++ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
++ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
++ }
++ }
++
++ if (!after_bootmem) {
++ BUG_ON(start_pfn != table_end);
++ xen_finish_init_mapping();
++ }
++
++ __flush_tlb_all();
++}
++
++void __cpuinit zap_low_mappings(int cpu)
++{
++ /* this is not required for Xen */
++#if 0
++ swap_low_mappings();
++#endif
++}
++
++/* Compute zone sizes for the DMA and DMA32 zones in a node. */
++__init void
++size_zones(unsigned long *z, unsigned long *h,
++ unsigned long start_pfn, unsigned long end_pfn)
++{
++ int i;
++ unsigned long w;
++
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ z[i] = 0;
++
++ if (start_pfn < MAX_DMA_PFN)
++ z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
++ if (start_pfn < MAX_DMA32_PFN) {
++ unsigned long dma32_pfn = MAX_DMA32_PFN;
++ if (dma32_pfn > end_pfn)
++ dma32_pfn = end_pfn;
++ z[ZONE_DMA32] = dma32_pfn - start_pfn;
++ }
++ z[ZONE_NORMAL] = end_pfn - start_pfn;
++
++ /* Remove lower zones from higher ones. */
++ w = 0;
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ if (z[i])
++ z[i] -= w;
++ w += z[i];
++ }
++
++ /* Compute holes */
++ w = start_pfn;
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ unsigned long s = w;
++ w += z[i];
++ h[i] = e820_hole_size(s, w);
++ }
++
++	/* Add the space needed for mem_map to the holes too. */
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
++
++ /* The 16MB DMA zone has the kernel and other misc mappings.
++ Account them too */
++ if (h[ZONE_DMA]) {
++ h[ZONE_DMA] += dma_reserve;
++ if (h[ZONE_DMA] >= z[ZONE_DMA]) {
++ printk(KERN_WARNING
++ "Kernel too large and filling up ZONE_DMA?\n");
++ h[ZONE_DMA] = z[ZONE_DMA];
++ }
++ }
++}
++
++#ifndef CONFIG_NUMA
++void __init paging_init(void)
++{
++ unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
++
++ memory_present(0, 0, end_pfn);
++ sparse_init();
++ size_zones(zones, holes, 0, end_pfn);
++ free_area_init_node(0, NODE_DATA(0), zones,
++ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++
++ init_mm.context.pinned = 1;
++}
++#endif
++
++/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
++ from the CPU leading to inconsistent cache lines. address and size
++ must be aligned to 2MB boundaries.
++ Does nothing when the mapping doesn't exist. */
++void __init clear_kernel_mapping(unsigned long address, unsigned long size)
++{
++ unsigned long end = address + size;
++
++ BUG_ON(address & ~LARGE_PAGE_MASK);
++ BUG_ON(size & ~LARGE_PAGE_MASK);
++
++ for (; address < end; address += LARGE_PAGE_SIZE) {
++ pgd_t *pgd = pgd_offset_k(address);
++ pud_t *pud;
++ pmd_t *pmd;
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, address);
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, address);
++ if (!pmd || pmd_none(*pmd))
++ continue;
++ if (0 == (__pmd_val(*pmd) & _PAGE_PSE)) {
++ /* Could handle this, but it should not happen currently. */
++ printk(KERN_ERR
++ "clear_kernel_mapping: mapping has been split. will leak memory\n");
++ pmd_ERROR(*pmd);
++ }
++ set_pmd(pmd, __pmd(0));
++ }
++ __flush_tlb_all();
++}
++
++/*
++ * Memory hotplug specific functions
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ init_page_count(page);
++ __free_page(page);
++ totalram_pages++;
++ num_physpages++;
++}
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++/*
++ * XXX: memory_add_physaddr_to_nid() is supposed to find the node id for a
++ * physical address handed in via the sysfs probe interface. When ACPI
++ * signals a hot-add event the node id can be found by searching the DSDT,
++ * but the probe interface carries no node id, so return node 0 for now.
++ */
++#ifdef CONFIG_NUMA
++int memory_add_physaddr_to_nid(u64 start)
++{
++ return 0;
++}
++#endif
++
++/*
++ * Memory is always added to the NORMAL zone. This means you will never get
++ * additional DMA/DMA32 memory.
++ */
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdat = NODE_DATA(nid);
++ struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++ int ret;
++
++ ret = __add_pages(zone, start_pfn, nr_pages);
++ if (ret)
++ goto error;
++
++ init_memory_mapping(start, (start + size -1));
++
++ return ret;
++error:
++ printk("%s: Problem encountered in __add_pages!\n", __func__);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(arch_add_memory);
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(remove_memory);
++
++#else /* CONFIG_MEMORY_HOTPLUG */
++/*
++ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
++ * just online the pages.
++ */
++int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
++{
++ int err = -EIO;
++ unsigned long pfn;
++ unsigned long total = 0, mem = 0;
++ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
++ if (pfn_valid(pfn)) {
++ online_page(pfn_to_page(pfn));
++ err = 0;
++ mem++;
++ }
++ total++;
++ }
++ if (!err) {
++ z->spanned_pages += total;
++ z->present_pages += mem;
++ z->zone_pgdat->node_spanned_pages += total;
++ z->zone_pgdat->node_present_pages += mem;
++ }
++ return err;
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
++static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
++ kcore_vsyscall;
++
++void __init mem_init(void)
++{
++ long codesize, reservedpages, datasize, initsize;
++ unsigned long pfn;
++
++ contiguous_bitmap = alloc_bootmem_low_pages(
++ (end_pfn + 2*BITS_PER_LONG) >> 3);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
++
++ pci_iommu_alloc();
++
++ /* How many end-of-memory variables you have, grandma! */
++ max_low_pfn = end_pfn;
++ max_pfn = end_pfn;
++ num_physpages = end_pfn;
++ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
++
++ /* clear the zero-page */
++ memset(empty_zero_page, 0, PAGE_SIZE);
++
++ reservedpages = 0;
++
++ /* this will put all low memory onto the freelists */
++#ifdef CONFIG_NUMA
++ totalram_pages = numa_free_all_bootmem();
++#else
++ totalram_pages = free_all_bootmem();
++#endif
++ /* XEN: init and count pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++ reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
++
++ after_bootmem = 1;
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ /* Register memory areas for /proc/kcore */
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++ kclist_add(&kcore_kernel, &_stext, _end - _stext);
++ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
++ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
++ VSYSCALL_END - VSYSCALL_START);
++
++ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ end_pfn << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10);
++
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++ /*
++ * Sync boot_level4_pgt mappings with the init_level4_pgt
++ * except for the low identity mappings which are already zapped
++ * in init_level4_pgt. This sync-up is essential for AP's bringup
++ */
++ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
++#endif
++#endif
++}
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ if (begin >= end)
++ return;
++
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)(addr & ~(PAGE_SIZE-1)),
++ POISON_FREE_INITMEM, PAGE_SIZE);
++ if (addr >= __START_KERNEL_map) {
++ /* make_readonly() reports all kernel addresses. */
++ __make_page_writable(__va(__pa(addr)));
++ if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ pgd_t *pgd = pgd_offset_k(addr);
++ pud_t *pud = pud_offset(pgd, addr);
++ pmd_t *pmd = pmd_offset(pud, addr);
++ pte_t *pte = pte_offset_kernel(pmd, addr);
++
++ xen_l1_entry_update(pte, __pte(0)); /* fallback */
++ }
++ }
++ free_page(addr);
++ totalram_pages++;
++ }
++}
++
++void free_initmem(void)
++{
++ memset(__initdata_begin, POISON_FREE_INITDATA,
++ __initdata_end - __initdata_begin);
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
++
++ printk ("Write protecting the kernel read-only data: %luk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr_addr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk gets out at least to give a better debug hint
++ * of who is the culprit.
++ */
++ global_flush_tlb();
++}
++#endif
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
++void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
++{
++ /* Should check here against the e820 map to avoid double free */
++#ifdef CONFIG_NUMA
++ int nid = phys_to_nid(phys);
++ reserve_bootmem_node(NODE_DATA(nid), phys, len);
++#else
++ reserve_bootmem(phys, len);
++#endif
++ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
++ dma_reserve += len / PAGE_SIZE;
++}
++
++int kern_addr_valid(unsigned long addr)
++{
++ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
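++	/* Canonical-address check: every bit above __VIRTUAL_MASK_SHIFT must
++	 * be a copy of the sign bit, i.e. 'above' is all zeroes or all ones. */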
++ if (above != 0 && above != -1UL)
++ return 0;
++
++ pgd = pgd_offset_k(addr);
++ if (pgd_none(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, addr);
++ if (pud_none(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, addr);
++ if (pmd_none(*pmd))
++ return 0;
++ if (pmd_large(*pmd))
++ return pfn_valid(pmd_pfn(*pmd));
++
++ pte = pte_offset_kernel(pmd, addr);
++ if (pte_none(*pte))
++ return 0;
++ return pfn_valid(pte_pfn(*pte));
++}
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++
++extern int exception_trace, page_fault_trace;
++
++static ctl_table debug_table2[] = {
++	{ .ctl_name = 99, .procname = "exception-trace",
++	  .data = &exception_trace, .maxlen = sizeof(int), .mode = 0644,
++	  .proc_handler = proc_dointvec },
++ { 0, }
++};
++
++static ctl_table debug_root_table2[] = {
++ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
++ .child = debug_table2 },
++ { 0 },
++};
++
++static __init int x8664_sysctl_init(void)
++{
++ register_sysctl_table(debug_root_table2, 1);
++ return 0;
++}
++__initcall(x8664_sysctl_init);
++#endif
++
++/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
++ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
++ not need special handling anymore. */
++
++static struct vm_area_struct gate_vma = {
++ .vm_start = VSYSCALL_START,
++ .vm_end = VSYSCALL_END,
++ .vm_page_prot = PAGE_READONLY
++};
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef CONFIG_IA32_EMULATION
++ if (test_tsk_thread_flag(tsk, TIF_IA32))
++ return NULL;
++#endif
++ return &gate_vma;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++ struct vm_area_struct *vma = get_gate_vma(task);
++ if (!vma)
++ return 0;
++ return (addr >= vma->vm_start) && (addr < vma->vm_end);
++}
++
++/* Use this when you have no reliable task/vma, typically from interrupt
++ * context. It is less reliable than using the task's vma and may give
++ * false positives.
++ */
++int in_gate_area_no_task(unsigned long addr)
++{
++ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
++}
+diff -rpuN linux-2.6.18.8/arch/x86_64/mm/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/mm/Makefile
+--- linux-2.6.18.8/arch/x86_64/mm/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/mm/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -7,5 +7,8 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpag
+ obj-$(CONFIG_NUMA) += numa.o
+ obj-$(CONFIG_K8_NUMA) += k8topology.o
+ obj-$(CONFIG_ACPI_NUMA) += srat.o
++obj-$(CONFIG_XEN) += hypervisor.o
+
+ hugetlbpage-y = ../../i386/mm/hugetlbpage.o
++ioremap-$(CONFIG_XEN) := ../../i386/mm/ioremap.o
++hypervisor-y := ../../i386/mm/hypervisor.o
+diff -rpuN linux-2.6.18.8/arch/x86_64/mm/pageattr-xen.c linux-2.6.18-xen-3.3.0/arch/x86_64/mm/pageattr-xen.c
+--- linux-2.6.18.8/arch/x86_64/mm/pageattr-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/mm/pageattr-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,502 @@
++/*
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ * Thanks to Ben LaHaise for precious feedback.
++ */
++
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++
++#ifdef CONFIG_XEN
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
++
++LIST_HEAD(mm_unpinned);
++DEFINE_SPINLOCK(mm_unpinned_lock);
++
++static void _pin_lock(struct mm_struct *mm, int lock) {
++ if (lock)
++ spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++ /* While mm->page_table_lock protects us against insertions and
++ * removals of higher level page table pages, it doesn't protect
++ * against updates of PTEs. Such updates, however, require the
++ * PTE pages to be in a consistent state (unpinned+writable or
++ * pinned+readonly). The pinning and attribute changes, however,
++ * cannot be done atomically, which is why such updates must be
++ * prevented from happening concurrently.
++ * Note that no PTE lock is ever acquired elsewhere nesting
++ * with an already acquired one in the same mm, or with the mm's
++ * page_table_lock already held, as that would break in the
++ * non-split case (where all of these resolve to the one
++ * page_table_lock). Thus acquiring all of them here cannot
++ * result in deadlocks, and the order of acquisition
++ * doesn't matter.
++ */
++ {
++ pgd_t *pgd = mm->pgd;
++ unsigned g;
++
++ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++ pud_t *pud;
++ unsigned u;
++
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ pmd_t *pmd;
++ unsigned m;
++
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ spinlock_t *ptl;
++
++ if (pmd_none(*pmd))
++ continue;
++ ptl = pte_lockptr(0, pmd);
++ if (lock)
++ spin_lock(ptl);
++ else
++ spin_unlock(ptl);
++ }
++ }
++ }
++ }
++#endif
++ if (!lock)
++ spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
++#define PIN_BATCH 8
++static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
++
++static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
++ unsigned int cpu, unsigned int seq)
++{
++ struct page *page = virt_to_page(pt);
++ unsigned long pfn = page_to_pfn(page);
++
++ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (unlikely(++seq == PIN_BATCH)) {
++ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++ PIN_BATCH, NULL)))
++ BUG();
++ seq = 0;
++ }
++
++ return seq;
++}
++
++static void mm_walk(struct mm_struct *mm, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ int g,u,m;
++ unsigned int cpu, seq;
++ multicall_entry_t *mcl;
++
++ pgd = mm->pgd;
++ cpu = get_cpu();
++
++ /*
++ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
++ * be the 'current' task's pagetables (e.g., current may be 32-bit,
++ * but the pagetables may be for a 64-bit task).
++ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
++ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
++ */
++ for (g = 0, seq = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ seq = mm_walk_set_prot(pud,flags,cpu,seq);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ seq = mm_walk_set_prot(pmd,flags,cpu,seq);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ pte = pte_offset_kernel(pmd,0);
++ seq = mm_walk_set_prot(pte,flags,cpu,seq);
++ }
++ }
++ }
++
++ mcl = per_cpu(pb_mcl, cpu);
++ if (unlikely(seq > PIN_BATCH - 2)) {
++ if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
++ BUG();
++ seq = 0;
++ }
++ MULTI_update_va_mapping(mcl + seq,
++ (unsigned long)__user_pgd(mm->pgd),
++ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
++ 0);
++ MULTI_update_va_mapping(mcl + seq + 1,
++ (unsigned long)mm->pgd,
++ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH);
++ if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
++ BUG();
++
++ put_cpu();
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ pin_lock(mm);
++
++ mm_walk(mm, PAGE_KERNEL_RO);
++ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
++ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
++ mm->context.pinned = 1;
++ spin_lock(&mm_unpinned_lock);
++ list_del(&mm->context.unpinned);
++ spin_unlock(&mm_unpinned_lock);
++
++ pin_unlock(mm);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ pin_lock(mm);
++
++ xen_pgd_unpin(__pa(mm->pgd));
++ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
++ mm_walk(mm, PAGE_KERNEL);
++ mm->context.pinned = 0;
++ spin_lock(&mm_unpinned_lock);
++ list_add(&mm->context.unpinned, &mm_unpinned);
++ spin_unlock(&mm_unpinned_lock);
++
++ pin_unlock(mm);
++}
++
++void mm_pin_all(void)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the mm_unpinned list. We don't
++ * actually take the mm_unpinned_lock as it is taken inside mm_pin().
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ preempt_disable();
++ while (!list_empty(&mm_unpinned))
++ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
++ context.unpinned));
++ preempt_enable();
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!mm->context.pinned)
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove the defunct pgd from cr3: unmap_vmas()
++ * executes *much* faster this way, since not having to flush the
++ * TLB allows bigger writable-pagetable (wrpt) batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings )
++ mm_unpin(mm);
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
++ BUG();
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++#endif /* CONFIG_XEN */
++
++pte_t *lookup_address(unsigned long address)
++{
++ pgd_t *pgd = pgd_offset_k(address);
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ if (pgd_none(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ if (pmd_large(*pmd))
++ return (pte_t *)pmd;
++ pte = pte_offset_kernel(pmd, address);
++ if (pte && !pte_present(*pte))
++ pte = NULL;
++ return pte;
++}
++
++static struct page *split_large_page(unsigned long address, pgprot_t prot,
++ pgprot_t ref_prot)
++{
++ int i;
++ unsigned long addr;
++ struct page *base = alloc_pages(GFP_KERNEL, 0);
++ pte_t *pbase;
++ if (!base)
++ return NULL;
++ /*
++ * page_private is used to track the number of entries in
++ * the page table page that have non-standard attributes.
++ */
++ SetPagePrivate(base);
++ page_private(base) = 0;
++
++ address = __pa(address);
++ addr = address & LARGE_PAGE_MASK;
++ pbase = (pte_t *)page_address(base);
++ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
++ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
++ addr == address ? prot : ref_prot);
++ }
++ return base;
++}
++
++
++static void flush_kernel_map(void *address)
++{
++ if (0 && address && cpu_has_clflush) {
++ /* is this worth it? */
++ int i;
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ asm volatile("clflush (%0)" :: "r" (address + i));
++ } else
++ asm volatile("wbinvd":::"memory");
++ if (address)
++ __flush_tlb_one(address);
++ else
++ __flush_tlb_all();
++}
++
++
++static inline void flush_map(unsigned long address)
++{
++ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
++}
++
++static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
++
++static inline void save_page(struct page *fpage)
++{
++ fpage->lru.next = (struct list_head *)deferred_pages;
++ deferred_pages = fpage;
++}
++
++/*
++ * No more special protections in this 2/4MB area - revert to a
++ * large page again.
++ */
++static void revert_page(unsigned long address, pgprot_t ref_prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t large_pte;
++
++ pgd = pgd_offset_k(address);
++ BUG_ON(pgd_none(*pgd));
++ pud = pud_offset(pgd,address);
++ BUG_ON(pud_none(*pud));
++ pmd = pmd_offset(pud, address);
++ BUG_ON(__pmd_val(*pmd) & _PAGE_PSE);
++ pgprot_val(ref_prot) |= _PAGE_PSE;
++ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++ set_pte((pte_t *)pmd, large_pte);
++}
++
++static int
++__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
++ pgprot_t ref_prot)
++{
++ pte_t *kpte;
++ struct page *kpte_page;
++ unsigned kpte_flags;
++ pgprot_t ref_prot2;
++ kpte = lookup_address(address);
++ if (!kpte) return 0;
++ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
++ kpte_flags = pte_val(*kpte);
++ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
++ if ((kpte_flags & _PAGE_PSE) == 0) {
++ set_pte(kpte, pfn_pte(pfn, prot));
++ } else {
++ /*
++ * split_large_page will take the reference for this
++ * change_page_attr on the split page.
++ */
++
++ struct page *split;
++ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
++ & ~(1 << _PAGE_BIT_PSE));
++
++ split = split_large_page(address, prot, ref_prot2);
++ if (!split)
++ return -ENOMEM;
++ set_pte(kpte,mk_pte(split, ref_prot2));
++ kpte_page = split;
++ }
++ page_private(kpte_page)++;
++ } else if ((kpte_flags & _PAGE_PSE) == 0) {
++ set_pte(kpte, pfn_pte(pfn, ref_prot));
++ BUG_ON(page_private(kpte_page) == 0);
++ page_private(kpte_page)--;
++ } else
++ BUG();
++
++ /* on x86-64 the direct mapping set at boot is not using 4k pages */
++ /*
++ * ..., but the Xen guest kernels (currently) do:
++ * if the pte was reserved, it was created at boot time
++ * (not via split_large_page), so we must not replace it
++ * with a large page.
++ */
++#ifndef CONFIG_XEN
++ BUG_ON(PageReserved(kpte_page));
++#else
++ if (PageReserved(kpte_page))
++ return 0;
++#endif
++
++ if (page_private(kpte_page) == 0) {
++ save_page(kpte_page);
++ revert_page(address, ref_prot);
++ }
++ return 0;
++}
++
++/*
++ * Change the page attributes of a page in the linear mapping.
++ *
++ * This should be used when a page is mapped with a caching policy other
++ * than write-back somewhere - some CPUs do not like it when mappings with
++ * different caching policies exist. This changes the page attributes of
++ * the in-kernel linear mapping too.
++ *
++ * The caller needs to ensure that there are no conflicting mappings elsewhere.
++ * This function only deals with the kernel linear map.
++ *
++ * Caller must call global_flush_tlb() after this.
++ */
++int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
++{
++ int err = 0;
++ int i;
++
++ down_write(&init_mm.mmap_sem);
++ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
++ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
++
++ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
++ if (err)
++ break;
++ /* Handle the kernel mapping too, which aliases part of
++ * lowmem */
++ if (__pa(address) < KERNEL_TEXT_SIZE) {
++ unsigned long addr2;
++ pgprot_t prot2 = prot;
++ addr2 = __START_KERNEL_map + __pa(address);
++ pgprot_val(prot2) &= ~_PAGE_NX;
++ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
++ }
++ }
++ up_write(&init_mm.mmap_sem);
++ return err;
++}
++
++/* Don't call this for MMIO areas that may not have a mem_map entry */
++int change_page_attr(struct page *page, int numpages, pgprot_t prot)
++{
++ unsigned long addr = (unsigned long)page_address(page);
++ return change_page_attr_addr(addr, numpages, prot);
++}
++
++void global_flush_tlb(void)
++{
++ struct page *dpage;
++
++ down_read(&init_mm.mmap_sem);
++ dpage = xchg(&deferred_pages, NULL);
++ up_read(&init_mm.mmap_sem);
++
++ flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
++ while (dpage) {
++ struct page *tmp = dpage;
++ dpage = (struct page *)dpage->lru.next;
++ ClearPagePrivate(tmp);
++ __free_page(tmp);
++ }
++}
++
++EXPORT_SYMBOL(change_page_attr);
++EXPORT_SYMBOL(global_flush_tlb);
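
The calling convention documented above (change the attribute, then flush) looks roughly like this from a hypothetical caller; example_make_page_uncached() is illustrative only, while change_page_attr(), global_flush_tlb() and PAGE_KERNEL_NOCACHE are existing 2.6.18 interfaces:

/* Remap one page uncached in the kernel linear mapping. */
static int example_make_page_uncached(struct page *pg)
{
	int err = change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);

	if (!err)
		global_flush_tlb();	/* required by the CPA contract */
	return err;
}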
+diff -rpuN linux-2.6.18.8/arch/x86_64/oprofile/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/oprofile/Makefile
+--- linux-2.6.18.8/arch/x86_64/oprofile/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/oprofile/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -11,9 +11,15 @@ DRIVER_OBJS = $(addprefix ../../../drive
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++ xenoprofile.o)
++OPROFILE-y := xenoprof.o
++else
+ OPROFILE-y := init.o backtrace.o
+ OPROFILE-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o op_model_p4.o \
+ op_model_ppro.o
+ OPROFILE-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
+-
+-oprofile-y = $(DRIVER_OBJS) $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
++endif
++oprofile-y = $(DRIVER_OBJS) $(XENOPROF_COMMON_OBJS) \
++ $(addprefix ../../i386/oprofile/, $(OPROFILE-y))
+diff -rpuN linux-2.6.18.8/arch/x86_64/pci/Makefile linux-2.6.18-xen-3.3.0/arch/x86_64/pci/Makefile
+--- linux-2.6.18.8/arch/x86_64/pci/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/arch/x86_64/pci/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -15,8 +15,13 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+
+ obj-$(CONFIG_NUMA) += k8-bus.o
+
++# pcifront should be after mmconfig.o and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
++
+ direct-y += ../../i386/pci/direct.o
+ acpi-y += ../../i386/pci/acpi.o
++pcifront-y += ../../i386/pci/pcifront.o
+ legacy-y += ../../i386/pci/legacy.o
+ irq-y += ../../i386/pci/irq.o
+ common-y += ../../i386/pci/common.o
+diff -rpuN linux-2.6.18.8/block/elevator.c linux-2.6.18-xen-3.3.0/block/elevator.c
+--- linux-2.6.18.8/block/elevator.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/block/elevator.c 2008-08-21 11:36:07.000000000 +0200
+@@ -493,6 +493,16 @@ struct request *elv_next_request(request
+ int ret;
+
+ while ((rq = __elv_next_request(q)) != NULL) {
++ /*
++ * Kill the empty barrier placeholder; the driver must
++ * never see it.
++ */
++ if (blk_empty_barrier(rq)) {
++ blkdev_dequeue_request(rq);
++ end_that_request_chunk(rq, 1, 0);
++ end_that_request_last(rq, 1);
++ continue;
++ }
+ if (!(rq->flags & REQ_STARTED)) {
+ elevator_t *e = q->elevator;
+
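
blk_empty_barrier() is introduced by another hunk of this patch (not shown here). Assuming it matches the definition that later went upstream, it tests for a zero-length filesystem barrier request, roughly:

#define blk_empty_barrier(rq) \
	(blk_fs_request(rq) && blk_barrier_rq(rq) && !(rq)->hard_nr_sectors)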
+diff -rpuN linux-2.6.18.8/block/ll_rw_blk.c linux-2.6.18-xen-3.3.0/block/ll_rw_blk.c
+--- linux-2.6.18.8/block/ll_rw_blk.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/block/ll_rw_blk.c 2008-08-21 11:36:07.000000000 +0200
+@@ -483,9 +483,12 @@ static inline struct request *start_orde
+ * Queue ordered sequence. As we stack them at the head, we
+ * need to queue in reverse order. Note that we rely on that
+ * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+- * request gets inbetween ordered sequence.
++ * request gets in between the ordered sequence. If this request
++ * is an empty barrier, we don't need a postflush at all, since
++ * no data is written between the pre and post flush.
++ * Hence a single flush will suffice.
+ */
+- if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
++ if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
+ queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+ else
+ q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+@@ -2967,7 +2970,7 @@ static inline void blk_partition_remap(s
+ {
+ struct block_device *bdev = bio->bi_bdev;
+
+- if (bdev != bdev->bd_contains) {
++ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+ struct hd_struct *p = bdev->bd_part;
+ const int rw = bio_data_dir(bio);
+
+@@ -3028,7 +3031,7 @@ void generic_make_request(struct bio *bi
+ might_sleep();
+ /* Test device or partition size, when known. */
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+- if (maxsector) {
++ if (maxsector && nr_sectors) {
+ sector_t sector = bio->bi_sector;
+
+ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+@@ -3094,7 +3097,7 @@ end_io:
+ old_dev = bio->bi_bdev->bd_dev;
+
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+- if (maxsector) {
++ if (maxsector && nr_sectors) {
+ sector_t sector = bio->bi_sector;
+
+ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+@@ -3128,21 +3131,25 @@ void submit_bio(int rw, struct bio *bio)
+ {
+ int count = bio_sectors(bio);
+
+- BIO_BUG_ON(!bio->bi_size);
+- BIO_BUG_ON(!bio->bi_io_vec);
+ bio->bi_rw |= rw;
+- if (rw & WRITE)
+- count_vm_events(PGPGOUT, count);
+- else
+- count_vm_events(PGPGIN, count);
+
+- if (unlikely(block_dump)) {
+- char b[BDEVNAME_SIZE];
+- printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+- current->comm, current->pid,
+- (rw & WRITE) ? "WRITE" : "READ",
+- (unsigned long long)bio->bi_sector,
+- bdevname(bio->bi_bdev,b));
++ if (!bio_empty_barrier(bio)) {
++ BIO_BUG_ON(!bio->bi_size);
++ BIO_BUG_ON(!bio->bi_io_vec);
++
++ if (rw & WRITE)
++ count_vm_events(PGPGOUT, count);
++ else
++ count_vm_events(PGPGIN, count);
++
++ if (unlikely(block_dump)) {
++ char b[BDEVNAME_SIZE];
++ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
++ current->comm, current->pid,
++ (rw & WRITE) ? "WRITE" : "READ",
++ (unsigned long long)bio->bi_sector,
++ bdevname(bio->bi_bdev,b));
++ }
+ }
+
+ generic_make_request(bio);
+@@ -3260,6 +3267,13 @@ static int __end_that_request_first(stru
+ while ((bio = req->bio) != NULL) {
+ int nbytes;
+
++ /* For an empty barrier request, the low-level driver must
++ * store a potential error location in ->sector. We pass
++ * that back up in ->bi_sector.
++ */
++ if (blk_empty_barrier(req))
++ bio->bi_sector = req->sector;
++
+ if (nr_bytes >= bio->bi_size) {
+ req->bio = bio->bi_next;
+ nbytes = bio->bi_size;
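
For context, the bio these !bio_sectors()/bio_empty_barrier() checks accommodate is a barrier bio with no payload, built roughly as below; example_issue_flush() is hypothetical and omits completion handling:

/* Issue an empty barrier: bi_size == 0 and no bi_io_vec. */
static void example_issue_flush(struct block_device *bdev)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);

	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);
}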
+diff -rpuN linux-2.6.18.8/buildconfigs/conf.linux-native/00_xen_to_native linux-2.6.18-xen-3.3.0/buildconfigs/conf.linux-native/00_xen_to_native
+--- linux-2.6.18.8/buildconfigs/conf.linux-native/00_xen_to_native 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/conf.linux-native/00_xen_to_native 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,86 @@
++# Linux kernel version: 2.6.16.13-native
++# Mon May 15 10:59:54 2006
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_XEN is not set
++# CONFIG_HPET_TIMER is not set
++# CONFIG_SCHED_SMT is not set
++# CONFIG_X86_MCE is not set
++# CONFIG_X86_MSR is not set
++# CONFIG_SWIOTLB is not set
++# CONFIG_EDD is not set
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_SPARSEMEM_STATIC=y
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_HIGHPTE is not set
++# CONFIG_MATH_EMULATION is not set
++# CONFIG_EFI is not set
++CONFIG_IRQBALANCE=y
++# CONFIG_KEXEC is not set
++CONFIG_DOUBLEFAULT=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_X86_PM_TIMER=y
++# APM (Advanced Power Management) BIOS Support
++#
++# CONFIG_APM is not set
++
++#
++CONFIG_PCI_BIOS=y
++# CONFIG_XEN_PCIDEV_FRONTEND is not set
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_ISA is not set
++# CONFIG_MCA is not set
++# CONFIG_HOTPLUG_PCI_COMPAQ is not set
++# CONFIG_HOTPLUG_PCI_IBM is not set
++# CONFIG_I2O_EXT_ADAPTEC_DMA64 is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_TCG_XEN is not set
++# CONFIG_HUGETLBFS is not set
++# CONFIG_XEN is not set
++# CONFIG_XEN_INTERFACE_VERSION is not set
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_PCIDEV_BACKEND is not set
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++# CONFIG_XEN_BLKDEV_BACKEND is not set
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++# CONFIG_XEN_NETDEV_BACKEND is not set
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++# CONFIG_XEN_BLKDEV_FRONTEND is not set
++# CONFIG_XEN_NETDEV_FRONTEND is not set
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SCRUB_PAGES is not set
++# CONFIG_XEN_DISABLE_SERIAL is not set
++# CONFIG_XEN_SYSFS is not set
++# CONFIG_XEN_COMPAT_030002_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++# CONFIG_XEN_COMPAT_030002 is not set
++# CONFIG_XEN_COMPAT_030004 is not set
++# CONFIG_HAVE_ARCH_ALLOC_SKB is not set
++# CONFIG_HAVE_ARCH_DEV_ALLOC_SKB is not set
++# CONFIG_NO_IDLE_HZ is not set
++CONFIG_X86_HT=y
++# CONFIG_X86_NO_TSS is not set
++# CONFIG_X86_NO_IDT is not set
+diff -rpuN linux-2.6.18.8/buildconfigs/create_config.sh linux-2.6.18-xen-3.3.0/buildconfigs/create_config.sh
+--- linux-2.6.18.8/buildconfigs/create_config.sh 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/create_config.sh 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,54 @@
++#!/bin/sh
++set -e
++
++
++# Parse arguments
++#
++if [ $# -lt 1 -o $# -gt 4 ]; then
++ echo "Usage: $0 config-file EXTRAVERSION XEN_TARGET_ARCH XEN_SYSTYPE"
++ exit 1
++fi
++
++config_file=$1
++extraversion=$2
++target_arch=$3
++systype=$4
++
++
++# Start with initial config skeleton file, if any.
++# Derive from linux-defconfig_xen_x86_32 otherwise.
++#
++skeleton=buildconfigs/linux-defconfig_${extraversion#-}_${target_arch}${systype}
++[ -r $skeleton ] || skeleton=buildconfigs/linux-defconfig_xen_x86_32
++[ -r $skeleton.local ] && skeleton=$skeleton.local
++cp $skeleton $config_file
++
++echo "Using $skeleton as base config"
++
++# Update
++#
++filter_template="s/^#\{0,1\} *\(CONFIG[^= ]*\).*/\/^#\\\{0,1\\\} *\1[= ].*\/d/p"
++config_dirs="buildconfigs/conf.linux buildconfigs/conf.linux-${target_arch} buildconfigs/conf.linux${extraversion} buildconfigs/conf.linux-${target_arch}${extraversion}"
++
++for config_dir in $config_dirs
++do
++ if [ -d $config_dir ]; then
++ echo "Processing $config_dir..." 1>&2
++ # processing is done in alphanumeric order
++ find $config_dir -type f | sort | while read update
++ do
++ echo " ... $update" 1>&2
++ # create the filter rules in a temp file
++ filter_rules=`mktemp -t xenupdateconf.XXXXXXXXXX`
++ sed -n "${filter_template}" < $update > $filter_rules
++
++ # filter the config file in place, removing any options that
++ # will be updated.
++ sed -f $filter_rules -i $config_file
++ cat $update >> $config_file
++
++ # clean up
++ rm -f $filter_rules
++ done
++ fi
++done
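
Usage illustration (hypothetical invocation, not part of the patch): sh buildconfigs/create_config.sh .config -xen x86_32 starts from buildconfigs/linux-defconfig_xen_x86_32 and then layers conf.linux, conf.linux-x86_32, conf.linux-xen and conf.linux-x86_32-xen on top, later fragments overriding options set by earlier ones.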
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xen0_ia64 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen0_ia64
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xen0_ia64 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen0_ia64 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1703 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Feb 19 11:20:09 2008
++#
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_SWIOTLB=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_DMI=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_IA64_EXPOSE_P2M=y
++CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_DMA_IS_DMA32=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_IA64_GENERIC=y
++# CONFIG_IA64_DIG is not set
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++# CONFIG_IA64_XEN is not set
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_PGTABLE_3=y
++# CONFIG_PGTABLE_4 is not set
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_IA64_L1_CACHE_SHIFT=7
++CONFIG_IA64_CYCLONE=y
++CONFIG_IOSAPIC=y
++# CONFIG_IA64_SGI_SN_XP is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PERMIT_BSP_REMOVE is not set
++# CONFIG_PREEMPT is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++# CONFIG_FLATMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM_MANUAL=y
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_NEED_MULTIPLE_NODES=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_MIGRATION is not set
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=10
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
++CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
++# CONFIG_IA32_SUPPORT is not set
++# CONFIG_IA64_MCA_RECOVERY is not set
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++CONFIG_SGI_SN=y
++
++#
++# SN Devices
++#
++# CONFIG_SGI_IOC3 is not set
++CONFIG_KEXEC=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++# CONFIG_ACPI_DOCK is not set
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_NUMA=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++# CONFIG_HOTPLUG_PCI_SGI is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=y
++CONFIG_BLK_DEV_NBD=m
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=y
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++# CONFIG_IDE_GENERIC is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++CONFIG_BLK_DEV_CMD64X=y
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++# CONFIG_BLK_DEV_MD is not set
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++CONFIG_FUSION_SAS=y
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=y
++# CONFIG_ARCNET_1201 is not set
++# CONFIG_ARCNET_1051 is not set
++# CONFIG_ARCNET_RAW is not set
++# CONFIG_ARCNET_CAP is not set
++# CONFIG_ARCNET_COM90xx is not set
++# CONFIG_ARCNET_COM90xxIO is not set
++# CONFIG_ARCNET_RIM_I is not set
++# CONFIG_ARCNET_COM20020 is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=y
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++# CONFIG_ISDN_I4L is not set
++
++#
++# CAPI subsystem
++#
++# CONFIG_ISDN_CAPI is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++CONFIG_GAMEPORT=y
++# CONFIG_GAMEPORT_NS558 is not set
++# CONFIG_GAMEPORT_L4 is not set
++# CONFIG_GAMEPORT_EMU10K1 is not set
++# CONFIG_GAMEPORT_FM801 is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_COMPUTONE is not set
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_DIGIEPCA is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_SYNCLINK_GT is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_SPECIALIX is not set
++# CONFIG_SX is not set
++# CONFIG_RIO is not set
++# CONFIG_STALDRV is not set
++# CONFIG_SGI_SNSC is not set
++# CONFIG_SGI_TIOCX is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_SGI_L1_CONSOLE is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_AGP_I460=y
++# CONFIG_AGP_HP_ZX1 is not set
++# CONFIG_AGP_SGI_TIOCA is not set
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++# CONFIG_DRM_RADEON is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++# CONFIG_MMTIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++
++#
++# Encoders and Decoders
++#
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_CX2341X is not set
++# CONFIG_VIDEO_CX25840 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
++
++#
++# V4L USB devices
++#
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_QUICKCAM_MESSENGER is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++# CONFIG_USB_W9968CF is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_ZC0301 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++# CONFIG_USB_DSBR is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_SEQ_DUMMY=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_AC97_CODEC=y
++CONFIG_SND_AC97_BUS=y
++CONFIG_SND_DUMMY=y
++CONFIG_SND_VIRMIDI=y
++# CONFIG_SND_MTPAV is not set
++CONFIG_SND_SERIAL_U16550=y
++CONFIG_SND_MPU401=y
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++CONFIG_SND_ATIIXP=y
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++CONFIG_SND_FM801=y
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=y
++# CONFIG_OSS_OBSOLETE_DRIVER is not set
++# CONFIG_SOUND_BT878 is not set
++# CONFIG_SOUND_ES1371 is not set
++# CONFIG_SOUND_ICH is not set
++# CONFIG_SOUND_TRIDENT is not set
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++# CONFIG_SOUND_VIA82CXXX is not set
++# CONFIG_SOUND_TVMIXER is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=y
++# CONFIG_XFS_QUOTA is not set
++# CONFIG_XFS_SECURITY is not set
++# CONFIG_XFS_POSIX_ACL is not set
++# CONFIG_XFS_RT is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_IOMEM_MACHINE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++CONFIG_SMB_FS=y
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp437"
++CONFIG_CIFS=y
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_IRQ_PER_CPU=y
++
++#
++# HP Simulator drivers
++#
++# CONFIG_HP_SIMETH is not set
++# CONFIG_HP_SIMSERIAL is not set
++# CONFIG_HP_SIMSCSI is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=20
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_XEN_SMPBOOT is not set
++# CONFIG_XEN_DEVMEM is not set
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER=y
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++# CONFIG_XEN_SCRUB_PAGES is not set
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_XENCOMM=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xen0_x86_32 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen0_x86_32
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xen0_x86_32 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen0_x86_32 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1458 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Oct 16 09:31:19 2007
++#
++CONFIG_X86_32=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_SMP=y
++# CONFIG_X86_PC is not set
++CONFIG_X86_XEN=y
++# CONFIG_X86_ELAN is not set
++# CONFIG_X86_VOYAGER is not set
++# CONFIG_X86_NUMAQ is not set
++# CONFIG_X86_SUMMIT is not set
++# CONFIG_X86_BIGSMP is not set
++# CONFIG_X86_VISWS is not set
++# CONFIG_X86_GENERICARCH is not set
++# CONFIG_X86_ES7000 is not set
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MGEODE_LX is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++CONFIG_NR_CPUS=8
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_VM86=y
++# CONFIG_TOSHIBA is not set
++# CONFIG_I8K is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=y
++CONFIG_X86_CPUID=y
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_HIGHMEM=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++# CONFIG_HIGHPTE is not set
++CONFIG_MTRR=y
++# CONFIG_REGPARM is not set
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x100000
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++
++#
++# Power management options (ACPI, APM)
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_PV_SLEEP=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOBIOS is not set
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++# CONFIG_PCI_GOXEN_FE is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++# CONFIG_SCx200 is not set
++CONFIG_K8_NB=y
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
++# CONFIG_IP_NF_CT_PROTO_SCTP is not set
++CONFIG_IP_NF_FTP=m
++# CONFIG_IP_NF_IRC is not set
++# CONFIG_IP_NF_NETBIOS_NS is not set
++# CONFIG_IP_NF_TFTP is not set
++# CONFIG_IP_NF_AMANDA is not set
++# CONFIG_IP_NF_PPTP is not set
++# CONFIG_IP_NF_H323 is not set
++# CONFIG_IP_NF_SIP is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG=y
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_CMD640 is not set
++# CONFIG_BLK_DEV_IDEPNP is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_RZ1000 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_ATIIXP is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_CS5535 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++CONFIG_BLK_DEV_SVWKS=y
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SIS5513 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++CONFIG_BLK_DEV_3W_XXXX_RAID=y
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++CONFIG_SCSI_AACRAID=y
++CONFIG_SCSI_AIC7XXX=y
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_SCSI_AIC79XX=y
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_DPT_I2O is not set
++CONFIG_SCSI_ADVANSYS=y
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=y
++# CONFIG_SCSI_SATA_SVW is not set
++CONFIG_SCSI_ATA_PIIX=y
++# CONFIG_SCSI_SATA_MV is not set
++# CONFIG_SCSI_SATA_NV is not set
++# CONFIG_SCSI_PDC_ADMA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_PROMISE=y
++CONFIG_SCSI_SATA_SX4=y
++CONFIG_SCSI_SATA_SIL=y
++CONFIG_SCSI_SATA_SIL24=y
++# CONFIG_SCSI_SATA_SIS is not set
++# CONFIG_SCSI_SATA_ULI is not set
++# CONFIG_SCSI_SATA_VIA is not set
++# CONFIG_SCSI_SATA_VITESSE is not set
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++# CONFIG_MD_LINEAR is not set
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_CRYPT is not set
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++# CONFIG_FUSION_SAS is not set
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++# CONFIG_NET_SB1000 is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=y
++# CONFIG_TYPHOON is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++CONFIG_NE2K_PCI=y
++# CONFIG_8139CP is not set
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++CONFIG_VIA_RHINE=y
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=y
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++CONFIG_BNX2=y
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_SONYPI is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++# CONFIG_AGP_EFFICEON is not set
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_NSC_GPIO is not set
++# CONFIG_CS5535_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_STORAGE is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++
++#
++# RTC drivers
++#
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_EARLY_PRINTK=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_586 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++CONFIG_XEN_PCIDEV_BACKEND_PASS=y
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=y
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_KTIME_SCALAR=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xen0_x86_64 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen0_x86_64
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xen0_x86_64 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen0_x86_64 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1390 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Feb 18 10:41:04 2008
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_X86=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_MMU=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_64_XEN=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_X86_L1_CACHE_BYTES=128
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_X86_INTERNODE_CACHE_BYTES=128
++CONFIG_X86_GOOD_APIC=y
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=y
++CONFIG_X86_CPUID=y
++CONFIG_X86_IO_APIC=y
++CONFIG_X86_XEN_GENAPIC=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_MTRR=y
++CONFIG_SMP=y
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_NR_CPUS=8
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_SWIOTLB=y
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x200000
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_REORDER is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_PENDING_IRQ=y
++
++#
++# Power management options
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_PV_SLEEP=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI etc.)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_IA32_EMULATION=y
++CONFIG_IA32_AOUT=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
++# CONFIG_IP_NF_CT_PROTO_SCTP is not set
++CONFIG_IP_NF_FTP=m
++# CONFIG_IP_NF_IRC is not set
++# CONFIG_IP_NF_NETBIOS_NS is not set
++# CONFIG_IP_NF_TFTP is not set
++# CONFIG_IP_NF_AMANDA is not set
++# CONFIG_IP_NF_PPTP is not set
++# CONFIG_IP_NF_H323 is not set
++# CONFIG_IP_NF_SIP is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG=y
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_CMD640 is not set
++# CONFIG_BLK_DEV_IDEPNP is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_RZ1000 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_ATIIXP is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++CONFIG_BLK_DEV_SVWKS=y
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SIS5513 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++CONFIG_BLK_DEV_3W_XXXX_RAID=y
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++CONFIG_SCSI_AACRAID=y
++CONFIG_SCSI_AIC7XXX=y
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_SCSI_AIC79XX=y
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=y
++# CONFIG_SCSI_SATA_SVW is not set
++CONFIG_SCSI_ATA_PIIX=y
++# CONFIG_SCSI_SATA_MV is not set
++# CONFIG_SCSI_SATA_NV is not set
++# CONFIG_SCSI_PDC_ADMA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_PROMISE=y
++CONFIG_SCSI_SATA_SX4=y
++CONFIG_SCSI_SATA_SIL=y
++CONFIG_SCSI_SATA_SIL24=y
++# CONFIG_SCSI_SATA_SIS is not set
++# CONFIG_SCSI_SATA_ULI is not set
++# CONFIG_SCSI_SATA_VIA is not set
++# CONFIG_SCSI_SATA_VITESSE is not set
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++# CONFIG_SCSI_BUSLOGIC is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++# CONFIG_MD_LINEAR is not set
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID456 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_CRYPT is not set
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++# CONFIG_FUSION_SAS is not set
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++# CONFIG_NET_SB1000 is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=y
++# CONFIG_TYPHOON is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++CONFIG_NE2K_PCI=y
++# CONFIG_8139CP is not set
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++CONFIG_VIA_RHINE=y
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=y
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++CONFIG_BNX2=y
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++# CONFIG_AGP_AMD64 is not set
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_VIA=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_STORAGE is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_LD is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
++
++#
++# RTC drivers
++#
++# CONFIG_RTC_DRV_DS1553 is not set
++# CONFIG_RTC_DRV_DS1742 is not set
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++# CONFIG_RTC_DRV_V3020 is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_X86_64 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++CONFIG_XEN_PCIDEV_BACKEND_PASS=y
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++# CONFIG_XEN_SCSI_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=y
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xen_ia64 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen_ia64
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xen_ia64 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen_ia64 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1703 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Feb 19 11:20:00 2008
++#
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_SWIOTLB=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_DMI=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_IA64_EXPOSE_P2M=y
++CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_DMA_IS_DMA32=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_IA64_GENERIC=y
++# CONFIG_IA64_DIG is not set
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++# CONFIG_IA64_XEN is not set
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_PGTABLE_3=y
++# CONFIG_PGTABLE_4 is not set
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_IA64_L1_CACHE_SHIFT=7
++CONFIG_IA64_CYCLONE=y
++CONFIG_IOSAPIC=y
++# CONFIG_IA64_SGI_SN_XP is not set
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PERMIT_BSP_REMOVE is not set
++# CONFIG_PREEMPT is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++# CONFIG_FLATMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM_MANUAL=y
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_DISCONTIGMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++CONFIG_NEED_MULTIPLE_NODES=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++# CONFIG_MIGRATION is not set
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
++CONFIG_NUMA=y
++CONFIG_NODES_SHIFT=10
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
++CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
++# CONFIG_IA32_SUPPORT is not set
++# CONFIG_IA64_MCA_RECOVERY is not set
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++CONFIG_SGI_SN=y
++
++#
++# SN Devices
++#
++# CONFIG_SGI_IOC3 is not set
++CONFIG_KEXEC=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++# CONFIG_ACPI_DOCK is not set
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_NUMA=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++# CONFIG_HOTPLUG_PCI_SGI is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=y
++CONFIG_BLK_DEV_NBD=m
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_IDE_MAX_HWIFS=4
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=y
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++# CONFIG_IDE_GENERIC is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++CONFIG_BLK_DEV_CMD64X=y
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_IT821X is not set
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++# CONFIG_BLK_DEV_MD is not set
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++CONFIG_FUSION_SAS=y
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=y
++# CONFIG_ARCNET_1201 is not set
++# CONFIG_ARCNET_1051 is not set
++# CONFIG_ARCNET_RAW is not set
++# CONFIG_ARCNET_CAP is not set
++# CONFIG_ARCNET_COM90xx is not set
++# CONFIG_ARCNET_COM90xxIO is not set
++# CONFIG_ARCNET_RIM_I is not set
++# CONFIG_ARCNET_COM20020 is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=y
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=y
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++# CONFIG_ISDN_I4L is not set
++
++#
++# CAPI subsystem
++#
++# CONFIG_ISDN_CAPI is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++CONFIG_GAMEPORT=y
++# CONFIG_GAMEPORT_NS558 is not set
++# CONFIG_GAMEPORT_L4 is not set
++# CONFIG_GAMEPORT_EMU10K1 is not set
++# CONFIG_GAMEPORT_FM801 is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_COMPUTONE is not set
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_DIGIEPCA is not set
++# CONFIG_MOXA_INTELLIO is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_SYNCLINK_GT is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_SPECIALIX is not set
++# CONFIG_SX is not set
++# CONFIG_RIO is not set
++# CONFIG_STALDRV is not set
++# CONFIG_SGI_SNSC is not set
++# CONFIG_SGI_TIOCX is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_SGI_L1_CONSOLE is not set
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_AGP_I460=y
++# CONFIG_AGP_HP_ZX1 is not set
++# CONFIG_AGP_SGI_TIOCA is not set
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++# CONFIG_DRM_RADEON is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++# CONFIG_MMTIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++
++#
++# Encoders and Decoders
++#
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_CX2341X is not set
++# CONFIG_VIDEO_CX25840 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
++
++#
++# V4L USB devices
++#
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_QUICKCAM_MESSENGER is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++# CONFIG_USB_W9968CF is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_ZC0301 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++# CONFIG_USB_DSBR is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_SEQ_DUMMY=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_AC97_CODEC=y
++CONFIG_SND_AC97_BUS=y
++CONFIG_SND_DUMMY=y
++CONFIG_SND_VIRMIDI=y
++# CONFIG_SND_MTPAV is not set
++CONFIG_SND_SERIAL_U16550=y
++CONFIG_SND_MPU401=y
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++CONFIG_SND_ATIIXP=y
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_DARLA20 is not set
++# CONFIG_SND_GINA20 is not set
++# CONFIG_SND_LAYLA20 is not set
++# CONFIG_SND_DARLA24 is not set
++# CONFIG_SND_GINA24 is not set
++# CONFIG_SND_LAYLA24 is not set
++# CONFIG_SND_MONA is not set
++# CONFIG_SND_MIA is not set
++# CONFIG_SND_ECHO3G is not set
++# CONFIG_SND_INDIGO is not set
++# CONFIG_SND_INDIGOIO is not set
++# CONFIG_SND_INDIGODJ is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++CONFIG_SND_FM801=y
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RIPTIDE is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=y
++# CONFIG_OSS_OBSOLETE_DRIVER is not set
++# CONFIG_SOUND_BT878 is not set
++# CONFIG_SOUND_ES1371 is not set
++# CONFIG_SOUND_ICH is not set
++# CONFIG_SOUND_TRIDENT is not set
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++# CONFIG_SOUND_VIA82CXXX is not set
++# CONFIG_SOUND_TVMIXER is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=y
++# CONFIG_XFS_QUOTA is not set
++# CONFIG_XFS_SECURITY is not set
++# CONFIG_XFS_POSIX_ACL is not set
++# CONFIG_XFS_RT is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_PROC_IOMEM_MACHINE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++CONFIG_SMB_FS=y
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp437"
++CONFIG_CIFS=y
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_IRQ_PER_CPU=y
++
++#
++# HP Simulator drivers
++#
++# CONFIG_HP_SIMETH is not set
++# CONFIG_HP_SIMSERIAL is not set
++# CONFIG_HP_SIMSCSI is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=20
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_XEN_SMPBOOT is not set
++# CONFIG_XEN_DEVMEM is not set
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=y
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER=y
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++# CONFIG_XEN_SCRUB_PAGES is not set
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_XENCOMM=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xenU_ia64 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xenU_ia64
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xenU_ia64 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xenU_ia64 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1511 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Feb 19 11:20:14 2008
++#
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_LOCALVERSION_AUTO=y
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++# CONFIG_MODULE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_BLK_DEV_IO_TRACE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_SWIOTLB=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_FIND_NEXT_BIT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_DMI=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_IA64_EXPOSE_P2M=y
++CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++CONFIG_DMA_IS_DMA32=y
++CONFIG_AUDIT_ARCH=y
++# CONFIG_IA64_GENERIC is not set
++# CONFIG_IA64_DIG is not set
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++CONFIG_IA64_XEN=y
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_PGTABLE_3=y
++# CONFIG_PGTABLE_4 is not set
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_IA64_L1_CACHE_SHIFT=7
++# CONFIG_IA64_CYCLONE is not set
++CONFIG_IOSAPIC=y
++CONFIG_FORCE_MAX_ZONEORDER=11
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PERMIT_BSP_REMOVE is not set
++# CONFIG_PREEMPT is not set
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_ARCH_SELECT_MEMORY_MODEL=y
++CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_ARCH_SPARSEMEM_ENABLE=y
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++# CONFIG_IA32_SUPPORT is not set
++CONFIG_IA64_MCA_RECOVERY=y
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++# CONFIG_KEXEC is not set
++# CONFIG_CRASH_DUMP is not set
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_PM_LEGACY=y
++# CONFIG_PM_DEBUG is not set
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BUTTON=y
++CONFIG_ACPI_FAN=y
++# CONFIG_ACPI_DOCK is not set
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_TCP_DIAG=y
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++
++#
++# Core Netfilter Configuration
++#
++# CONFIG_NETFILTER_NETLINK is not set
++# CONFIG_NF_CONNTRACK is not set
++# CONFIG_NETFILTER_XTABLES is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_QUEUE is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++CONFIG_SCSI_SAS_ATTRS=y
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_MEGARAID_SAS is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_HPTIOP is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLA_FC is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=y
++# CONFIG_FUSION_FC is not set
++CONFIG_FUSION_SAS=y
++CONFIG_FUSION_MAX_SGE=128
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# PHY device support
++#
++# CONFIG_PHYLIB is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_CASSINI is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_ULI526X is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SIS190 is not set
++# CONFIG_SKGE is not set
++# CONFIG_SKY2 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_CHELSIO_T1 is not set
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++# CONFIG_MYRI10GE is not set
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_I8042 is not set
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_PCI=y
++CONFIG_SERIAL_8250_NR_UARTS=8
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++# CONFIG_AGP_SIS is not set
++# CONFIG_AGP_VIA is not set
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++CONFIG_DRM_RADEON=y
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_DRM_VIA is not set
++# CONFIG_DRM_SAVAGE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_OCORES is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Miscellaneous I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_DS1374 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCA9539 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_MAX6875 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=y
++# CONFIG_HWMON_VID is not set
++# CONFIG_SENSORS_ABITUGURU is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ADM9240 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_ATXP1 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_F71805F is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_SMSC47M192 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_VT8231 is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83791D is not set
++# CONFIG_SENSORS_W83792D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++# CONFIG_SENSORS_W83627EHF is not set
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++# CONFIG_VIDEO_VIVI is not set
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_CPIA2 is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++
++#
++# Encoders and Decoders
++#
++# CONFIG_VIDEO_MSP3400 is not set
++# CONFIG_VIDEO_CS53L32A is not set
++# CONFIG_VIDEO_TLV320AIC23B is not set
++# CONFIG_VIDEO_WM8775 is not set
++# CONFIG_VIDEO_WM8739 is not set
++# CONFIG_VIDEO_CX2341X is not set
++# CONFIG_VIDEO_CX25840 is not set
++# CONFIG_VIDEO_SAA711X is not set
++# CONFIG_VIDEO_SAA7127 is not set
++# CONFIG_VIDEO_UPD64031A is not set
++# CONFIG_VIDEO_UPD64083 is not set
++
++#
++# V4L USB devices
++#
++# CONFIG_VIDEO_PVRUSB2 is not set
++# CONFIG_VIDEO_EM28XX is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_QUICKCAM_MESSENGER is not set
++# CONFIG_USB_ET61X251 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++# CONFIG_USB_W9968CF is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_ZC0301 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++# CONFIG_USB_DSBR is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++# CONFIG_USB_DABUSB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON is not set
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++# CONFIG_SND_SEQ_DUMMY is not set
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_AC97_CODEC=y
++CONFIG_SND_AC97_BUS=y
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_VIRMIDI is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++
++#
++# PCI devices
++#
++# CONFIG_SND_AD1889 is not set
++# CONFIG_SND_ALS300 is not set
++# CONFIG_SND_ALI5451 is not set
++# CONFIG_SND_ATIIXP is not set
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++CONFIG_SND_FM801=y
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++# CONFIG_SND_HDA_INTEL is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_HDSPM is not set
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MAESTRO3 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_PCXHR is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_YMFPCI is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++# CONFIG_USB_EHCI_TT_NEWSCHED is not set
++# CONFIG_USB_ISP116X_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_ACECAD is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_TOUCHSCREEN is not set
++# CONFIG_USB_YEALINK is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++# CONFIG_USB_ATI_REMOTE2 is not set
++# CONFIG_USB_KEYSPAN_REMOTE is not set
++# CONFIG_USB_APPLETOUCH is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYPRESS_CY7C63 is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_APPLEDISPLAY is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_LD is not set
++
++#
++# USB DSL modem support
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++# CONFIG_EXT2_FS_POSIX_ACL is not set
++# CONFIG_EXT2_FS_SECURITY is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++# CONFIG_AUTOFS4_FS is not set
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V3_ACL is not set
++# CONFIG_NFSD_V4 is not set
++# CONFIG_NFSD_TCP is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_IRQ_PER_CPU=y
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=17
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_MUTEXES=y
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_XEN_SMPBOOT is not set
++# CONFIG_XEN_DEVMEM is not set
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++# CONFIG_XEN_GRANT_DEV is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++# CONFIG_XEN_SCRUB_PAGES is not set
++# CONFIG_XEN_DISABLE_SERIAL is not set
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_XENCOMM=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xenU_x86_32 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xenU_x86_32
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xenU_x86_32 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xenU_x86_32 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,953 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Tue Oct 16 09:31:29 2007
++#
++CONFIG_X86_32=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_SMP=y
++# CONFIG_X86_PC is not set
++CONFIG_X86_XEN=y
++# CONFIG_X86_ELAN is not set
++# CONFIG_X86_VOYAGER is not set
++# CONFIG_X86_NUMAQ is not set
++# CONFIG_X86_SUMMIT is not set
++# CONFIG_X86_BIGSMP is not set
++# CONFIG_X86_VISWS is not set
++# CONFIG_X86_GENERICARCH is not set
++# CONFIG_X86_ES7000 is not set
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MGEODE_LX is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++CONFIG_NR_CPUS=8
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_VM86=y
++# CONFIG_TOSHIBA is not set
++# CONFIG_I8K is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++# CONFIG_X86_MSR is not set
++CONFIG_X86_CPUID=y
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_HIGHMEM=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++# CONFIG_HIGHPTE is not set
++# CONFIG_REGPARM is not set
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x100000
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++# CONFIG_PCI is not set
++CONFIG_ISA_DMA_API=y
++# CONFIG_SCx200 is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++
++#
++# I2O device support
++#
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# PHY device support
++#
++
++#
++# Ethernet (10 or 100Mbit)
++#
++# CONFIG_NET_ETHERNET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++
++#
++# Ethernet (10000 Mbit)
++#
++
++#
++# Token Ring devices
++#
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++# CONFIG_KEYBOARD_ATKBD is not set
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_VIA=y
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_NSC_GPIO is not set
++# CONFIG_CS5535_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++# CONFIG_USB_ARCH_HAS_HCD is not set
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++# CONFIG_USB_ARCH_HAS_EHCI is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_EARLY_PRINTK=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_4KSTACKS is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_586 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++# CONFIG_XEN_GRANT_DEV is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC32 is not set
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_KTIME_SCALAR=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xenU_x86_64 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xenU_x86_64
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xenU_x86_64 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xenU_x86_64 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,906 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Feb 18 10:42:42 2008
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_X86=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_MMU=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++# CONFIG_TASKSTATS is not set
++# CONFIG_AUDIT is not set
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++# CONFIG_LBD is not set
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_DEFAULT_AS=y
++# CONFIG_DEFAULT_DEADLINE is not set
++# CONFIG_DEFAULT_CFQ is not set
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="anticipatory"
++
++#
++# Processor type and features
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_64_XEN=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_X86_L1_CACHE_BYTES=128
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_X86_INTERNODE_CACHE_BYTES=128
++CONFIG_X86_GOOD_APIC=y
++# CONFIG_MICROCODE is not set
++# CONFIG_X86_MSR is not set
++CONFIG_X86_CPUID=y
++CONFIG_X86_XEN_GENAPIC=y
++CONFIG_SMP=y
++CONFIG_PREEMPT_NONE=y
++# CONFIG_PREEMPT_VOLUNTARY is not set
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_NR_CPUS=8
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_SWIOTLB=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x200000
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_REORDER is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_PENDING_IRQ=y
++
++#
++# Bus options (PCI etc.)
++#
++# CONFIG_PCI is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_MISC is not set
++CONFIG_IA32_EMULATION=y
++CONFIG_IA32_AOUT=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++# CONFIG_XFRM_USER is not set
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_XFRM_TUNNEL is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_INET_XFRM_MODE_TRANSPORT=y
++CONFIG_INET_XFRM_MODE_TUNNEL=y
++# CONFIG_INET_DIAG is not set
++# CONFIG_TCP_CONG_ADVANCED is not set
++CONFIG_TCP_CONG_BIC=y
++# CONFIG_IPV6 is not set
++# CONFIG_INET6_XFRM_TUNNEL is not set
++# CONFIG_INET6_TUNNEL is not set
++# CONFIG_NETWORK_SECMARK is not set
++# CONFIG_NETFILTER is not set
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_DCCP is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++# CONFIG_IEEE80211 is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++# CONFIG_CONNECTOR is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++# CONFIG_CDROM_PKTCDVD is not set
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++# CONFIG_RAID_ATTRS is not set
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++# CONFIG_CHR_DEV_SCH is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++# CONFIG_SCSI_SAS_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_ISCSI_TCP is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++# CONFIG_FUSION is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++
++#
++# I2O device support
++#
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# PHY device support
++#
++
++#
++# Ethernet (10 or 100Mbit)
++#
++# CONFIG_NET_ETHERNET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++
++#
++# Ethernet (10000 Mbit)
++#
++
++#
++# Token Ring devices
++#
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_VT_HW_CONSOLE_BINDING is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++CONFIG_HW_RANDOM=y
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_MWAVE is not set
++# CONFIG_PC8736x_GPIO is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++# CONFIG_TELCLOCK is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# SPI support
++#
++# CONFIG_SPI is not set
++# CONFIG_SPI_MASTER is not set
++
++#
++# Dallas's 1-wire bus
++#
++
++#
++# Hardware Monitoring support
++#
++# CONFIG_HWMON is not set
++# CONFIG_HWMON_VID is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++# CONFIG_FB is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++# CONFIG_USB_ARCH_HAS_HCD is not set
++# CONFIG_USB_ARCH_HAS_OHCI is not set
++# CONFIG_USB_ARCH_HAS_EHCI is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# LED devices
++#
++# CONFIG_NEW_LEDS is not set
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++
++#
++# InfiniBand support
++#
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++# CONFIG_EDAC is not set
++
++#
++# Real Time Clock
++#
++# CONFIG_RTC_CLASS is not set
++
++#
++# DMA Engine support
++#
++# CONFIG_DMA_ENGINE is not set
++
++#
++# DMA Clients
++#
++
++#
++# DMA Devices
++#
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++# CONFIG_DELL_RBU is not set
++# CONFIG_DCDBAS is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++# CONFIG_EXT2_FS_XIP is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++# CONFIG_FS_POSIX_ACL is not set
++# CONFIG_XFS_FS is not set
++# CONFIG_OCFS2_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++# CONFIG_FUSE_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++# CONFIG_CONFIGFS_FS is not set
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V3_ACL is not set
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++# CONFIG_9P_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++CONFIG_DEBUG_INFO=y
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++CONFIG_FRAME_POINTER=y
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_AES_X86_64 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++CONFIG_XEN_UNPRIVILEGED_GUEST=y
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++# CONFIG_XEN_BACKEND is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC16 is not set
++# CONFIG_CRC32 is not set
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_PLIST=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xen_x86_32 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen_x86_32
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xen_x86_32 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen_x86_32 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3326 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Oct 15 14:37:47 2007
++#
++CONFIG_X86_32=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_CPUSETS=y
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++
++#
++# Processor type and features
++#
++CONFIG_SMP=y
++# CONFIG_X86_PC is not set
++CONFIG_X86_XEN=y
++# CONFIG_X86_ELAN is not set
++# CONFIG_X86_VOYAGER is not set
++# CONFIG_X86_NUMAQ is not set
++# CONFIG_X86_SUMMIT is not set
++# CONFIG_X86_BIGSMP is not set
++# CONFIG_X86_VISWS is not set
++# CONFIG_X86_GENERICARCH is not set
++# CONFIG_X86_ES7000 is not set
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MGEODE_LX is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++CONFIG_X86_GENERIC=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_CMPXCHG64=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_INTEL_USERCOPY=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++CONFIG_NR_CPUS=32
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_VM86=y
++# CONFIG_TOSHIBA is not set
++# CONFIG_I8K is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=y
++CONFIG_X86_CPUID=m
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++CONFIG_DELL_RBU=m
++CONFIG_DCDBAS=m
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_PAGE_OFFSET=0xC0000000
++CONFIG_HIGHMEM=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++# CONFIG_HIGHPTE is not set
++CONFIG_MTRR=y
++CONFIG_REGPARM=y
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x100000
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++
++#
++# Power management options (ACPI, APM)
++#
++CONFIG_PM=y
++# CONFIG_PM_LEGACY is not set
++CONFIG_PM_DEBUG=y
++# CONFIG_PM_TRACE is not set
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_SBS=m
++CONFIG_ACPI_PV_SLEEP=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOBIOS is not set
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++# CONFIG_PCI_GOXEN_FE is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_SCx200=m
++CONFIG_K8_NB=y
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_PCMCIA_LOAD_CIS=y
++CONFIG_PCMCIA_IOCTL=y
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_YENTA_O2=y
++CONFIG_YENTA_RICOH=y
++CONFIG_YENTA_TI=y
++CONFIG_YENTA_ENE_TUNE=y
++CONFIG_YENTA_TOSHIBA=y
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_PCCARD_NONSTATIC=m
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++CONFIG_HOTPLUG_PCI_FAKE=m
++CONFIG_HOTPLUG_PCI_ACPI=m
++CONFIG_HOTPLUG_PCI_ACPI_IBM=m
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
++CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
++CONFIG_HOTPLUG_PCI_SHPC=m
++# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_AOUT=m
++CONFIG_BINFMT_MISC=m
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_TUNNEL=m
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_TCP_DIAG=m
++CONFIG_TCP_CONG_ADVANCED=y
++
++#
++# TCP congestion control
++#
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=m
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++# CONFIG_IPV6_ROUTER_PREF is not set
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_XFRM_TUNNEL=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NETFILTER_XTABLES=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++# CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_STATE=m
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CONNTRACK_SECMARK=y
++CONFIG_IP_NF_CONNTRACK_EVENTS=y
++CONFIG_IP_NF_CONNTRACK_NETLINK=m
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_NETBIOS_NS=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_PPTP=m
++CONFIG_IP_NF_H323=m
++CONFIG_IP_NF_SIP=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_NAT_PPTP=m
++CONFIG_IP_NF_NAT_H323=m
++CONFIG_IP_NF_NAT_SIP=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_ULOG=m
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP=m
++CONFIG_INET_DCCP_DIAG=m
++CONFIG_IP_DCCP_ACKVEC=y
++
++#
++# DCCP CCIDs Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP_CCID2=m
++CONFIG_IP_DCCP_CCID3=m
++CONFIG_IP_DCCP_TFRC_LIB=m
++
++#
++# DCCP Kernel Hacking
++#
++# CONFIG_IP_DCCP_DEBUG is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++# CONFIG_TIPC is not set
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++CONFIG_ATM_CLIP_NO_ICMP=y
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++CONFIG_DECNET_ROUTER=y
++CONFIG_DECNET_ROUTE_FWMARK=y
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++CONFIG_ECONET=m
++CONFIG_ECONET_AUNUDP=y
++CONFIG_ECONET_NATIVE=y
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++
++#
++# Queueing/Scheduling
++#
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_PERF=y
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_TEXT=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++# CONFIG_NET_CLS_IND is not set
++CONFIG_NET_ESTIMATOR=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++# CONFIG_AX25_DAMA_SLAVE is not set
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRDA_DEBUG=y
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++
++#
++# Old Serial dongle support
++#
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_TOSHIBA_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_IEEE80211=m
++# CONFIG_IEEE80211_DEBUG is not set
++CONFIG_IEEE80211_CRYPT_WEP=m
++CONFIG_IEEE80211_CRYPT_CCMP=m
++CONFIG_IEEE80211_CRYPT_TKIP=m
++CONFIG_IEEE80211_SOFTMAC=m
++# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
++CONFIG_WIRELESS_EXT=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++CONFIG_CONNECTOR=m
++
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++CONFIG_RFD_FTL=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_NOSWAP=y
++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_GEOMETRY is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_OTP is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++CONFIG_MTD_PNC2000=m
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_SCx200_DOCFLASH=m
++CONFIG_MTD_AMD76XROM=m
++CONFIG_MTD_ICHXROM=m
++CONFIG_MTD_SCB2_FLASH=m
++CONFIG_MTD_NETtel=m
++CONFIG_MTD_DILNETPC=m
++CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
++CONFIG_MTD_L440GX=m
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PLATRAM=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_DATAFLASH=m
++CONFIG_MTD_M25P80=m
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLOCK2MTD=m
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++# CONFIG_MTD_DOCPROBE_ADVANCED is not set
++CONFIG_MTD_DOCPROBE_ADDRESS=0
++
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
++CONFIG_MTD_NAND_NANDSIM=m
++
++#
++# OneNAND Flash Device Drivers
++#
++CONFIG_MTD_ONENAND=m
++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
++# CONFIG_MTD_ONENAND_OTP is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++CONFIG_PARPORT_PC_SUPERIO=y
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_AX88796=m
++CONFIG_PARPORT_1284=y
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG is not set
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_BPCK6=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++CONFIG_CDROM_PKTCDVD_WCACHE=y
++CONFIG_ATA_OVER_ETH=m
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=m
++CONFIG_IDEDISK_MULTI_MODE=y
++CONFIG_BLK_DEV_IDECS=m
++CONFIG_BLK_DEV_IDECD=m
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=m
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=m
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_BLK_DEV_OFFBOARD=y
++CONFIG_BLK_DEV_GENERIC=y
++CONFIG_BLK_DEV_OPTI621=m
++CONFIG_BLK_DEV_RZ1000=m
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=m
++CONFIG_BLK_DEV_ALI15X3=m
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=m
++CONFIG_BLK_DEV_ATIIXP=m
++CONFIG_BLK_DEV_CMD64X=m
++CONFIG_BLK_DEV_TRIFLEX=m
++CONFIG_BLK_DEV_CY82C693=m
++CONFIG_BLK_DEV_CS5520=m
++CONFIG_BLK_DEV_CS5530=m
++CONFIG_BLK_DEV_CS5535=m
++CONFIG_BLK_DEV_HPT34X=m
++CONFIG_HPT34X_AUTODMA=y
++CONFIG_BLK_DEV_HPT366=m
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=m
++CONFIG_BLK_DEV_IT821X=m
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=m
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=m
++CONFIG_BLK_DEV_SVWKS=m
++CONFIG_BLK_DEV_SIIMAGE=m
++CONFIG_BLK_DEV_SIS5513=m
++CONFIG_BLK_DEV_SLC90E66=m
++CONFIG_BLK_DEV_TRM290=m
++CONFIG_BLK_DEV_VIA82CXXX=m
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_RAID_ATTRS=m
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=m
++CONFIG_CHR_DEV_SCH=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
++
++#
++# SCSI low-level drivers
++#
++CONFIG_ISCSI_TCP=m
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++CONFIG_AIC79XX_ENABLE_RD_STRM=y
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_DPT_I2O=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_SATA=m
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_MV=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_PDC_ADMA=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_SATA_QSTOR=m
++CONFIG_SCSI_SATA_PROMISE=m
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIL24=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++CONFIG_SCSI_IPR=m
++CONFIG_SCSI_IPR_TRACE=y
++CONFIG_SCSI_IPR_DUMP=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_SCSI_LPFC=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_NSP32=m
++CONFIG_SCSI_DEBUG=m
++
++#
++# PCMCIA SCSI adapter support
++#
++CONFIG_PCMCIA_AHA152X=m
++CONFIG_PCMCIA_FDOMAIN=m
++CONFIG_PCMCIA_NINJA_SCSI=m
++CONFIG_PCMCIA_QLOGIC=m
++CONFIG_PCMCIA_SYM53C500=m
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++CONFIG_MD_RAID5_RESHAPE=y
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
++
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++# CONFIG_IEEE1394_OUI_DB is not set
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++CONFIG_IEEE1394_EXPORT_FULL_API=y
++
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
++
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
++
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
++CONFIG_I2O_EXT_ADAPTEC=y
++CONFIG_I2O_EXT_ADAPTEC_DMA64=y
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_CONFIG_OLD_IOCTL=y
++CONFIG_I2O_BUS=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++CONFIG_ARCNET_CAP=m
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++# CONFIG_ARCNET_COM20020 is not set
++
++#
++# PHY device support
++#
++CONFIG_PHYLIB=m
++
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_VITESSE_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_FIXED_PHY=m
++# CONFIG_FIXED_MII_10_FDX is not set
++# CONFIG_FIXED_MII_100_FDX is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_ULI526X=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_HP100=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++# CONFIG_AMD8111E_NAPI is not set
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_ADAPTEC_STARFIRE_NAPI=y
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_DGRS=m
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++# CONFIG_8139TOO_PIO is not set
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_TLAN=m
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++CONFIG_NET_POCKET=y
++CONFIG_ATP=m
++CONFIG_DE600=m
++CONFIG_DE620=m
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++CONFIG_E1000_NAPI=y
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++# CONFIG_R8169_NAPI is not set
++CONFIG_R8169_VLAN=y
++CONFIG_SIS190=m
++CONFIG_SKGE=m
++CONFIG_SKY2=m
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_CHELSIO_T1=m
++CONFIG_IXGB=m
++CONFIG_IXGB_NAPI=y
++CONFIG_S2IO=m
++CONFIG_S2IO_NAPI=y
++CONFIG_MYRI10GE=m
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMOL=m
++CONFIG_IBMLS=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_ABYSS=m
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++CONFIG_NET_WIRELESS_RTNETLINK=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
++
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
++
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_IPW2100=m
++CONFIG_IPW2100_MONITOR=y
++# CONFIG_IPW2100_DEBUG is not set
++CONFIG_IPW2200=m
++CONFIG_IPW2200_MONITOR=y
++CONFIG_IPW2200_RADIOTAP=y
++CONFIG_IPW2200_PROMISCUOUS=y
++CONFIG_IPW2200_QOS=y
++# CONFIG_IPW2200_DEBUG is not set
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++CONFIG_PCMCIA_HERMES=m
++CONFIG_PCMCIA_SPECTRUM=m
++CONFIG_AIRO_CS=m
++CONFIG_PCMCIA_ATMEL=m
++CONFIG_PCMCIA_WL3501=m
++
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_USB_ZD1201=m
++CONFIG_HOSTAP=m
++CONFIG_HOSTAP_FIRMWARE=y
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y
++CONFIG_HOSTAP_PLX=m
++CONFIG_HOSTAP_PCI=m
++CONFIG_HOSTAP_CS=m
++CONFIG_BCM43XX=m
++CONFIG_BCM43XX_DEBUG=y
++CONFIG_BCM43XX_DMA=y
++CONFIG_BCM43XX_PIO=y
++CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
++# CONFIG_BCM43XX_DMA_MODE is not set
++# CONFIG_BCM43XX_PIO_MODE is not set
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++CONFIG_NET_WIRELESS=y
++
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++CONFIG_PCMCIA_PCNET=m
++CONFIG_PCMCIA_NMCLAN=m
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_PCMCIA_XIRC2PS=m
++CONFIG_PCMCIA_AXNET=m
++CONFIG_PCMCIA_IBMTR=m
++
++#
++# Wan interfaces
++#
++CONFIG_WAN=y
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_LANMEDIA=m
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=y
++CONFIG_HDLC_RAW_ETH=y
++CONFIG_HDLC_CISCO=y
++CONFIG_HDLC_FR=y
++CONFIG_HDLC_PPP=y
++CONFIG_HDLC_X25=y
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300=m
++CONFIG_PC300_MLPPP=y
++CONFIG_FARSYNC=m
++CONFIG_DLCI=m
++CONFIG_DLCI_COUNT=24
++CONFIG_DLCI_MAX=8
++CONFIG_WAN_ROUTER_DRIVERS=y
++CONFIG_CYCLADES_SYNC=m
++CONFIG_CYCLOMX_X25=y
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_SBNI=m
++# CONFIG_SBNI_MULTILINE is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_DUMMY=m
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++CONFIG_ATM_ZATM=m
++# CONFIG_ATM_ZATM_DEBUG is not set
++CONFIG_ATM_NICSTAR=m
++CONFIG_ATM_NICSTAR_USE_SUNI=y
++CONFIG_ATM_NICSTAR_USE_IDT77105=y
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++CONFIG_ATM_IDT77252_RCV_ALL=y
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++CONFIG_ATM_FORE200E_PCA=y
++CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
++CONFIG_ATM_FORE200E_USE_TASKLET=y
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_FORE200E=m
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++CONFIG_ROADRUNNER_LARGE_RINGS=y
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPP_MPPE=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETPOLL=y
++CONFIG_NETPOLL_RX=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DIVERSION=m
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_TELES_CS=m
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
++
++#
++# Active cards
++#
++
++#
++# Siemens Gigaset
++#
++CONFIG_ISDN_DRV_GIGASET=m
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M105=m
++# CONFIG_GIGASET_DEBUG is not set
++# CONFIG_GIGASET_UNDOCREQ is not set
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++#
++# Active Eicon DIVA Server cards
++#
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=m
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_ADS7846=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_WISTRON_BTNS=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=m
++CONFIG_SERIAL_8250_PCI=m
++CONFIG_SERIAL_8250_PNP=m
++# CONFIG_SERIAL_8250_CS is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++CONFIG_SERIAL_8250_RUNTIME_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=m
++CONFIG_SERIAL_JSM=m
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_IBMASR=m
++CONFIG_WAFER_WDT=m
++CONFIG_I6300ESB_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_SCx200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_SBC8360_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_MACHZ_WDT=m
++CONFIG_SBC_EPX_C3_WATCHDOG=m
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_INTEL=m
++CONFIG_HW_RANDOM_AMD=m
++CONFIG_HW_RANDOM_GEODE=m
++CONFIG_HW_RANDOM_VIA=m
++CONFIG_NVRAM=m
++CONFIG_RTC=m
++CONFIG_GEN_RTC=m
++CONFIG_GEN_RTC_X=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++CONFIG_SONYPI=m
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++CONFIG_AGP_EFFICEON=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++CONFIG_DRM_VIA=m
++CONFIG_DRM_SAVAGE=m
++
++#
++# PCMCIA character devices
++#
++CONFIG_SYNCLINK_CS=m
++CONFIG_CARDMAN_4000=m
++CONFIG_CARDMAN_4040=m
++CONFIG_MWAVE=m
++CONFIG_SCx200_GPIO=m
++CONFIG_PC8736x_GPIO=m
++CONFIG_NSC_GPIO=m
++CONFIG_CS5535_GPIO=m
++CONFIG_RAW_DRIVER=m
++CONFIG_MAX_RAW_DEVS=256
++# CONFIG_HPET is not set
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++CONFIG_TCG_TPM=m
++CONFIG_TCG_TIS=m
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++CONFIG_TCG_INFINEON=m
++CONFIG_TCG_XEN=m
++CONFIG_TELCLOCK=m
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_OCORES=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_SCx200_I2C=m
++CONFIG_SCx200_I2C_SCL=12
++CONFIG_SCx200_I2C_SDA=13
++CONFIG_SCx200_ACB=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
++
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_DS1374=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCA9539=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_MAX6875=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_BITBANG=m
++CONFIG_SPI_BUTTERFLY=m
++
++#
++# SPI Protocol Masters
++#
++
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_CON=y
++
++#
++# 1-wire Bus Masters
++#
++CONFIG_W1_MASTER_MATROX=m
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
++
++#
++# 1-wire Slaves
++#
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2433=m
++# CONFIG_W1_SLAVE_DS2433_CRC is not set
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=m
++CONFIG_HWMON_VID=m
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM70=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_HDAPS=m
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_VIVI=m
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BT848_DVB=y
++CONFIG_VIDEO_SAA6588=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_CPIA2=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++CONFIG_VIDEO_MEYE=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++# CONFIG_VIDEO_SAA7134_OSS is not set
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_DVB_ALL_FRONTENDS=y
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_DVB_ALL_FRONTENDS=y
++
++#
++# Encoders and Decoders
++#
++CONFIG_VIDEO_MSP3400=m
++CONFIG_VIDEO_CS53L32A=m
++CONFIG_VIDEO_TLV320AIC23B=m
++CONFIG_VIDEO_WM8775=m
++CONFIG_VIDEO_WM8739=m
++CONFIG_VIDEO_CX2341X=m
++CONFIG_VIDEO_CX25840=m
++CONFIG_VIDEO_SAA711X=m
++CONFIG_VIDEO_SAA7127=m
++CONFIG_VIDEO_UPD64031A=m
++CONFIG_VIDEO_UPD64083=m
++
++#
++# V4L USB devices
++#
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_24XXX=y
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_USBVIDEO=m
++CONFIG_USB_VICAM=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_QUICKCAM_MESSENGER=m
++CONFIG_USB_ET61X251=m
++CONFIG_VIDEO_OVCAMCHIP=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_ZC0301=m
++CONFIG_USB_PWC=m
++# CONFIG_USB_PWC_DEBUG is not set
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_USB_DSBR=m
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_CINERGYT2=m
++# CONFIG_DVB_CINERGYT2_TUNING is not set
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
++
++#
++# Supported Pluto2 Adapters
++#
++CONFIG_DVB_PLUTO2=m
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_CX24123=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++CONFIG_DVB_S5H1420=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_ZL10353=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++CONFIG_DVB_NXT200X=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_DVB_BCM3510=m
++CONFIG_DVB_LGDT330X=m
++
++#
++# Miscellaneous devices
++#
++CONFIG_DVB_PLL=m
++CONFIG_DVB_LNBP21=m
++CONFIG_DVB_ISL6421=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BUF_DVB=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++CONFIG_USB_DABUSB=m
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++CONFIG_FB_ARC=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++CONFIG_FB_VESA=y
++CONFIG_FB_HGA=m
++# CONFIG_FB_HGA_ACCEL is not set
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++CONFIG_FB_RIVA_DEBUG=y
++CONFIG_FB_I810=m
++# CONFIG_FB_I810_GTF is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++# CONFIG_FB_MATROX_I2C is not set
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_CYBLA=m
++CONFIG_FB_TRIDENT=m
++# CONFIG_FB_TRIDENT_ACCEL is not set
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX=m
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_VIRTUAL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_VIDEO_SELECT=y
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=m
++# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++# CONFIG_LOGO is not set
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++CONFIG_BACKLIGHT_DEVICE=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_DEVICE=y
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
++# CONFIG_SND_DYNAMIC_MINORS is not set
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_AC97_BUS=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++
++#
++# PCI devices
++#
++CONFIG_SND_AD1889=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS5535AUDIO=m
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_FM801=m
++# CONFIG_SND_FM801_TEA575X_BOOL is not set
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++# CONFIG_SND_VIA82XX_MODEM is not set
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
++
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
++
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_PDAUDIOCF=m
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=m
++# CONFIG_OSS_OBSOLETE_DRIVER is not set
++CONFIG_SOUND_BT878=m
++CONFIG_SOUND_ES1371=m
++CONFIG_SOUND_ICH=m
++CONFIG_SOUND_TRIDENT=m
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++CONFIG_SOUND_VIA82CXXX=m
++# CONFIG_MIDI_VIA82CXXX is not set
++# CONFIG_SOUND_OSS is not set
++CONFIG_SOUND_TVMIXER=m
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=m
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=m
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_ISP116X_HCD=m
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_CS=m
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++CONFIG_USB_STORAGE_ALAUDA=y
++CONFIG_USB_LIBUSUAL=y
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++CONFIG_HID_FF=y
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++# CONFIG_USB_KBD is not set
++# CONFIG_USB_MOUSE is not set
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_ACECAD=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_TOUCHSCREEN=m
++CONFIG_USB_TOUCHSCREEN_EGALAX=y
++CONFIG_USB_TOUCHSCREEN_PANJIT=y
++CONFIG_USB_TOUCHSCREEN_3M=y
++CONFIG_USB_TOUCHSCREEN_ITM=y
++CONFIG_USB_YEALINK=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++CONFIG_USB_ATI_REMOTE2=m
++CONFIG_USB_KEYSPAN_REMOTE=m
++CONFIG_USB_APPLETOUCH=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_CDCETHER=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_ARMLINUX=y
++# CONFIG_USB_EPSON2888 is not set
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_FUNSOFT=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
++
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++CONFIG_USB_EMI26=m
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYPRESS_CY7C63=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_APPLEDISPLAY=m
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_SISUSBVGA_CON=y
++CONFIG_USB_LD=m
++CONFIG_USB_TEST=m
++
++#
++# USB DSL modem support
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_CXACRU=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
++
++#
++# USB Gadget Support
++#
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_SELECTED=y
++CONFIG_USB_GADGET_NET2280=y
++CONFIG_USB_NET2280=m
++# CONFIG_USB_GADGET_PXA2XX is not set
++# CONFIG_USB_GADGET_GOKU is not set
++# CONFIG_USB_GADGET_LH7A40X is not set
++# CONFIG_USB_GADGET_OMAP is not set
++# CONFIG_USB_GADGET_AT91 is not set
++# CONFIG_USB_GADGET_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++CONFIG_USB_G_SERIAL=m
++
++#
++# MMC/SD Card support
++#
++CONFIG_MMC=m
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_SDHCI=m
++CONFIG_MMC_WBSD=m
++
++#
++# LED devices
++#
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=m
++
++#
++# LED drivers
++#
++CONFIG_LEDS_NET48XX=m
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_IDE_DISK=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
++
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=m
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_ISER=m
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++CONFIG_EDAC=m
++
++#
++# Reporting subsystems
++#
++# CONFIG_EDAC_DEBUG is not set
++CONFIG_EDAC_MM_EDAC=m
++CONFIG_EDAC_AMD76X=m
++CONFIG_EDAC_E7XXX=m
++CONFIG_EDAC_E752X=m
++CONFIG_EDAC_I82875P=m
++CONFIG_EDAC_I82860=m
++CONFIG_EDAC_R82600=m
++CONFIG_EDAC_POLL=y
++
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++
++#
++# RTC drivers
++#
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_RS5C348=m
++CONFIG_RTC_DRV_RS5C372=m
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++CONFIG_RTC_DRV_MAX6902=m
++CONFIG_RTC_DRV_V3020=m
++
++#
++# DMA Engine support
++#
++CONFIG_DMA_ENGINE=y
++
++#
++# DMA Clients
++#
++CONFIG_NET_DMA=y
++
++#
++# DMA Devices
++#
++CONFIG_INTEL_IOATDMA=m
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT2_FS_XIP=y
++CONFIG_FS_XIP=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++# CONFIG_JFS_SECURITY is not set
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=m
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++CONFIG_OCFS2_FS=m
++CONFIG_OCFS2_DEBUG_MASKLOG=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_QUOTA=y
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=m
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++CONFIG_CONFIGFS_FS=m
++
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++# CONFIG_JFFS2_SUMMARY is not set
++# CONFIG_JFFS2_FS_XATTR is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=m
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V2_ACL=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_ACL_SUPPORT=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp850"
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++CONFIG_CIFS_STATS2=y
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++CONFIG_CIFS_XATTR=y
++# CONFIG_CIFS_POSIX is not set
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++CONFIG_9P_FS=m
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_OSF_PARTITION=y
++# CONFIG_AMIGA_PARTITION is not set
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++# CONFIG_MINIX_SUBPARTITION is not set
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_KARMA_PARTITION=y
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=14
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++CONFIG_EARLY_PRINTK=y
++CONFIG_DEBUG_STACKOVERFLOW=y
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++# CONFIG_SECURITY_NETWORK_XFRM is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++# CONFIG_SECURITY_SELINUX is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES=m
++CONFIG_CRYPTO_AES_586=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=m
++CONFIG_XEN_PCIDEV_BACKEND_VPCI=y
++# CONFIG_XEN_PCIDEV_BACKEND_PASS is not set
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC16=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_AUDIT_GENERIC=y
++CONFIG_ZLIB_INFLATE=m
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=m
++CONFIG_TEXTSEARCH_BM=m
++CONFIG_TEXTSEARCH_FSM=m
++CONFIG_PLIST=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_GENERIC_PENDING_IRQ=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_KTIME_SCALAR=y
+diff -rpuN linux-2.6.18.8/buildconfigs/linux-defconfig_xen_x86_64 linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen_x86_64
+--- linux-2.6.18.8/buildconfigs/linux-defconfig_xen_x86_64 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/buildconfigs/linux-defconfig_xen_x86_64 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3146 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.18.8
++# Mon Feb 18 10:38:24 2008
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_X86=y
++CONFIG_LOCKDEP_SUPPORT=y
++CONFIG_STACKTRACE_SUPPORT=y
++CONFIG_SEMAPHORE_SLEEPERS=y
++CONFIG_MMU=y
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_HWEIGHT=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_EARLY_PRINTK=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_ARCH_MAY_HAVE_PC_FDC=y
++CONFIG_DMI=y
++CONFIG_AUDIT_ARCH=y
++CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++# CONFIG_LOCALVERSION_AUTO is not set
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_DELAY_ACCT=y
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_CPUSETS=y
++# CONFIG_RELAY is not set
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_EMBEDDED is not set
++CONFIG_UID16=y
++CONFIG_SYSCTL=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_HOTPLUG=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_ELF_CORE=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_SLAB=y
++CONFIG_VM_EVENT_COUNTERS=y
++CONFIG_RT_MUTEXES=y
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++# CONFIG_SLOB is not set
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Block layer
++#
++CONFIG_LBD=y
++# CONFIG_BLK_DEV_IO_TRACE is not set
++CONFIG_LSF=y
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_DEFAULT_AS is not set
++# CONFIG_DEFAULT_DEADLINE is not set
++CONFIG_DEFAULT_CFQ=y
++# CONFIG_DEFAULT_NOOP is not set
++CONFIG_DEFAULT_IOSCHED="cfq"
++
++#
++# Processor type and features
++#
++CONFIG_X86_PC=y
++# CONFIG_X86_VSMP is not set
++# CONFIG_MK8 is not set
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_64_XEN=y
++CONFIG_X86_NO_TSS=y
++CONFIG_X86_NO_IDT=y
++CONFIG_X86_L1_CACHE_BYTES=128
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_X86_INTERNODE_CACHE_BYTES=128
++CONFIG_X86_GOOD_APIC=y
++CONFIG_MICROCODE=y
++CONFIG_X86_MSR=m
++CONFIG_X86_CPUID=m
++CONFIG_X86_IO_APIC=y
++CONFIG_X86_XEN_GENAPIC=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_MTRR=y
++CONFIG_SMP=y
++# CONFIG_PREEMPT_NONE is not set
++CONFIG_PREEMPT_VOLUNTARY=y
++# CONFIG_PREEMPT is not set
++CONFIG_PREEMPT_BKL=y
++CONFIG_ARCH_FLATMEM_ENABLE=y
++CONFIG_SELECT_MEMORY_MODEL=y
++CONFIG_FLATMEM_MANUAL=y
++# CONFIG_DISCONTIGMEM_MANUAL is not set
++# CONFIG_SPARSEMEM_MANUAL is not set
++CONFIG_FLATMEM=y
++CONFIG_FLAT_NODE_MEM_MAP=y
++# CONFIG_SPARSEMEM_STATIC is not set
++CONFIG_SPLIT_PTLOCK_CPUS=4
++CONFIG_RESOURCES_64BIT=y
++CONFIG_NR_CPUS=32
++CONFIG_HOTPLUG_CPU=y
++CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
++CONFIG_SWIOTLB=y
++CONFIG_KEXEC=y
++# CONFIG_CRASH_DUMP is not set
++CONFIG_PHYSICAL_START=0x200000
++CONFIG_SECCOMP=y
++CONFIG_HZ_100=y
++# CONFIG_HZ_250 is not set
++# CONFIG_HZ_1000 is not set
++CONFIG_HZ=100
++# CONFIG_REORDER is not set
++CONFIG_K8_NB=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_PENDING_IRQ=y
++
++#
++# Power management options
++#
++CONFIG_PM=y
++# CONFIG_PM_LEGACY is not set
++CONFIG_PM_DEBUG=y
++# CONFIG_SOFTWARE_SUSPEND is not set
++CONFIG_SUSPEND_SMP=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_SLEEP=y
++CONFIG_ACPI_SLEEP_PROC_FS=y
++# CONFIG_ACPI_SLEEP_PROC_SLEEP is not set
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_HOTKEY=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_DOCK=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_HOTPLUG_CPU=y
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_SYSTEM=y
++CONFIG_ACPI_CONTAINER=m
++CONFIG_ACPI_SBS=m
++CONFIG_ACPI_PV_SLEEP=y
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI etc.)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++# CONFIG_PCI_MMCONFIG is not set
++CONFIG_XEN_PCIDEV_FRONTEND=y
++# CONFIG_XEN_PCIDEV_FE_DEBUG is not set
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_PCMCIA_LOAD_CIS=y
++CONFIG_PCMCIA_IOCTL=y
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_YENTA_O2=y
++CONFIG_YENTA_RICOH=y
++CONFIG_YENTA_TI=y
++CONFIG_YENTA_ENE_TUNE=y
++CONFIG_YENTA_TOSHIBA=y
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_PCCARD_NONSTATIC=m
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++# CONFIG_HOTPLUG_PCI_ACPI is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=m
++CONFIG_IA32_EMULATION=y
++CONFIG_IA32_AOUT=y
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Networking
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++# CONFIG_NETDEBUG is not set
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_ASK_IP_FIB_HASH=y
++# CONFIG_IP_FIB_TRIE is not set
++CONFIG_IP_FIB_HASH=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_XFRM_TUNNEL=m
++CONFIG_INET_TUNNEL=m
++CONFIG_INET_XFRM_MODE_TRANSPORT=m
++CONFIG_INET_XFRM_MODE_TUNNEL=m
++CONFIG_INET_DIAG=m
++CONFIG_INET_TCP_DIAG=m
++CONFIG_TCP_CONG_ADVANCED=y
++
++#
++# TCP congestion control
++#
++CONFIG_TCP_CONG_BIC=m
++CONFIG_TCP_CONG_CUBIC=m
++CONFIG_TCP_CONG_WESTWOOD=m
++CONFIG_TCP_CONG_HTCP=m
++CONFIG_TCP_CONG_HSTCP=m
++CONFIG_TCP_CONG_HYBLA=m
++CONFIG_TCP_CONG_VEGAS=m
++CONFIG_TCP_CONG_SCALABLE=m
++CONFIG_TCP_CONG_LP=m
++CONFIG_TCP_CONG_VENO=m
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++# CONFIG_IPV6_ROUTER_PREF is not set
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_XFRM_TUNNEL=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_INET6_XFRM_MODE_TRANSPORT=m
++CONFIG_INET6_XFRM_MODE_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETWORK_SECMARK=y
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# Core Netfilter Configuration
++#
++CONFIG_NETFILTER_NETLINK=m
++CONFIG_NETFILTER_NETLINK_QUEUE=m
++CONFIG_NETFILTER_NETLINK_LOG=m
++CONFIG_NETFILTER_XTABLES=m
++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
++CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
++CONFIG_NETFILTER_XT_TARGET_MARK=m
++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
++CONFIG_NETFILTER_XT_TARGET_SECMARK=m
++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
++CONFIG_NETFILTER_XT_MATCH_COMMENT=m
++CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
++CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_DCCP=m
++CONFIG_NETFILTER_XT_MATCH_ESP=m
++CONFIG_NETFILTER_XT_MATCH_HELPER=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NETFILTER_XT_MATCH_LIMIT=m
++CONFIG_NETFILTER_XT_MATCH_MAC=m
++CONFIG_NETFILTER_XT_MATCH_MARK=m
++CONFIG_NETFILTER_XT_MATCH_POLICY=m
++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
++CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
++CONFIG_NETFILTER_XT_MATCH_QUOTA=m
++CONFIG_NETFILTER_XT_MATCH_REALM=m
++CONFIG_NETFILTER_XT_MATCH_SCTP=m
++CONFIG_NETFILTER_XT_MATCH_STATE=m
++CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
++CONFIG_NETFILTER_XT_MATCH_STRING=m
++CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CONNTRACK_SECMARK=y
++CONFIG_IP_NF_CONNTRACK_EVENTS=y
++CONFIG_IP_NF_CONNTRACK_NETLINK=m
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_NETBIOS_NS=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_PPTP=m
++CONFIG_IP_NF_H323=m
++CONFIG_IP_NF_SIP=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_NAT_PPTP=m
++CONFIG_IP_NF_NAT_H323=m
++CONFIG_IP_NF_NAT_SIP=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_HL=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# DECnet: Netfilter Configuration
++#
++# CONFIG_DECNET_NF_GRABULATOR is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_ULOG=m
++
++#
++# DCCP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP=m
++CONFIG_INET_DCCP_DIAG=m
++CONFIG_IP_DCCP_ACKVEC=y
++
++#
++# DCCP CCIDs Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_DCCP_CCID2=m
++CONFIG_IP_DCCP_CCID3=m
++CONFIG_IP_DCCP_TFRC_LIB=m
++
++#
++# DCCP Kernel Hacking
++#
++# CONFIG_IP_DCCP_DEBUG is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++
++#
++# TIPC Configuration (EXPERIMENTAL)
++#
++CONFIG_TIPC=m
++# CONFIG_TIPC_ADVANCED is not set
++# CONFIG_TIPC_DEBUG is not set
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++# CONFIG_ATM_MPOA is not set
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++CONFIG_IPX_INTERN=y
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=m
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++CONFIG_ECONET=m
++# CONFIG_ECONET_AUNUDP is not set
++# CONFIG_ECONET_NATIVE is not set
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++
++#
++# Queueing/Scheduling
++#
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++
++#
++# Classification
++#
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_PERF=y
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_EMATCH_TEXT=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++CONFIG_NET_CLS_IND=y
++CONFIG_NET_ESTIMATOR=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++CONFIG_AX25_DAMA_SLAVE=y
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++CONFIG_IRDA_ULTRA=y
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++# CONFIG_IRDA_DEBUG is not set
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_TOIM3232_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++
++#
++# Old Serial dongle support
++#
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_MCS_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++# CONFIG_BT_HCIDTL1 is not set
++# CONFIG_BT_HCIBT3C is not set
++# CONFIG_BT_HCIBLUECARD is not set
++# CONFIG_BT_HCIBTUART is not set
++CONFIG_BT_HCIVHCI=m
++CONFIG_IEEE80211=m
++# CONFIG_IEEE80211_DEBUG is not set
++CONFIG_IEEE80211_CRYPT_WEP=m
++CONFIG_IEEE80211_CRYPT_CCMP=m
++CONFIG_IEEE80211_CRYPT_TKIP=m
++CONFIG_IEEE80211_SOFTMAC=m
++# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
++CONFIG_WIRELESS_EXT=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++CONFIG_SYS_HYPERVISOR=y
++
++#
++# Connector - unified userspace <-> kernelspace linker
++#
++CONFIG_CONNECTOR=y
++CONFIG_PROC_EVENTS=y
++
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++CONFIG_RFD_FTL=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++CONFIG_MTD_CFI_ADV_OPTIONS=y
++CONFIG_MTD_CFI_NOSWAP=y
++# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
++# CONFIG_MTD_CFI_GEOMETRY is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++# CONFIG_MTD_OTP is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++# CONFIG_MTD_ROM is not set
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++# CONFIG_MTD_PNC2000 is not set
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++# CONFIG_MTD_AMD76XROM is not set
++# CONFIG_MTD_ICHXROM is not set
++CONFIG_MTD_SCB2_FLASH=m
++# CONFIG_MTD_NETtel is not set
++# CONFIG_MTD_DILNETPC is not set
++# CONFIG_MTD_L440GX is not set
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PLATRAM=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_DATAFLASH=m
++CONFIG_MTD_M25P80=m
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLOCK2MTD=m
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++CONFIG_MTD_DOCPROBE_ADVANCED=y
++CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
++CONFIG_MTD_DOCPROBE_HIGH=y
++CONFIG_MTD_DOCPROBE_55AA=y
++
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++# CONFIG_MTD_NAND_ECC_SMC is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
++CONFIG_MTD_NAND_NANDSIM=m
++
++#
++# OneNAND Flash Device Drivers
++#
++CONFIG_MTD_ONENAND=m
++# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
++# CONFIG_MTD_ONENAND_OTP is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++# CONFIG_PARPORT_PC_PCMCIA is not set
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_AX88796=m
++CONFIG_PARPORT_1284=y
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++CONFIG_PNP_DEBUG=y
++
++#
++# Protocols
++#
++CONFIG_PNPACPI=y
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++CONFIG_ATA_OVER_ETH=m
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=m
++CONFIG_IDEDISK_MULTI_MODE=y
++# CONFIG_BLK_DEV_IDECS is not set
++CONFIG_BLK_DEV_IDECD=m
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=m
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=m
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++CONFIG_BLK_DEV_OFFBOARD=y
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_RZ1000=m
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=m
++CONFIG_BLK_DEV_ALI15X3=m
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=m
++CONFIG_BLK_DEV_ATIIXP=m
++CONFIG_BLK_DEV_CMD64X=m
++CONFIG_BLK_DEV_TRIFLEX=m
++CONFIG_BLK_DEV_CY82C693=m
++CONFIG_BLK_DEV_CS5520=m
++CONFIG_BLK_DEV_CS5530=m
++CONFIG_BLK_DEV_HPT34X=m
++CONFIG_HPT34X_AUTODMA=y
++CONFIG_BLK_DEV_HPT366=m
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=m
++CONFIG_BLK_DEV_IT821X=m
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=m
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=m
++CONFIG_BLK_DEV_SVWKS=m
++CONFIG_BLK_DEV_SIIMAGE=m
++CONFIG_BLK_DEV_SIS5513=m
++CONFIG_BLK_DEV_SLC90E66=m
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=m
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_RAID_ATTRS=m
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=m
++CONFIG_CHR_DEV_SCH=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++CONFIG_SCSI_SAS_ATTRS=m
++
++#
++# SCSI low-level drivers
++#
++CONFIG_ISCSI_TCP=m
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=4
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_MEGARAID_LEGACY=m
++CONFIG_MEGARAID_SAS=m
++CONFIG_SCSI_SATA=m
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_MV=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_PDC_ADMA=m
++CONFIG_SCSI_HPTIOP=m
++CONFIG_SCSI_SATA_QSTOR=m
++CONFIG_SCSI_SATA_PROMISE=m
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIL24=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_SATA_INTEL_COMBINED=y
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++CONFIG_SCSI_SYM53C8XX_MMIO=y
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLA_FC=m
++CONFIG_SCSI_LPFC=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# PCMCIA SCSI adapter support
++#
++# CONFIG_PCMCIA_FDOMAIN is not set
++# CONFIG_PCMCIA_QLOGIC is not set
++# CONFIG_PCMCIA_SYM53C500 is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID456=m
++CONFIG_MD_RAID5_RESHAPE=y
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_SPI=m
++CONFIG_FUSION_FC=m
++CONFIG_FUSION_SAS=m
++CONFIG_FUSION_MAX_SGE=128
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
++
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++CONFIG_IEEE1394_OUI_DB=y
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++# CONFIG_IEEE1394_EXPORT_FULL_API is not set
++
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
++
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
++
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
++CONFIG_I2O_EXT_ADAPTEC=y
++CONFIG_I2O_EXT_ADAPTEC_DMA64=y
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_CONFIG_OLD_IOCTL=y
++CONFIG_I2O_BUS=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
++
++#
++# Network device support
++#
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++CONFIG_ARCNET_CAP=m
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++# CONFIG_ARCNET_COM20020 is not set
++
++#
++# PHY device support
++#
++CONFIG_PHYLIB=m
++
++#
++# MII PHY device drivers
++#
++CONFIG_MARVELL_PHY=m
++CONFIG_DAVICOM_PHY=m
++CONFIG_QSEMI_PHY=m
++CONFIG_LXT_PHY=m
++CONFIG_CICADA_PHY=m
++CONFIG_VITESSE_PHY=m
++CONFIG_SMSC_PHY=m
++CONFIG_FIXED_PHY=m
++# CONFIG_FIXED_MII_10_FDX is not set
++# CONFIG_FIXED_MII_100_FDX is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_CASSINI=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_ULI526X=m
++CONFIG_PCMCIA_XIRCOM=m
++CONFIG_HP100=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++CONFIG_AMD8111E_NAPI=y
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_ADAPTEC_STARFIRE_NAPI=y
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_DGRS=m
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++# CONFIG_8139TOO_PIO is not set
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_VIA_RHINE_NAPI is not set
++# CONFIG_NET_POCKET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++CONFIG_E1000_NAPI=y
++# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++CONFIG_R8169_NAPI=y
++CONFIG_R8169_VLAN=y
++CONFIG_SIS190=m
++CONFIG_SKGE=m
++CONFIG_SKY2=m
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_CHELSIO_T1=m
++CONFIG_IXGB=m
++CONFIG_IXGB_NAPI=y
++CONFIG_S2IO=m
++CONFIG_S2IO_NAPI=y
++CONFIG_MYRI10GE=m
++# CONFIG_SFC is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMOL=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_ABYSS=m
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++CONFIG_NET_WIRELESS_RTNETLINK=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
++
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
++
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_IPW2100=m
++CONFIG_IPW2100_MONITOR=y
++# CONFIG_IPW2100_DEBUG is not set
++CONFIG_IPW2200=m
++CONFIG_IPW2200_MONITOR=y
++# CONFIG_IPW2200_RADIOTAP is not set
++# CONFIG_IPW2200_PROMISCUOUS is not set
++CONFIG_IPW2200_QOS=y
++# CONFIG_IPW2200_DEBUG is not set
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_NORTEL_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++# CONFIG_PCMCIA_HERMES is not set
++# CONFIG_PCMCIA_SPECTRUM is not set
++# CONFIG_AIRO_CS is not set
++# CONFIG_PCMCIA_ATMEL is not set
++# CONFIG_PCMCIA_WL3501 is not set
++
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_USB_ZD1201=m
++CONFIG_HOSTAP=m
++CONFIG_HOSTAP_FIRMWARE=y
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y
++CONFIG_HOSTAP_PLX=m
++CONFIG_HOSTAP_PCI=m
++# CONFIG_HOSTAP_CS is not set
++CONFIG_BCM43XX=m
++CONFIG_BCM43XX_DEBUG=y
++CONFIG_BCM43XX_DMA=y
++CONFIG_BCM43XX_PIO=y
++CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
++# CONFIG_BCM43XX_DMA_MODE is not set
++# CONFIG_BCM43XX_PIO_MODE is not set
++CONFIG_ZD1211RW=m
++# CONFIG_ZD1211RW_DEBUG is not set
++CONFIG_NET_WIRELESS=y
++
++#
++# PCMCIA network device support
++#
++# CONFIG_NET_PCMCIA is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_DUMMY=m
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++# CONFIG_ATM_ZATM is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++# CONFIG_ATM_FORE200E_PCA is not set
++CONFIG_ATM_HE=m
++# CONFIG_ATM_HE_USE_SUNI is not set
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++CONFIG_ROADRUNNER_LARGE_RINGS=y
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPP_MPPE=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++CONFIG_NETPOLL=y
++CONFIG_NETPOLL_RX=y
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DIVERSION=m
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++CONFIG_HISAX_NO_SENDCOMPLETE=y
++CONFIG_HISAX_NO_LLC=y
++CONFIG_HISAX_NO_KEYPAD=y
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++# CONFIG_HISAX_SEDLBAUER_CS is not set
++# CONFIG_HISAX_ELSA_CS is not set
++# CONFIG_HISAX_AVM_A1_CS is not set
++# CONFIG_HISAX_TELES_CS is not set
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
++
++#
++# Active cards
++#
++
++#
++# Siemens Gigaset
++#
++CONFIG_ISDN_DRV_GIGASET=m
++CONFIG_GIGASET_BASE=m
++CONFIG_GIGASET_M105=m
++# CONFIG_GIGASET_DEBUG is not set
++# CONFIG_GIGASET_UNDOCREQ is not set
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++# CONFIG_ISDN_DRV_AVMB1_AVM_CS is not set
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++#
++# Active Eicon DIVA Server cards
++#
++# CONFIG_CAPI_EICON is not set
++
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++# CONFIG_KEYBOARD_LKKBD is not set
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++# CONFIG_MOUSE_VSXXXAA is not set
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_ADS7846=m
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_FM801=m
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_VT_HW_CONSOLE_BINDING=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=64
++CONFIG_PRINTER=m
++CONFIG_LP_CONSOLE=y
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++CONFIG_IPMI_PANIC_EVENT=y
++# CONFIG_IPMI_PANIC_STRING is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_IBMASR=m
++CONFIG_WAFER_WDT=m
++CONFIG_I6300ESB_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_SBC8360_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_W83977F_WDT=m
++CONFIG_MACHZ_WDT=m
++CONFIG_SBC_EPX_C3_WATCHDOG=m
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_INTEL=m
++CONFIG_HW_RANDOM_AMD=m
++CONFIG_HW_RANDOM_GEODE=m
++CONFIG_NVRAM=y
++CONFIG_RTC=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_VIA=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++# CONFIG_DRM_I830 is not set
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++# CONFIG_DRM_SIS is not set
++CONFIG_DRM_VIA=m
++CONFIG_DRM_SAVAGE=m
++
++#
++# PCMCIA character devices
++#
++# CONFIG_SYNCLINK_CS is not set
++# CONFIG_CARDMAN_4000 is not set
++# CONFIG_CARDMAN_4040 is not set
++# CONFIG_MWAVE is not set
++CONFIG_PC8736x_GPIO=m
++CONFIG_NSC_GPIO=m
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++CONFIG_TCG_TPM=m
++CONFIG_TCG_TIS=m
++CONFIG_TCG_NSC=m
++CONFIG_TCG_ATMEL=m
++CONFIG_TCG_INFINEON=m
++CONFIG_TCG_XEN=m
++CONFIG_TELCLOCK=m
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_OCORES=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
++
++#
++# Miscellaneous I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_DS1374=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCA9539=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_MAX6875=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# SPI support
++#
++CONFIG_SPI=y
++# CONFIG_SPI_DEBUG is not set
++CONFIG_SPI_MASTER=y
++
++#
++# SPI Master Controller Drivers
++#
++CONFIG_SPI_BITBANG=m
++CONFIG_SPI_BUTTERFLY=m
++
++#
++# SPI Protocol Masters
++#
++
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_CON=y
++
++#
++# 1-wire Bus Masters
++#
++CONFIG_W1_MASTER_MATROX=m
++CONFIG_W1_MASTER_DS2490=m
++CONFIG_W1_MASTER_DS2482=m
++
++#
++# 1-wire Slaves
++#
++CONFIG_W1_SLAVE_THERM=m
++CONFIG_W1_SLAVE_SMEM=m
++CONFIG_W1_SLAVE_DS2433=m
++# CONFIG_W1_SLAVE_DS2433_CRC is not set
++
++#
++# Hardware Monitoring support
++#
++CONFIG_HWMON=m
++CONFIG_HWMON_VID=m
++CONFIG_SENSORS_ABITUGURU=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ADM9240=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_ATXP1=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_F71805F=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM70=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_SMSC47M192=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_VT8231=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83791D=m
++CONFIG_SENSORS_W83792D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++CONFIG_SENSORS_W83627EHF=m
++CONFIG_SENSORS_HDAPS=m
++# CONFIG_HWMON_DEBUG_CHIP is not set
++
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++CONFIG_VIDEO_V4L1=y
++CONFIG_VIDEO_V4L1_COMPAT=y
++CONFIG_VIDEO_V4L2=y
++
++#
++# Video Capture Adapters
++#
++
++#
++# Video Capture Adapters
++#
++# CONFIG_VIDEO_ADV_DEBUG is not set
++CONFIG_VIDEO_VIVI=m
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BT848_DVB=y
++CONFIG_VIDEO_SAA6588=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_CPIA2=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++CONFIG_VIDEO_ZORAN_AVS6EYES=m
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_ALSA=m
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_SAA7134_DVB_ALL_FRONTENDS=y
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88_VP3054=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_ALSA=m
++CONFIG_VIDEO_CX88_BLACKBIRD=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_CX88_DVB_ALL_FRONTENDS=y
++
++#
++# Encoders and Decoders
++#
++CONFIG_VIDEO_MSP3400=m
++CONFIG_VIDEO_CS53L32A=m
++CONFIG_VIDEO_TLV320AIC23B=m
++CONFIG_VIDEO_WM8775=m
++CONFIG_VIDEO_WM8739=m
++CONFIG_VIDEO_CX2341X=m
++CONFIG_VIDEO_CX25840=m
++CONFIG_VIDEO_SAA711X=m
++CONFIG_VIDEO_SAA7127=m
++CONFIG_VIDEO_UPD64031A=m
++CONFIG_VIDEO_UPD64083=m
++
++#
++# V4L USB devices
++#
++CONFIG_VIDEO_PVRUSB2=m
++CONFIG_VIDEO_PVRUSB2_24XXX=y
++CONFIG_VIDEO_PVRUSB2_SYSFS=y
++# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
++CONFIG_VIDEO_EM28XX=m
++CONFIG_VIDEO_USBVIDEO=m
++CONFIG_USB_VICAM=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_QUICKCAM_MESSENGER=m
++CONFIG_USB_ET61X251=m
++CONFIG_VIDEO_OVCAMCHIP=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_ZC0301=m
++CONFIG_USB_PWC=m
++# CONFIG_USB_PWC_DEBUG is not set
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_USB_DSBR=m
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_USB=m
++# CONFIG_DVB_USB_DEBUG is not set
++CONFIG_DVB_USB_A800=m
++CONFIG_DVB_USB_DIBUSB_MB=m
++# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
++CONFIG_DVB_USB_DIBUSB_MC=m
++CONFIG_DVB_USB_UMT_010=m
++CONFIG_DVB_USB_CXUSB=m
++CONFIG_DVB_USB_DIGITV=m
++CONFIG_DVB_USB_VP7045=m
++CONFIG_DVB_USB_VP702X=m
++CONFIG_DVB_USB_GP8PSK=m
++CONFIG_DVB_USB_NOVA_T_USB2=m
++CONFIG_DVB_USB_DTT200U=m
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_CINERGYT2=m
++CONFIG_DVB_CINERGYT2_TUNING=y
++CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
++CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
++CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
++CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
++CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
++
++#
++# Supported Pluto2 Adapters
++#
++CONFIG_DVB_PLUTO2=m
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_CX24123=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++CONFIG_DVB_S5H1420=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_ZL10353=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
++#
++CONFIG_DVB_NXT200X=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_DVB_BCM3510=m
++CONFIG_DVB_LGDT330X=m
++
++#
++# Miscellaneous devices
++#
++CONFIG_DVB_PLL=m
++CONFIG_DVB_LNBP21=m
++CONFIG_DVB_ISL6421=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BUF_DVB=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++CONFIG_USB_DABUSB=m
++
++#
++# Graphics support
++#
++CONFIG_FIRMWARE_EDID=y
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++# CONFIG_FB_MACMODES is not set
++# CONFIG_FB_BACKLIGHT is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++# CONFIG_FB_PM2 is not set
++CONFIG_FB_CYBER2000=m
++CONFIG_FB_ARC=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++CONFIG_FB_VESA=y
++CONFIG_FB_HGA=m
++CONFIG_FB_HGA_ACCEL=y
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++# CONFIG_FB_RIVA_DEBUG is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MAVEN=m
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++CONFIG_FB_3DFX_ACCEL=y
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_TRIDENT=m
++CONFIG_FB_TRIDENT_ACCEL=y
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX=m
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_VIRTUAL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_VGACON_SOFT_SCROLLBACK is not set
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++CONFIG_BACKLIGHT_DEVICE=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_DEVICE=y
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_PCM_OSS_PLUGINS=y
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
++CONFIG_SND_DYNAMIC_MINORS=y
++CONFIG_SND_SUPPORT_OLD_API=y
++CONFIG_SND_VERBOSE_PROCFS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_AC97_BUS=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++
++#
++# PCI devices
++#
++CONFIG_SND_AD1889=m
++CONFIG_SND_ALS300=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CA0106=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_CS4281=m
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_DARLA20=m
++CONFIG_SND_GINA20=m
++CONFIG_SND_LAYLA20=m
++CONFIG_SND_DARLA24=m
++CONFIG_SND_GINA24=m
++CONFIG_SND_LAYLA24=m
++CONFIG_SND_MONA=m
++CONFIG_SND_MIA=m
++CONFIG_SND_ECHO3G=m
++CONFIG_SND_INDIGO=m
++CONFIG_SND_INDIGOIO=m
++CONFIG_SND_INDIGODJ=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X_BOOL=y
++CONFIG_SND_FM801_TEA575X=m
++CONFIG_SND_HDA_INTEL=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_HDSPM=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_PCXHR=m
++CONFIG_SND_RIPTIDE=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VX222=m
++CONFIG_SND_YMFPCI=m
++
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
++
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_PDAUDIOCF=m
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB_ARCH_HAS_EHCI=y
++CONFIG_USB=m
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=m
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_TT_NEWSCHED=y
++CONFIG_USB_ISP116X_HCD=m
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++# CONFIG_USB_SL811_CS is not set
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
++#
++
++#
++# may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++# CONFIG_USB_STORAGE_ALAUDA is not set
++# CONFIG_USB_LIBUSUAL is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_USB_HIDINPUT_POWERBOOK is not set
++CONFIG_HID_FF=y
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_ACECAD=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_TOUCHSCREEN=m
++CONFIG_USB_TOUCHSCREEN_EGALAX=y
++CONFIG_USB_TOUCHSCREEN_PANJIT=y
++CONFIG_USB_TOUCHSCREEN_3M=y
++CONFIG_USB_TOUCHSCREEN_ITM=y
++CONFIG_USB_YEALINK=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++CONFIG_USB_ATI_REMOTE2=m
++CONFIG_USB_KEYSPAN_REMOTE=m
++CONFIG_USB_APPLETOUCH=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++CONFIG_USB_NET_AX8817X=m
++CONFIG_USB_NET_CDCETHER=m
++CONFIG_USB_NET_GL620A=m
++CONFIG_USB_NET_NET1080=m
++CONFIG_USB_NET_PLUSB=m
++CONFIG_USB_NET_RNDIS_HOST=m
++CONFIG_USB_NET_CDC_SUBSET=m
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_NET_ZAURUS=m
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_ARK3116=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_FUNSOFT=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_NAVMAN=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_SIERRAWIRELESS=m
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++# CONFIG_USB_SERIAL_OPTION is not set
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
++
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++CONFIG_USB_EMI26=m
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYPRESS_CY7C63=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_APPLEDISPLAY=m
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_SISUSBVGA_CON=y
++CONFIG_USB_LD=m
++CONFIG_USB_TEST=m
++
++#
++# USB DSL modem support
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++CONFIG_USB_CXACRU=m
++CONFIG_USB_UEAGLEATM=m
++CONFIG_USB_XUSBATM=m
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++CONFIG_MMC=m
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_SDHCI=m
++CONFIG_MMC_WBSD=m
++
++#
++# LED devices
++#
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=m
++
++#
++# LED drivers
++#
++
++#
++# LED Triggers
++#
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=m
++CONFIG_LEDS_TRIGGER_IDE_DISK=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=m
++
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_USER_MAD=m
++CONFIG_INFINIBAND_USER_ACCESS=m
++CONFIG_INFINIBAND_ADDR_TRANS=y
++CONFIG_INFINIBAND_MTHCA=m
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=m
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
++CONFIG_INFINIBAND_SRP=m
++CONFIG_INFINIBAND_ISER=m
++
++#
++# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
++#
++CONFIG_EDAC=m
++
++#
++# Reporting subsystems
++#
++# CONFIG_EDAC_DEBUG is not set
++CONFIG_EDAC_MM_EDAC=m
++CONFIG_EDAC_E752X=m
++CONFIG_EDAC_POLL=y
++
++#
++# Real Time Clock
++#
++CONFIG_RTC_LIB=m
++CONFIG_RTC_CLASS=m
++
++#
++# RTC interfaces
++#
++CONFIG_RTC_INTF_SYSFS=m
++CONFIG_RTC_INTF_PROC=m
++CONFIG_RTC_INTF_DEV=m
++CONFIG_RTC_INTF_DEV_UIE_EMUL=y
++
++#
++# RTC drivers
++#
++CONFIG_RTC_DRV_X1205=m
++CONFIG_RTC_DRV_DS1307=m
++CONFIG_RTC_DRV_DS1553=m
++CONFIG_RTC_DRV_ISL1208=m
++CONFIG_RTC_DRV_DS1672=m
++CONFIG_RTC_DRV_DS1742=m
++CONFIG_RTC_DRV_PCF8563=m
++CONFIG_RTC_DRV_PCF8583=m
++CONFIG_RTC_DRV_RS5C348=m
++CONFIG_RTC_DRV_RS5C372=m
++CONFIG_RTC_DRV_M48T86=m
++CONFIG_RTC_DRV_TEST=m
++CONFIG_RTC_DRV_MAX6902=m
++CONFIG_RTC_DRV_V3020=m
++
++#
++# DMA Engine support
++#
++CONFIG_DMA_ENGINE=y
++
++#
++# DMA Clients
++#
++CONFIG_NET_DMA=y
++
++#
++# DMA Devices
++#
++CONFIG_INTEL_IOATDMA=m
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=y
++CONFIG_DELL_RBU=m
++CONFIG_DCDBAS=m
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT2_FS_XIP=y
++CONFIG_FS_XIP=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++# CONFIG_JFS_DEBUG is not set
++# CONFIG_JFS_STATISTICS is not set
++CONFIG_FS_POSIX_ACL=y
++CONFIG_XFS_FS=m
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_XFS_RT=y
++CONFIG_OCFS2_FS=m
++CONFIG_OCFS2_DEBUG_MASKLOG=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_INOTIFY=y
++CONFIG_INOTIFY_USER=y
++CONFIG_QUOTA=y
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++CONFIG_FUSE_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++CONFIG_TMPFS=y
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++CONFIG_CONFIGFS_FS=m
++
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_WRITEBUFFER=y
++CONFIG_JFFS2_SUMMARY=y
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_JFFS2_FS_POSIX_ACL=y
++CONFIG_JFFS2_FS_SECURITY=y
++CONFIG_JFFS2_COMPRESSION_OPTIONS=y
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++# CONFIG_JFFS2_CMODE_NONE is not set
++CONFIG_JFFS2_CMODE_PRIORITY=y
++# CONFIG_JFFS2_CMODE_SIZE is not set
++CONFIG_CRAMFS=m
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++# CONFIG_UFS_DEBUG is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V3_ACL=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V2_ACL=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V3_ACL=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_NFS_ACL_SUPPORT=m
++CONFIG_NFS_COMMON=y
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp850"
++CONFIG_CIFS=m
++CONFIG_CIFS_STATS=y
++# CONFIG_CIFS_STATS2 is not set
++# CONFIG_CIFS_WEAK_PW_HASH is not set
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++# CONFIG_CIFS_DEBUG2 is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++CONFIG_NCPFS_SMALLDOS=y
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++CONFIG_9P_FS=m
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++CONFIG_SUN_PARTITION=y
++# CONFIG_KARMA_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Instrumentation Support
++#
++# CONFIG_PROFILING is not set
++# CONFIG_KPROBES is not set
++
++#
++# Kernel hacking
++#
++CONFIG_TRACE_IRQFLAGS_SUPPORT=y
++# CONFIG_PRINTK_TIME is not set
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_UNUSED_SYMBOLS=y
++CONFIG_DEBUG_KERNEL=y
++CONFIG_LOG_BUF_SHIFT=15
++CONFIG_DETECT_SOFTLOCKUP=y
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_RT_MUTEXES is not set
++# CONFIG_RT_MUTEX_TESTER is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_MUTEXES is not set
++# CONFIG_DEBUG_RWSEMS is not set
++# CONFIG_DEBUG_LOCK_ALLOC is not set
++# CONFIG_PROVE_LOCKING is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_VM is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_UNWIND_INFO is not set
++CONFIG_FORCED_INLINING=y
++# CONFIG_RCU_TORTURE_TEST is not set
++# CONFIG_DEBUG_RODATA is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++# CONFIG_SECURITY_NETWORK_XFRM is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++# CONFIG_SECURITY_SELINUX is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES=m
++CONFIG_CRYPTO_AES_X86_64=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
++
++#
++# Hardware crypto devices
++#
++CONFIG_XEN=y
++CONFIG_XEN_INTERFACE_VERSION=0x00030207
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++# CONFIG_XEN_UNPRIVILEGED_GUEST is not set
++CONFIG_XEN_PRIVCMD=y
++CONFIG_XEN_XENBUS_DEV=y
++CONFIG_XEN_BACKEND=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++CONFIG_XEN_BLKDEV_TAP=y
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL is not set
++# CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND is not set
++# CONFIG_XEN_NETDEV_LOOPBACK is not set
++CONFIG_XEN_PCIDEV_BACKEND=m
++# CONFIG_XEN_PCIDEV_BACKEND_VPCI is not set
++CONFIG_XEN_PCIDEV_BACKEND_PASS=y
++# CONFIG_XEN_PCIDEV_BACKEND_SLOT is not set
++# CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER is not set
++# CONFIG_XEN_PCIDEV_BE_DEBUG is not set
++CONFIG_XEN_TPMDEV_BACKEND=m
++CONFIG_XEN_SCSI_BACKEND=m
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++CONFIG_XEN_SCSI_FRONTEND=m
++CONFIG_XEN_GRANT_DEV=y
++# CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND is not set
++CONFIG_XEN_FRAMEBUFFER=y
++CONFIG_XEN_KEYBOARD=y
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_DISABLE_SERIAL=y
++CONFIG_XEN_SYSFS=y
++CONFIG_XEN_COMPAT_030002_AND_LATER=y
++# CONFIG_XEN_COMPAT_030004_AND_LATER is not set
++# CONFIG_XEN_COMPAT_030100_AND_LATER is not set
++# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
++CONFIG_XEN_COMPAT=0x030002
++CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
++CONFIG_NO_IDLE_HZ=y
++CONFIG_XEN_SMPBOOT=y
++CONFIG_XEN_BALLOON=y
++CONFIG_XEN_DEVMEM=y
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC16=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++CONFIG_TEXTSEARCH=y
++CONFIG_TEXTSEARCH_KMP=m
++CONFIG_TEXTSEARCH_BM=m
++CONFIG_TEXTSEARCH_FSM=m
++CONFIG_PLIST=y
+diff -rpuN linux-2.6.18.8/Documentation/i2c/busses/i2c-i801 linux-2.6.18-xen-3.3.0/Documentation/i2c/busses/i2c-i801
+--- linux-2.6.18.8/Documentation/i2c/busses/i2c-i801 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/Documentation/i2c/busses/i2c-i801 2008-08-21 11:36:07.000000000 +0200
+@@ -10,6 +10,11 @@ Supported adapters:
+ * Intel 6300ESB
+ * Intel 82801FB/FR/FW/FRW (ICH6)
+ * Intel ICH7
++ * Intel 82801G (ICH7)
++ * Intel 82801H (ICH8)
++ * Intel 82801I (ICH9)
++ * Intel Tolapai
++ * Intel ICH10
+ Datasheets: Publicly available at the Intel website
+
+ Authors:
+diff -rpuN linux-2.6.18.8/drivers/acpi/dispatcher/dsobject.c linux-2.6.18-xen-3.3.0/drivers/acpi/dispatcher/dsobject.c
+--- linux-2.6.18.8/drivers/acpi/dispatcher/dsobject.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/dispatcher/dsobject.c 2008-08-21 11:36:07.000000000 +0200
+@@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acp
+ return_ACPI_STATUS(status);
+ }
+ }
++
++ /* Special object resolution for elements of a package */
++
++ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
++ (op->common.parent->common.aml_opcode ==
++ AML_VAR_PACKAGE_OP)) {
++ /*
++ * Attempt to resolve the node to a value before we insert it into
++ * the package. If this is a reference to a common data type,
++ * resolve it immediately. According to the ACPI spec, package
++ * elements can only be "data objects" or method references.
++ * Attempt to resolve to an Integer, Buffer, String or Package.
++ * If cannot, return the named reference (for things like Devices,
++ * Methods, etc.) Buffer Fields and Fields will resolve to simple
++			 * If we cannot, return the named reference (for things like Devices,
++ *
++ * NOTE: References to things like Devices, Methods, Mutexes, etc.
++ * will remain as named references. This behavior is not described
++ * in the ACPI spec, but it appears to be an oversight.
++ */
++ obj_desc = (union acpi_operand_object *)op->common.node;
++
++ status =
++ acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
++ (struct
++ acpi_namespace_node,
++ &obj_desc),
++ walk_state);
++ if (ACPI_FAILURE(status)) {
++ return_ACPI_STATUS(status);
++ }
++
++ switch (op->common.node->type) {
++ /*
++ * For these types, we need the actual node, not the subobject.
++ * However, the subobject got an extra reference count above.
++ */
++ case ACPI_TYPE_MUTEX:
++ case ACPI_TYPE_METHOD:
++ case ACPI_TYPE_POWER:
++ case ACPI_TYPE_PROCESSOR:
++ case ACPI_TYPE_EVENT:
++ case ACPI_TYPE_REGION:
++ case ACPI_TYPE_DEVICE:
++ case ACPI_TYPE_THERMAL:
++
++ obj_desc =
++ (union acpi_operand_object *)op->common.
++ node;
++ break;
++
++ default:
++ break;
++ }
++
++ /*
++ * If above resolved to an operand object, we are done. Otherwise,
++ * we have a NS node, we must create the package entry as a named
++ * reference.
++ */
++ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
++ ACPI_DESC_TYPE_NAMED) {
++ goto exit;
++ }
++ }
+ }
+
+ /* Create and init a new internal ACPI object */
+@@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acp
+ return_ACPI_STATUS(status);
+ }
+
++ exit:
+ *obj_desc_ptr = obj_desc;
+ return_ACPI_STATUS(AE_OK);
+ }
+@@ -358,12 +424,25 @@ acpi_ds_build_internal_package_obj(struc
+ arg = arg->common.next;
+ for (i = 0; arg; i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+-
+- /* Object (package or buffer) is already built */
+-
+- obj_desc->package.elements[i] =
+- ACPI_CAST_PTR(union acpi_operand_object,
+- arg->common.node);
++ if (arg->common.node->type == ACPI_TYPE_METHOD) {
++ /*
++ * A method reference "looks" to the parser to be a method
++ * invocation, so we special case it here
++				 * invocation, so we special-case it here
++ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
++ status =
++ acpi_ds_build_internal_object(walk_state,
++ arg,
++ &obj_desc->
++ package.
++ elements[i]);
++ } else {
++ /* This package element is already built, just get it */
++
++ obj_desc->package.elements[i] =
++ ACPI_CAST_PTR(union acpi_operand_object,
++ arg->common.node);
++ }
+ } else {
+ status = acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->
+diff -rpuN linux-2.6.18.8/drivers/acpi/hardware/hwregs.c linux-2.6.18-xen-3.3.0/drivers/acpi/hardware/hwregs.c
+--- linux-2.6.18.8/drivers/acpi/hardware/hwregs.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/hardware/hwregs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -665,8 +665,6 @@ acpi_status acpi_hw_register_write(u8 us
+
+ /*
+ * Perform a read first to preserve certain bits (per ACPI spec)
+- *
+- * Note: This includes SCI_EN, we never want to change this bit
+ */
+ status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+ ACPI_REGISTER_PM1_CONTROL,
+diff -rpuN linux-2.6.18.8/drivers/acpi/hardware/hwsleep.c linux-2.6.18-xen-3.3.0/drivers/acpi/hardware/hwsleep.c
+--- linux-2.6.18.8/drivers/acpi/hardware/hwsleep.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/hardware/hwsleep.c 2008-08-21 11:36:07.000000000 +0200
+@@ -227,7 +227,11 @@ acpi_status asmlinkage acpi_enter_sleep_
+ u32 PM1Bcontrol;
+ struct acpi_bit_register_info *sleep_type_reg_info;
+ struct acpi_bit_register_info *sleep_enable_reg_info;
++#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
+ u32 in_value;
++#else
++ int err;
++#endif
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
+@@ -327,6 +331,7 @@ acpi_status asmlinkage acpi_enter_sleep_
+
+ ACPI_FLUSH_CPU_CACHE();
+
++#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
+ status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
+ ACPI_REGISTER_PM1A_CONTROL,
+ PM1Acontrol);
+@@ -376,6 +381,16 @@ acpi_status asmlinkage acpi_enter_sleep_
+ /* Spin until we wake */
+
+ } while (!in_value);
++#else
++ /* PV ACPI only needs to check the hypercall return value */
++ err = acpi_notify_hypervisor_state(sleep_state,
++ PM1Acontrol, PM1Bcontrol);
++ if (err) {
++ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
++ "Hypervisor failure [%d]\n", err));
++ return_ACPI_STATUS(AE_ERROR);
++ }
++#endif
+
+ return_ACPI_STATUS(AE_OK);
+ }
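
The Xen branch above collapses the PM1 control writes and wake-spin loop into a single hypercall, acpi_notify_hypervisor_state(). A minimal sketch of such a helper, assuming a Xen platform op along the lines of XENPF_enter_acpi_sleep; the op and field names are illustrative, not taken from this patch:

    /* Sketch only: assumes a XENPF_enter_acpi_sleep platform op with
     * these fields; the real helper ships elsewhere in this tree. */
    static int acpi_notify_hypervisor_state(u8 sleep_state,
                                            u32 pm1a_cnt, u32 pm1b_cnt)
    {
            struct xen_platform_op op = {
                    .cmd = XENPF_enter_acpi_sleep,
                    .interface_version = XENPF_INTERFACE_VERSION,
                    .u.enter_acpi_sleep = {
                            .pm1a_cnt_val = (u16)pm1a_cnt,
                            .pm1b_cnt_val = (u16)pm1b_cnt,
                            .sleep_state  = sleep_state,
                    },
            };

            /* A non-zero return means the hypervisor rejected the request. */
            return HYPERVISOR_platform_op(&op);
    }
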
+diff -rpuN linux-2.6.18.8/drivers/acpi/Kconfig linux-2.6.18-xen-3.3.0/drivers/acpi/Kconfig
+--- linux-2.6.18.8/drivers/acpi/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -305,6 +305,7 @@ config ACPI_SYSTEM
+ config X86_PM_TIMER
+ bool "Power Management Timer Support" if EMBEDDED
+ depends on X86
++ depends on !XEN
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable,
+@@ -362,6 +363,15 @@ config ACPI_SBS
+ A "Smart Battery" is quite old and quite rare compared
+ to today's ACPI "Control Method" battery.
+
++config ACPI_PV_SLEEP
++ bool
++ depends on X86 && XEN && ACPI_SLEEP
++ default y
++
++config PROCESSOR_EXTERNAL_CONTROL
++ bool
++ depends on X86 && XEN
++ default y
+ endif # ACPI
+
+ endmenu
+diff -rpuN linux-2.6.18.8/drivers/acpi/Makefile linux-2.6.18-xen-3.3.0/drivers/acpi/Makefile
+--- linux-2.6.18.8/drivers/acpi/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -34,6 +34,9 @@ processor-objs += processor_core.o proce
+ ifdef CONFIG_CPU_FREQ
+ processor-objs += processor_perflib.o
+ endif
++ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++processor-objs += processor_perflib.o processor_extcntl.o
++endif
+
+ obj-y += sleep/
+ obj-y += bus.o glue.o
+diff -rpuN linux-2.6.18.8/drivers/acpi/processor_core.c linux-2.6.18-xen-3.3.0/drivers/acpi/processor_core.c
+--- linux-2.6.18.8/drivers/acpi/processor_core.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/processor_core.c 2008-08-21 11:36:07.000000000 +0200
+@@ -475,7 +475,8 @@ static int acpi_processor_get_info(struc
+ */
+ if (cpu_index == -1) {
+ if (ACPI_FAILURE
+- (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
++ (acpi_processor_hotadd_init(pr->handle, &pr->id)) &&
++ !processor_cntl_external()) {
+ printk(KERN_ERR PREFIX
+ "Getting cpuindex for acpiid 0x%x\n",
+ pr->acpi_id);
+@@ -508,7 +509,7 @@ static int acpi_processor_get_info(struc
+ request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+ }
+
+-#ifdef CONFIG_CPU_FREQ
++#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
+ acpi_processor_ppc_has_changed(pr);
+ #endif
+ acpi_processor_get_throttling_info(pr);
+@@ -517,7 +518,7 @@ static int acpi_processor_get_info(struc
+ return 0;
+ }
+
+-static void *processor_device_array[NR_CPUS];
++static void *processor_device_array[NR_ACPI_CPUS];
+
+ static int acpi_processor_start(struct acpi_device *device)
+ {
+@@ -525,31 +526,46 @@ static int acpi_processor_start(struct a
+ acpi_status status = AE_OK;
+ struct acpi_processor *pr;
+
++ processor_extcntl_init();
+
+ pr = acpi_driver_data(device);
+
+ result = acpi_processor_get_info(pr);
+- if (result) {
++ if (result ||
++ ((pr->id == -1) && !processor_cntl_external())) {
+ /* Processor is physically not present */
+ return 0;
+ }
+
+- BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));
++ BUG_ON(!processor_cntl_external() &&
++ ((pr->id >= NR_CPUS) || (pr->id < 0)));
+
+ /*
+ * Buggy BIOS check
+ * ACPI id of processors can be reported wrongly by the BIOS.
+ * Don't trust it blindly
+ */
++#ifdef CONFIG_XEN
++ BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);
++ if (processor_device_array[pr->acpi_id] != NULL &&
++ processor_device_array[pr->acpi_id] != (void *)device) {
++#else
+ if (processor_device_array[pr->id] != NULL &&
+ processor_device_array[pr->id] != (void *)device) {
++#endif /* CONFIG_XEN */
+ printk(KERN_WARNING "BIOS reported wrong ACPI id"
+ "for the processor\n");
+ return -ENODEV;
+ }
++#ifdef CONFIG_XEN
++ processor_device_array[pr->acpi_id] = (void *)device;
++ if (pr->id != -1)
++ processors[pr->id] = pr;
++#else
+ processor_device_array[pr->id] = (void *)device;
+
+ processors[pr->id] = pr;
++#endif /* CONFIG_XEN */
+
+ result = acpi_processor_add_fs(device);
+ if (result)
+@@ -564,6 +580,10 @@ static int acpi_processor_start(struct a
+
+ acpi_processor_power_init(pr, device);
+
++ result = processor_extcntl_prepare(pr);
++ if (result)
++ goto end;
++
+ if (pr->flags.throttling) {
+ printk(KERN_INFO PREFIX "%s [%s] (supports",
+ acpi_device_name(device), acpi_device_bid(device));
+@@ -656,7 +676,13 @@ static int acpi_processor_remove(struct
+
+ acpi_processor_remove_fs(device);
+
++#ifdef CONFIG_XEN
++ if (pr->id != -1)
++ processors[pr->id] = NULL;
++#else
+ processors[pr->id] = NULL;
++#endif /* CONFIG_XEN */
++
+
+ kfree(pr);
+
+@@ -710,6 +736,10 @@ int acpi_processor_device_add(acpi_handl
+ if (!pr)
+ return -ENODEV;
+
++ if (processor_cntl_external())
++ processor_notify_external(pr,
++ PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
++
+ if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
+ kobject_uevent(&(*device)->kobj, KOBJ_ONLINE);
+ }
+@@ -748,6 +778,10 @@ acpi_processor_hotplug_notify(acpi_handl
+ break;
+ }
+
++ if (processor_cntl_external())
++ processor_notify_external(pr,
++ PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
++
+ if (pr->id >= 0 && (pr->id < NR_CPUS)) {
+ kobject_uevent(&device->kobj, KOBJ_OFFLINE);
+ break;
+@@ -777,8 +811,18 @@ acpi_processor_hotplug_notify(acpi_handl
+ return;
+ }
+
++#ifdef CONFIG_XEN
++ if ((pr->id >= 0) && (pr->id < NR_CPUS)
++ && (cpu_present(pr->id)))
++#else
+ if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
++#endif /* CONFIG_XEN */
+ kobject_uevent(&device->kobj, KOBJ_OFFLINE);
++
++ if (processor_cntl_external())
++ processor_notify_external(pr, PROCESSOR_HOTPLUG,
++ HOTPLUG_TYPE_REMOVE);
++
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+@@ -843,6 +887,11 @@ static acpi_status acpi_processor_hotadd
+
+ static int acpi_processor_handle_eject(struct acpi_processor *pr)
+ {
++#ifdef CONFIG_XEN
++ if (pr->id == -1)
++ return (0);
++#endif /* CONFIG_XEN */
++
+ if (cpu_online(pr->id)) {
+ return (-EINVAL);
+ }
+diff -rpuN linux-2.6.18.8/drivers/acpi/processor_extcntl.c linux-2.6.18-xen-3.3.0/drivers/acpi/processor_extcntl.c
+--- linux-2.6.18.8/drivers/acpi/processor_extcntl.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/processor_extcntl.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,232 @@
++/*
++ * processor_extcntl.c - channel to external control logic
++ *
++ * Copyright (C) 2008, Intel corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/pm.h>
++#include <linux/cpu.h>
++
++#include <acpi/processor.h>
++
++#define ACPI_PROCESSOR_COMPONENT 0x01000000
++#define ACPI_PROCESSOR_CLASS "processor"
++#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
++#define _COMPONENT ACPI_PROCESSOR_COMPONENT
++ACPI_MODULE_NAME("acpi_processor")
++
++static int processor_extcntl_parse_csd(struct acpi_processor *pr);
++static int processor_extcntl_get_performance(struct acpi_processor *pr);
++/*
++ * External processor control logic may register with its own set of
++ * ops to get ACPI related notification. One example is like VMM.
++ */
++const struct processor_extcntl_ops *processor_extcntl_ops;
++EXPORT_SYMBOL(processor_extcntl_ops);
++
++static int processor_notify_smm(void)
++{
++ acpi_status status;
++ static int is_done = 0;
++
++ /* We only need to notify the BIOS successfully once; avoid */
++ /* double notification, which may lead to unexpected results. */
++ if (is_done)
++ return 0;
++
++ /* Can't write pstate_cnt to smi_cmd if either value is zero */
++ if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
++ return 0;
++ }
++
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++ "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
++ acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));
++
++ /* FADT v1 doesn't support pstate_cnt, but many BIOS vendors use
++ * it anyway, so we need to support it... */
++ if (acpi_fadt_is_v1) {
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++ "Using v1.0 FADT reserved value for pstate_cnt\n"));
++ }
++
++ status = acpi_os_write_port(acpi_fadt.smi_cmd,
++ (u32) acpi_fadt.pstate_cnt, 8);
++ if (ACPI_FAILURE(status))
++ return status;
++
++ is_done = 1;
++
++ return 0;
++}
++
++int processor_notify_external(struct acpi_processor *pr, int event, int type)
++{
++ int ret = -EINVAL;
++
++ if (!processor_cntl_external())
++ return -EINVAL;
++
++ switch (event) {
++ case PROCESSOR_PM_INIT:
++ case PROCESSOR_PM_CHANGE:
++ if ((type >= PM_TYPE_MAX) ||
++ !processor_extcntl_ops->pm_ops[type])
++ break;
++
++ ret = processor_extcntl_ops->pm_ops[type](pr, event);
++ break;
++ case PROCESSOR_HOTPLUG:
++ if (processor_extcntl_ops->hotplug)
++ ret = processor_extcntl_ops->hotplug(pr, type);
++ break;
++ default:
++ printk(KERN_ERR "Unsupported processor event %d.\n", event);
++ break;
++ }
++
++ return ret;
++}
++
++/*
++ * External control logic can decide to grab all or part of the physical
++ * processor control bits. Take a VMM for example: physical processors
++ * are owned by the VMM, so existence information such as hotplug must
++ * always be forwarded to it. Processor idle state is similar, being
++ * also necessarily controlled by the VMM. But for other control bits
++ * like performance/throttle states, the VMM may choose whether or not
++ * to take control, according to its own policy.
++ */
++void processor_extcntl_init(void)
++{
++ if (!processor_extcntl_ops)
++ arch_acpi_processor_init_extcntl(&processor_extcntl_ops);
++}
++
++/*
++ * This is called from ACPI processor init, and holds some tricky
++ * housekeeping jobs required by the external control model. For
++ * example, we may put dependency parsing stubs here for idle and
++ * performance states. That information may not be available to
++ * control logic split out of dom0, such as the cpufreq driver.
++ */
++int processor_extcntl_prepare(struct acpi_processor *pr)
++{
++ /* parse cstate dependency information */
++ if (processor_pm_external())
++ processor_extcntl_parse_csd(pr);
++
++ /* Initialize performance states */
++ if (processor_pmperf_external())
++ processor_extcntl_get_performance(pr);
++
++ return 0;
++}
++
++/*
++ * Currently no _CSD is implemented, which is why the existing ACPI
++ * code doesn't parse _CSD at all. But to keep the interface to the
++ * external control logic complete, we put a placeholder here for
++ * future compatibility.
++ */
++static int processor_extcntl_parse_csd(struct acpi_processor *pr)
++{
++ int i;
++
++ for (i = 0; i < pr->power.count; i++) {
++ if (!pr->power.states[i].valid)
++ continue;
++
++ /* No dependency by default */
++ pr->power.states[i].domain_info = NULL;
++ pr->power.states[i].csd_count = 0;
++ }
++
++ return 0;
++}
++
++/*
++ * The existing ACPI module does parse performance states at some
++ * point, when the acpi-cpufreq driver is loaded; however, we'd like
++ * to disable that to avoid conflicts with the external control
++ * logic. So we have to collect the raw performance information here
++ * when an ACPI processor object is found and started.
++ */
++static int processor_extcntl_get_performance(struct acpi_processor *pr)
++{
++ int ret;
++ struct acpi_processor_performance *perf;
++ struct acpi_psd_package *pdomain;
++
++ if (pr->performance)
++ return -EBUSY;
++
++ perf = kzalloc(sizeof(struct acpi_processor_performance), GFP_KERNEL);
++ if (!perf)
++ return -ENOMEM;
++
++ pr->performance = perf;
++ /* Get basic performance state information */
++ ret = acpi_processor_get_performance_info(pr);
++ if (ret < 0)
++ goto err_out;
++
++ /*
++ * Here we need to retrieve performance dependency information from
++ * the _PSD object. The existing interface is not used because it
++ * sticks to Linux cpu ids to construct its bitmaps, whereas we want
++ * to decouple ACPI processor objects from Linux cpu id logic. For
++ * example, even when Linux is configured as UP, we still want to
++ * report all ACPI processor objects to the external logic. In this
++ * case, it's preferable to use the ACPI ID instead.
++ */
++ pr->performance->domain_info.num_processors = 0;
++ ret = acpi_processor_get_psd(pr);
++ if (ret < 0)
++ goto err_out;
++
++ /* Some sanity checks */
++ pdomain = &pr->performance->domain_info;
++ if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
++ (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) ||
++ ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) &&
++ (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY) &&
++ (pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL))) {
++ ret = -EINVAL;
++ goto err_out;
++ }
++
++ /* Last step is to notify BIOS that external logic exists */
++ processor_notify_smm();
++
++ processor_notify_external(pr, PROCESSOR_PM_INIT, PM_TYPE_PERF);
++
++ return 0;
++err_out:
++ pr->performance = NULL;
++ kfree(perf);
++ return ret;
++}
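
The ops table consumed in this file is supplied by the architecture hook arch_acpi_processor_init_extcntl(). A hedged sketch of a backend registration; the handler names are invented, and only the two callback slots this file actually dereferences (pm_ops[type] and hotplug) are shown:

    /* Hypothetical external-control backend. */
    static int xen_pm_notify(struct acpi_processor *pr, int event)
    {
            /* Forward idle/performance state changes to the hypervisor. */
            return 0;
    }

    static int xen_hotplug_notify(struct acpi_processor *pr, int type)
    {
            /* Forward HOTPLUG_TYPE_ADD / HOTPLUG_TYPE_REMOVE events. */
            return 0;
    }

    static const struct processor_extcntl_ops xen_extcntl_ops = {
            .pm_ops[PM_TYPE_IDLE] = xen_pm_notify,
            .pm_ops[PM_TYPE_PERF] = xen_pm_notify,
            .hotplug              = xen_hotplug_notify,
    };

    void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
    {
            *ops = &xen_extcntl_ops;
    }
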
+diff -rpuN linux-2.6.18.8/drivers/acpi/processor_idle.c linux-2.6.18-xen-3.3.0/drivers/acpi/processor_idle.c
+--- linux-2.6.18.8/drivers/acpi/processor_idle.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/processor_idle.c 2008-08-21 11:36:07.000000000 +0200
+@@ -714,8 +714,12 @@ static int acpi_processor_get_power_info
+ (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
+ continue;
+
+- cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
+- 0 : reg->address;
++ if (!processor_pm_external())
++ cx.address = (reg->space_id ==
++ ACPI_ADR_SPACE_FIXED_HARDWARE) ?
++ 0 : reg->address;
++ else
++ cx.address = reg->address;
+
+ /* There should be an easy way to extract an integer... */
+ obj = (union acpi_object *)&(element->package.elements[1]);
+@@ -724,7 +728,9 @@ static int acpi_processor_get_power_info
+
+ cx.type = obj->integer.value;
+
+- if ((cx.type != ACPI_STATE_C1) &&
++ /* The following check doesn't apply to the external control case */
++ if (!processor_pm_external() &&
++ (cx.type != ACPI_STATE_C1) &&
+ (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
+ continue;
+
+@@ -743,6 +749,12 @@ static int acpi_processor_get_power_info
+
+ cx.power = obj->integer.value;
+
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++ /* cache the control register info to pass to external logic */
++ if (processor_pm_external())
++ memcpy(&cx.reg, reg, sizeof(*reg));
++#endif
++
+ current_count++;
+ memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
+
+@@ -985,12 +997,16 @@ int acpi_processor_cst_has_changed(struc
+ return -ENODEV;
+
+ /* Fall back to the default idle loop */
+- pm_idle = pm_idle_save;
++ if (!processor_pm_external())
++ pm_idle = pm_idle_save;
+ synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
+
+ pr->flags.power = 0;
+ result = acpi_processor_get_power_info(pr);
+- if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
++ if (processor_pm_external())
++ processor_notify_external(pr,
++ PROCESSOR_PM_CHANGE, PM_TYPE_IDLE);
++ else if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+ pm_idle = acpi_processor_idle;
+
+ return result;
+@@ -1122,7 +1138,7 @@ int acpi_processor_power_init(struct acp
+ pr->power.states[i].type);
+ printk(")\n");
+
+- if (pr->id == 0) {
++ if (!processor_pm_external() && (pr->id == 0)) {
+ pm_idle_save = pm_idle;
+ pm_idle = acpi_processor_idle;
+ }
+@@ -1141,6 +1157,9 @@ int acpi_processor_power_init(struct acp
+
+ pr->flags.power_setup_done = 1;
+
++ if (processor_pm_external())
++ processor_notify_external(pr,
++ PROCESSOR_PM_INIT, PM_TYPE_IDLE);
+ return 0;
+ }
+
+diff -rpuN linux-2.6.18.8/drivers/acpi/processor_perflib.c linux-2.6.18-xen-3.3.0/drivers/acpi/processor_perflib.c
+--- linux-2.6.18.8/drivers/acpi/processor_perflib.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/processor_perflib.c 2008-08-21 11:36:07.000000000 +0200
+@@ -66,6 +66,7 @@ static DEFINE_MUTEX(performance_mutex);
+
+ static int acpi_processor_ppc_status = 0;
+
++#ifdef CONFIG_CPU_FREQ
+ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+ {
+@@ -102,6 +103,7 @@ static int acpi_processor_ppc_notifier(s
+ static struct notifier_block acpi_ppc_notifier_block = {
+ .notifier_call = acpi_processor_ppc_notifier,
+ };
++#endif /* CONFIG_CPU_FREQ */
+
+ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+ {
+@@ -137,9 +139,15 @@ int acpi_processor_ppc_has_changed(struc
+ if (ret < 0)
+ return (ret);
+ else
++#ifdef CONFIG_CPU_FREQ
+ return cpufreq_update_policy(pr->id);
++#elif defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
++ return processor_notify_external(pr,
++ PROCESSOR_PM_CHANGE, PM_TYPE_PERF);
++#endif
+ }
+
++#ifdef CONFIG_CPU_FREQ
+ void acpi_processor_ppc_init(void)
+ {
+ if (!cpufreq_register_notifier
+@@ -158,6 +166,7 @@ void acpi_processor_ppc_exit(void)
+
+ acpi_processor_ppc_status &= ~PPC_REGISTERED;
+ }
++#endif /* CONFIG_CPU_FREQ */
+
+ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
+ {
+@@ -299,7 +308,10 @@ static int acpi_processor_get_performanc
+ return result;
+ }
+
+-static int acpi_processor_get_performance_info(struct acpi_processor *pr)
++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++static
++#endif
++int acpi_processor_get_performance_info(struct acpi_processor *pr)
+ {
+ int result = 0;
+ acpi_status status = AE_OK;
+@@ -331,6 +343,7 @@ static int acpi_processor_get_performanc
+ return 0;
+ }
+
++#ifdef CONFIG_CPU_FREQ
+ int acpi_processor_notify_smm(struct module *calling_module)
+ {
+ acpi_status status;
+@@ -398,6 +411,7 @@ int acpi_processor_notify_smm(struct mod
+ }
+
+ EXPORT_SYMBOL(acpi_processor_notify_smm);
++#endif /* CONFIG_CPU_FREQ */
+
+ #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
+ /* /proc/acpi/processor/../performance interface (DEPRECATED) */
+@@ -538,7 +552,10 @@ static void acpi_cpufreq_remove_file(str
+ }
+ #endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
+
+-static int acpi_processor_get_psd(struct acpi_processor *pr)
++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++static
++#endif
++int acpi_processor_get_psd(struct acpi_processor *pr)
+ {
+ int result = 0;
+ acpi_status status = AE_OK;
+diff -rpuN linux-2.6.18.8/drivers/acpi/resources/rsxface.c linux-2.6.18-xen-3.3.0/drivers/acpi/resources/rsxface.c
+--- linux-2.6.18.8/drivers/acpi/resources/rsxface.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/resources/rsxface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -476,8 +476,6 @@ acpi_rs_match_vendor_resource(struct acp
+ return (AE_CTRL_TERMINATE);
+ }
+
+-ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
+-
+ /*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_resources
+diff -rpuN linux-2.6.18.8/drivers/acpi/sleep/main.c linux-2.6.18-xen-3.3.0/drivers/acpi/sleep/main.c
+--- linux-2.6.18.8/drivers/acpi/sleep/main.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/sleep/main.c 2008-08-21 11:36:07.000000000 +0200
+@@ -91,7 +91,14 @@ static int acpi_pm_enter(suspend_state_t
+ break;
+
+ case PM_SUSPEND_MEM:
++#ifdef CONFIG_ACPI_PV_SLEEP
++ /* The hypervisor will save and restore the CPU context,
++ * so we can skip the low-level housekeeping here.
++ */
++ acpi_enter_sleep_state(acpi_state);
++#else
+ do_suspend_lowlevel();
++#endif
+ break;
+
+ case PM_SUSPEND_DISK:
+@@ -145,10 +152,12 @@ static int acpi_pm_finish(suspend_state_
+ /* reset firmware waking vector */
+ acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+
++#ifndef CONFIG_ACPI_PV_SLEEP
+ if (init_8259A_after_S1) {
+ printk("Broken toshiba laptop -> kicking interrupts\n");
+ init_8259A(0);
+ }
++#endif
+ return 0;
+ }
+
+diff -rpuN linux-2.6.18.8/drivers/acpi/sleep/poweroff.c linux-2.6.18-xen-3.3.0/drivers/acpi/sleep/poweroff.c
+--- linux-2.6.18.8/drivers/acpi/sleep/poweroff.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/sleep/poweroff.c 2008-08-21 11:36:07.000000000 +0200
+@@ -20,6 +20,7 @@
+ int acpi_sleep_prepare(u32 acpi_state)
+ {
+ #ifdef CONFIG_ACPI_SLEEP
++#ifndef CONFIG_ACPI_PV_SLEEP
+ /* do we have a wakeup address for S2 and S3? */
+ if (acpi_state == ACPI_STATE_S3) {
+ if (!acpi_wakeup_address) {
+@@ -30,6 +31,7 @@ int acpi_sleep_prepare(u32 acpi_state)
+ acpi_wakeup_address));
+
+ }
++#endif
+ ACPI_FLUSH_CPU_CACHE();
+ acpi_enable_wakeup_device_prep(acpi_state);
+ #endif
+diff -rpuN linux-2.6.18.8/drivers/acpi/sleep/proc.c linux-2.6.18-xen-3.3.0/drivers/acpi/sleep/proc.c
+--- linux-2.6.18.8/drivers/acpi/sleep/proc.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/acpi/sleep/proc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -251,6 +251,8 @@ acpi_system_write_alarm(struct file *fil
+
+ if (adjust) {
+ yr += CMOS_READ(RTC_YEAR);
++ if (acpi_gbl_FADT->century)
++ yr += CMOS_READ(acpi_gbl_FADT->century) * 100;
+ mo += CMOS_READ(RTC_MONTH);
+ day += CMOS_READ(RTC_DAY_OF_MONTH);
+ hr += CMOS_READ(RTC_HOURS);
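
The added century handling extends the two-digit RTC year by the FADT century register. A worked sketch of the arithmetic, assuming both CMOS values have already been decoded from BCD (the surrounding function takes care of that):

    /* Illustrative values only. */
    unsigned int century = 20;   /* CMOS century register, decoded */
    unsigned int yr      = 8;    /* CMOS RTC_YEAR, decoded */

    yr += century * 100;         /* full year: 2008 */
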
+diff -rpuN linux-2.6.18.8/drivers/block/floppy.c linux-2.6.18-xen-3.3.0/drivers/block/floppy.c
+--- linux-2.6.18.8/drivers/block/floppy.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/block/floppy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -4392,11 +4392,15 @@ static int floppy_grab_irq_and_dma(void)
+ if (fd_request_dma()) {
+ DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+- fd_free_irq();
+- spin_lock_irqsave(&floppy_usage_lock, flags);
+- usage_count--;
+- spin_unlock_irqrestore(&floppy_usage_lock, flags);
+- return -1;
++ if (can_use_virtual_dma & 2)
++ use_virtual_dma = can_use_virtual_dma = 1;
++ if (!(can_use_virtual_dma & 1)) {
++ fd_free_irq();
++ spin_lock_irqsave(&floppy_usage_lock, flags);
++ usage_count--;
++ spin_unlock_irqrestore(&floppy_usage_lock, flags);
++ return -1;
++ }
+ }
+
+ for (fdc = 0; fdc < N_FDC; fdc++) {
+diff -rpuN linux-2.6.18.8/drivers/char/agp/amd64-agp.c linux-2.6.18-xen-3.3.0/drivers/char/agp/amd64-agp.c
+--- linux-2.6.18.8/drivers/char/agp/amd64-agp.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/agp/amd64-agp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -15,6 +15,7 @@
+ #include <linux/mmzone.h>
+ #include <asm/page.h> /* PAGE_SIZE */
+ #include <asm/k8.h>
++#include <asm/e820.h>
+ #include "agp.h"
+
+ /* PTE bits. */
+@@ -252,7 +253,6 @@ static struct agp_bridge_driver amd_8151
+ /* Some basic sanity checks for the aperture. */
+ static int __devinit aperture_valid(u64 aper, u32 size)
+ {
+- u32 pfn, c;
+ if (aper == 0) {
+ printk(KERN_ERR PFX "No aperture\n");
+ return 0;
+@@ -265,14 +265,9 @@ static int __devinit aperture_valid(u64
+ printk(KERN_ERR PFX "Aperture out of bounds\n");
+ return 0;
+ }
+- pfn = aper >> PAGE_SHIFT;
+- for (c = 0; c < size/PAGE_SIZE; c++) {
+- if (!pfn_valid(pfn + c))
+- break;
+- if (!PageReserved(pfn_to_page(pfn + c))) {
+- printk(KERN_ERR PFX "Aperture pointing to RAM\n");
+- return 0;
+- }
++ if (e820_any_mapped(aper, aper + size, E820_RAM)) {
++ printk(KERN_ERR PFX "Aperture pointing to RAM\n");
++ return 0;
+ }
+
+ /* Request the Aperture. This catches cases when someone else
+diff -rpuN linux-2.6.18.8/drivers/char/agp/generic.c linux-2.6.18-xen-3.3.0/drivers/char/agp/generic.c
+--- linux-2.6.18.8/drivers/char/agp/generic.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/agp/generic.c 2008-08-21 11:36:07.000000000 +0200
+@@ -51,28 +51,6 @@ int agp_memory_reserved;
+ */
+ EXPORT_SYMBOL_GPL(agp_memory_reserved);
+
+-#if defined(CONFIG_X86)
+-int map_page_into_agp(struct page *page)
+-{
+- int i;
+- i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+- /* Caller's responsibility to call global_flush_tlb() for
+- * performance reasons */
+- return i;
+-}
+-EXPORT_SYMBOL_GPL(map_page_into_agp);
+-
+-int unmap_page_from_agp(struct page *page)
+-{
+- int i;
+- i = change_page_attr(page, 1, PAGE_KERNEL);
+- /* Caller's responsibility to call global_flush_tlb() for
+- * performance reasons */
+- return i;
+-}
+-EXPORT_SYMBOL_GPL(unmap_page_from_agp);
+-#endif
+-
+ /*
+ * Generic routines for handling agp_memory structures -
+ * They use the basic page allocation routines to do the brunt of the work.
+diff -rpuN linux-2.6.18.8/drivers/char/agp/intel-agp.c linux-2.6.18-xen-3.3.0/drivers/char/agp/intel-agp.c
+--- linux-2.6.18.8/drivers/char/agp/intel-agp.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/agp/intel-agp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -164,9 +164,17 @@ static void *i8xx_alloc_pages(void)
+ if (page == NULL)
+ return NULL;
+
++#ifdef CONFIG_XEN
++ if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) {
++ __free_pages(page, 2);
++ return NULL;
++ }
++#endif
++
+ if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
++ change_page_attr(page, 4, PAGE_KERNEL);
+ global_flush_tlb();
+- __free_page(page);
++ __free_pages(page, 2);
+ return NULL;
+ }
+ global_flush_tlb();
+@@ -186,9 +194,12 @@ static void i8xx_destroy_pages(void *add
+ page = virt_to_page(addr);
+ change_page_attr(page, 4, PAGE_KERNEL);
+ global_flush_tlb();
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
++#endif
+ put_page(page);
+ unlock_page(page);
+- free_pages((unsigned long)addr, 2);
++ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+ }
+
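
Under Xen, pages that are contiguous in pseudo-physical space are not necessarily contiguous in machine memory, which is what the i8xx GART actually addresses; hence the exchange calls added above. A minimal sketch of the allocate/free pairing, mirroring the calls in this hunk:

    /* Swap an order-2 block for machine frames that are contiguous
     * and addressable below 2^32, as the chipset requires. */
    struct page *page = alloc_pages(GFP_KERNEL, 2);

    if (page && xen_create_contiguous_region((unsigned long)page_address(page),
                                              2 /* order */, 32 /* addr bits */)) {
            __free_pages(page, 2);
            page = NULL;
    }

    /* On teardown, hand the machine frames back before freeing. */
    if (page) {
            xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
            __free_pages(page, 2);
    }
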
+diff -rpuN linux-2.6.18.8/drivers/char/Kconfig linux-2.6.18-xen-3.3.0/drivers/char/Kconfig
+--- linux-2.6.18.8/drivers/char/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -1005,7 +1005,7 @@ config MAX_RAW_DEVS
+ config HPET
+ bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ default n
+- depends on ACPI
++ depends on ACPI && !XEN
+ help
+ If you say Y here, you will have a miscdevice named "/dev/hpet/". Each
+ open selects one of the timers supported by the HPET. The timers are
+diff -rpuN linux-2.6.18.8/drivers/char/mem.c linux-2.6.18-xen-3.3.0/drivers/char/mem.c
+--- linux-2.6.18.8/drivers/char/mem.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/mem.c 2008-08-21 11:36:07.000000000 +0200
+@@ -101,6 +101,7 @@ static inline int valid_mmap_phys_addr_r
+ }
+ #endif
+
++#ifndef ARCH_HAS_DEV_MEM
+ /*
+ * This funcion reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -223,6 +224,7 @@ static ssize_t write_mem(struct file * f
+ *ppos += written;
+ return written;
+ }
++#endif
+
+ #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+@@ -262,6 +264,9 @@ static int mmap_mem(struct file * file,
+ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+ {
+ unsigned long pfn;
++#ifdef CONFIG_XEN
++ unsigned long i, count;
++#endif
+
+ /* Turn a kernel-virtual address into a physical page frame */
+ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+@@ -276,6 +281,13 @@ static int mmap_kmem(struct file * file,
+ if (!pfn_valid(pfn))
+ return -EIO;
+
++#ifdef CONFIG_XEN
++ count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ for (i = 0; i < count; i++)
++ if ((pfn + i) != mfn_to_local_pfn(pfn_to_mfn(pfn + i)))
++ return -EIO;
++#endif
++
+ vma->vm_pgoff = pfn;
+ return mmap_mem(file, vma);
+ }
+@@ -780,6 +792,7 @@ static int open_port(struct inode * inod
+ #define open_kmem open_mem
+ #define open_oldmem open_mem
+
++#ifndef ARCH_HAS_DEV_MEM
+ static const struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+@@ -787,6 +800,9 @@ static const struct file_operations mem_
+ .mmap = mmap_mem,
+ .open = open_mem,
+ };
++#else
++extern const struct file_operations mem_fops;
++#endif
+
+ static const struct file_operations kmem_fops = {
+ .llseek = memory_lseek,
+diff -rpuN linux-2.6.18.8/drivers/char/tpm/Kconfig linux-2.6.18-xen-3.3.0/drivers/char/tpm/Kconfig
+--- linux-2.6.18.8/drivers/char/tpm/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tpm/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -58,5 +58,13 @@ config TCG_INFINEON
+ Further information on this driver and the supported hardware
+ can be found at http://www.prosec.rub.de/tpm
+
+-endmenu
++config TCG_XEN
++ tristate "XEN TPM Interface"
++ depends on TCG_TPM && XEN
++ ---help---
++ If you want to make TPM support available to a Xen user domain,
++ say Yes and it will be accessible from within Linux.
++ To compile this driver as a module, choose M here; the module
++ will be called tpm_xenu.
+
++endmenu
+diff -rpuN linux-2.6.18.8/drivers/char/tpm/Makefile linux-2.6.18-xen-3.3.0/drivers/char/tpm/Makefile
+--- linux-2.6.18.8/drivers/char/tpm/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tpm/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+ obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
++obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
++tpm_xenu-y = tpm_xen.o tpm_vtpm.o
+diff -rpuN linux-2.6.18.8/drivers/char/tpm/tpm.h linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm.h
+--- linux-2.6.18.8/drivers/char/tpm/tpm.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -105,6 +105,9 @@ struct tpm_chip {
+ struct dentry **bios_dir;
+
+ struct list_head list;
++#ifdef CONFIG_XEN
++ void *priv;
++#endif
+ };
+
+ #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
+@@ -121,6 +124,18 @@ static inline void tpm_write_index(int b
+ outb(value & 0xFF, base+1);
+ }
+
++#ifdef CONFIG_XEN
++static inline void *chip_get_private(const struct tpm_chip *chip)
++{
++ return chip->priv;
++}
++
++static inline void chip_set_private(struct tpm_chip *chip, void *priv)
++{
++ chip->priv = priv;
++}
++#endif
++
+ extern void tpm_get_timeouts(struct tpm_chip *);
+ extern void tpm_gen_interrupt(struct tpm_chip *);
+ extern void tpm_continue_selftest(struct tpm_chip *);
+diff -rpuN linux-2.6.18.8/drivers/char/tpm/tpm_vtpm.c linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm_vtpm.c
+--- linux-2.6.18.8/drivers/char/tpm/tpm_vtpm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm_vtpm.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,542 @@
++/*
++ * Copyright (C) 2006 IBM Corporation
++ *
++ * Authors:
++ * Stefan Berger <stefanb@us.ibm.com>
++ *
++ * Generic device driver part for device drivers in a virtualized
++ * environment.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ */
++
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++/* read status bits */
++enum {
++ STATUS_BUSY = 0x01,
++ STATUS_DATA_AVAIL = 0x02,
++ STATUS_READY = 0x04
++};
++
++struct transmission {
++ struct list_head next;
++
++ unsigned char *request;
++ size_t request_len;
++ size_t request_buflen;
++
++ unsigned char *response;
++ size_t response_len;
++ size_t response_buflen;
++
++ unsigned int flags;
++};
++
++enum {
++ TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++};
++
++
++enum {
++ DATAEX_FLAG_QUEUED_ONLY = 0x1
++};
++
++
++/* local variables */
++
++/* local function prototypes */
++static int _vtpm_send_queued(struct tpm_chip *chip);
++
++
++/* =============================================================
++ * Some utility functions
++ * =============================================================
++ */
++static void vtpm_state_init(struct vtpm_state *vtpms)
++{
++ vtpms->current_request = NULL;
++ spin_lock_init(&vtpms->req_list_lock);
++ init_waitqueue_head(&vtpms->req_wait_queue);
++ INIT_LIST_HEAD(&vtpms->queued_requests);
++
++ vtpms->current_response = NULL;
++ spin_lock_init(&vtpms->resp_list_lock);
++ init_waitqueue_head(&vtpms->resp_wait_queue);
++
++ vtpms->disconnect_time = jiffies;
++}
++
++
++static inline struct transmission *transmission_alloc(void)
++{
++ return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
++}
++
++static unsigned char *
++transmission_set_req_buffer(struct transmission *t,
++ unsigned char *buffer, size_t len)
++{
++ if (t->request_buflen < len) {
++ kfree(t->request);
++ t->request = kmalloc(len, GFP_KERNEL);
++ if (!t->request) {
++ t->request_buflen = 0;
++ return NULL;
++ }
++ t->request_buflen = len;
++ }
++
++ memcpy(t->request, buffer, len);
++ t->request_len = len;
++
++ return t->request;
++}
++
++static unsigned char *
++transmission_set_res_buffer(struct transmission *t,
++ const unsigned char *buffer, size_t len)
++{
++ if (t->response_buflen < len) {
++ kfree(t->response);
++ t->response = kmalloc(len, GFP_ATOMIC);
++ if (!t->response) {
++ t->response_buflen = 0;
++ return NULL;
++ }
++ t->response_buflen = len;
++ }
++
++ memcpy(t->response, buffer, len);
++ t->response_len = len;
++
++ return t->response;
++}
++
++static inline void transmission_free(struct transmission *t)
++{
++ kfree(t->request);
++ kfree(t->response);
++ kfree(t);
++}
++
++/* =============================================================
++ * Interface with the lower layer driver
++ * =============================================================
++ */
++/*
++ * Lower layer uses this function to make a response available.
++ */
++int vtpm_vd_recv(const struct tpm_chip *chip,
++ const unsigned char *buffer, size_t count,
++ void *ptr)
++{
++ unsigned long flags;
++ int ret_size = 0;
++ struct transmission *t;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * The request list must contain only one request, and
++ * that element must be the one that was passed in from
++ * the front-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if (vtpms->current_request != ptr) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return 0;
++ }
++
++ if ((t = vtpms->current_request)) {
++ transmission_free(t);
++ vtpms->current_request = NULL;
++ }
++
++ t = transmission_alloc();
++ if (t) {
++ if (!transmission_set_res_buffer(t, buffer, count)) {
++ transmission_free(t);
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return -ENOMEM;
++ }
++ ret_size = count;
++ vtpms->current_response = t;
++ wake_up_interruptible(&vtpms->resp_wait_queue);
++ }
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++
++ return ret_size;
++}
++
++
++/*
++ * Lower layer indicates its status (connected/disconnected)
++ */
++void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
++{
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ vtpms->vd_status = vd_status;
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ vtpms->disconnect_time = jiffies;
++ }
++}
++
++/* =============================================================
++ * Interface with the generic TPM driver
++ * =============================================================
++ */
++static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * Check if the previous operation only queued the command.
++ * In that case there won't be a response, so just return
++ * from here and reset the flag. In any other case we should
++ * receive a response from the back-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
++ vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ /*
++ * The first few commands (measurements) must be
++ * queued since it might not be possible to talk to the
++ * TPM yet.
++ * Return a response of up to 30 '0's.
++ */
++
++ count = min_t(size_t, count, 30);
++ memset(buf, 0x0, count);
++ return count;
++ }
++ /*
++ * Check whether something is in the response list and, if
++ * there's nothing in the list, wait for something to appear.
++ */
++
++ if (!vtpms->current_response) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
++ 1000);
++ spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ rc = min(count, t->response_len);
++ memcpy(buf, t->response, rc);
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct transmission *t = transmission_alloc();
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ if (!t)
++ return -ENOMEM;
++ /*
++ * If there's a current request, it must be the
++ * previous request that has timed out.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if (vtpms->current_request != NULL) {
++ printk("WARNING: Sending although there is a request outstanding.\n"
++ " Previous request must have timed out.\n");
++ transmission_free(vtpms->current_request);
++ vtpms->current_request = NULL;
++ }
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ /*
++ * Queue the packet if the driver below is not yet
++ * ready, or if any packet is already in the queue.
++ * If the driver below is ready, unqueue all packets
++ * first before sending our current packet.
++ * For each unqueued packet, except for the last
++ * (=current) packet, call vtpm_recv to wait for the
++ * response to come back.
++ */
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ if (time_after(jiffies,
++ vtpms->disconnect_time + HZ * 10)) {
++ rc = -ENOENT;
++ } else {
++ goto queue_it;
++ }
++ } else {
++ /*
++ * Send all queued packets.
++ */
++ if (_vtpm_send_queued(chip) == 0) {
++
++ vtpms->current_request = t;
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ buf,
++ count,
++ t);
++ /*
++ * The generic TPM driver will call
++ * the function to receive the response.
++ */
++ if (rc < 0) {
++ vtpms->current_request = NULL;
++ goto queue_it;
++ }
++ } else {
++queue_it:
++ if (!transmission_set_req_buffer(t, buf, count)) {
++ transmission_free(t);
++ rc = -ENOMEM;
++ goto exit;
++ }
++ /*
++ * An error occurred. Don't even try
++ * to send the current request. Just
++ * queue it.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
++ list_add_tail(&t->next, &vtpms->queued_requests);
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++ }
++ }
++
++exit:
++ return rc;
++}
++
++
++/*
++ * Send all queued requests.
++ */
++static int _vtpm_send_queued(struct tpm_chip *chip)
++{
++ int rc;
++ int error = 0;
++ long flags;
++ unsigned char buffer[1];
++ struct vtpm_state *vtpms;
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++
++ while (!list_empty(&vtpms->queued_requests)) {
++ /*
++ * Need to dequeue them.
++ * Read the result into a dummy buffer.
++ */
++ struct transmission *qt = (struct transmission *)
++ vtpms->queued_requests.next;
++ list_del(&qt->next);
++ vtpms->current_request = qt;
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ qt->request,
++ qt->request_len,
++ qt);
++
++ if (rc < 0) {
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if ((qt = vtpms->current_request) != NULL) {
++ /*
++ * requeue it at the beginning
++ * of the list
++ */
++ list_add(&qt->next,
++ &vtpms->queued_requests);
++ }
++ vtpms->current_request = NULL;
++ error = 1;
++ break;
++ }
++ /*
++ * After this point qt is not valid anymore!
++ * It is freed when the front-end is delivering
++ * the data by calling tpm_recv
++ */
++ /*
++ * Receive response into provided dummy buffer
++ */
++ rc = vtpm_recv(chip, buffer, sizeof(buffer));
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ }
++
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ return error;
++}
++
++static void vtpm_cancel(struct tpm_chip *chip)
++{
++ unsigned long flags;
++ struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++
++ if (!vtpms->current_response && vtpms->current_request) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on(&vtpms->resp_wait_queue);
++ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
++}
++
++static u8 vtpm_status(struct tpm_chip *chip)
++{
++ u8 rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ /*
++ * Data are available if:
++ * - there's a current response
++ * - the last packet was queued only (this is fake, but necessary to
++ * get the generic TPM layer to call the receive function).
++ */
++ if (vtpms->current_response ||
++ 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
++ rc = STATUS_DATA_AVAIL;
++ } else if (!vtpms->current_response && !vtpms->current_request) {
++ rc = STATUS_READY;
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = tpm_open,
++ .read = tpm_read,
++ .write = tpm_write,
++ .release = tpm_release,
++};
++
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
++static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
++static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
++static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
++ NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute *vtpm_attrs[] = {
++ &dev_attr_pubek.attr,
++ &dev_attr_pcrs.attr,
++ &dev_attr_enabled.attr,
++ &dev_attr_active.attr,
++ &dev_attr_owned.attr,
++ &dev_attr_temp_deactivated.attr,
++ &dev_attr_caps.attr,
++ &dev_attr_cancel.attr,
++ NULL,
++};
++
++static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
++
++#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
++
++static struct tpm_vendor_specific tpm_vtpm = {
++ .recv = vtpm_recv,
++ .send = vtpm_send,
++ .cancel = vtpm_cancel,
++ .status = vtpm_status,
++ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
++ .req_complete_val = STATUS_DATA_AVAIL,
++ .req_canceled = STATUS_READY,
++ .attr_group = &vtpm_attr_grp,
++ .miscdev = {
++ .fops = &vtpm_ops,
++ },
++ .duration = {
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ },
++};
++
++struct tpm_chip *init_vtpm(struct device *dev,
++ struct tpm_private *tp)
++{
++ long rc;
++ struct tpm_chip *chip;
++ struct vtpm_state *vtpms;
++
++ vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
++ if (!vtpms)
++ return ERR_PTR(-ENOMEM);
++
++ vtpm_state_init(vtpms);
++ vtpms->tpm_private = tp;
++
++ chip = tpm_register_hardware(dev, &tpm_vtpm);
++ if (!chip) {
++ rc = -ENODEV;
++ goto err_free_mem;
++ }
++
++ chip_set_private(chip, vtpms);
++
++ return chip;
++
++err_free_mem:
++ kfree(vtpms);
++
++ return ERR_PTR(rc);
++}
++
++void cleanup_vtpm(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
++ tpm_remove_hardware(dev);
++ kfree(vtpms);
++}
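
tpm_vtpm.c is transport-agnostic: a lower-layer driver implements vtpm_vd_send() and calls back in through init_vtpm(), vtpm_vd_recv() and vtpm_vd_status(). A condensed sketch of that contract from the transport side; the probe and handler names are invented for illustration, and tpm_xen.c below is the real consumer:

    /* Transport-side skeleton, assuming the interface in tpm_vtpm.h. */
    static int example_probe(struct device *dev, struct tpm_private *tp)
    {
            struct tpm_chip *chip = init_vtpm(dev, tp);

            if (IS_ERR(chip))
                    return PTR_ERR(chip);
            vtpm_vd_status(chip, TPM_VD_STATUS_CONNECTED);
            return 0;
    }

    static void example_response_handler(struct tpm_chip *chip,
                                         const u8 *buf, size_t len, void *ptr)
    {
            /* Hand the back-end's response to the generic layer; ptr is
             * the cookie vtpm_send() passed down through vtpm_vd_send(). */
            vtpm_vd_recv(chip, buf, len, ptr);
    }
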
+diff -rpuN linux-2.6.18.8/drivers/char/tpm/tpm_vtpm.h linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm_vtpm.h
+--- linux-2.6.18.8/drivers/char/tpm/tpm_vtpm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm_vtpm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef TPM_VTPM_H
++#define TPM_VTPM_H
++
++struct tpm_chip;
++struct tpm_private;
++
++struct vtpm_state {
++ struct transmission *current_request;
++ spinlock_t req_list_lock;
++ wait_queue_head_t req_wait_queue;
++
++ struct list_head queued_requests;
++
++ struct transmission *current_response;
++ spinlock_t resp_list_lock;
++ wait_queue_head_t resp_wait_queue; /* processes waiting for responses */
++
++ u8 vd_status;
++ u8 flags;
++
++ unsigned long disconnect_time;
++
++ /*
++ * The following is a private structure of the underlying
++ * driver. It is passed as parameter in the send function.
++ */
++ struct tpm_private *tpm_private;
++};
++
++
++enum vdev_status {
++ TPM_VD_STATUS_DISCONNECTED = 0x0,
++ TPM_VD_STATUS_CONNECTED = 0x1
++};
++
++/* this function is called from tpm_vtpm.c */
++int vtpm_vd_send(struct tpm_private * tp,
++ const u8 * buf, size_t count, void *ptr);
++
++/* these functions are offered by tpm_vtpm.c */
++struct tpm_chip *init_vtpm(struct device *,
++ struct tpm_private *);
++void cleanup_vtpm(struct device *);
++int vtpm_vd_recv(const struct tpm_chip* chip,
++ const unsigned char *buffer, size_t count, void *ptr);
++void vtpm_vd_status(const struct tpm_chip *, u8 status);
++
++static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = chip_get_private(chip);
++ return vtpms->tpm_private;
++}
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/char/tpm/tpm_xen.c linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm_xen.c
+--- linux-2.6.18.8/drivers/char/tpm/tpm_xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tpm/tpm_xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,722 @@
++/*
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/errno.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/mutex.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++#undef DEBUG
++
++/* local structures */
++struct tpm_private {
++ struct tpm_chip *chip;
++
++ tpmif_tx_interface_t *tx;
++ atomic_t refcnt;
++ unsigned int irq;
++ u8 is_connected;
++ u8 is_suspended;
++
++ spinlock_t tx_lock;
++
++ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
++
++ atomic_t tx_busy;
++ void *tx_remember;
++
++ domid_t backend_id;
++ wait_queue_head_t wait_q;
++
++ struct xenbus_device *dev;
++ int ring_ref;
++};
++
++struct tx_buffer {
++ unsigned int size; /* available space in data */
++ unsigned int len; /* used space in data */
++ unsigned char *data; /* pointer to a page */
++};
++
++
++/* locally visible variables */
++static grant_ref_t gref_head;
++static struct tpm_private *my_priv;
++
++/* local function prototypes */
++static irqreturn_t tpmif_int(int irq,
++ void *tpm_priv,
++ struct pt_regs *ptregs);
++static void tpmif_rx_action(unsigned long unused);
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid);
++static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
++static void tpmif_free_tx_buffers(struct tpm_private *tp);
++static void tpmif_set_connected_state(struct tpm_private *tp,
++ u8 newstate);
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int userbuffer,
++ void *remember);
++static void destroy_tpmring(struct tpm_private *tp);
++void __exit tpmif_exit(void);
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++
++#define GRANT_INVALID_REF 0
++
++
++static inline int
++tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
++ int isuserbuffer)
++{
++ int copied = len;
++
++ if (len > txb->size)
++ copied = txb->size;
++ if (isuserbuffer) {
++ if (copy_from_user(txb->data, src, copied))
++ return -EFAULT;
++ } else {
++ memcpy(txb->data, src, copied);
++ }
++ txb->len = len;
++ return copied;
++}
++
++static inline struct tx_buffer *tx_buffer_alloc(void)
++{
++ struct tx_buffer *txb;
++
++ txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
++ if (!txb)
++ return NULL;
++
++ txb->len = 0;
++ txb->size = PAGE_SIZE;
++ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (txb->data == NULL) {
++ kfree(txb);
++ txb = NULL;
++ }
++
++ return txb;
++}
++
++
++static inline void tx_buffer_free(struct tx_buffer *txb)
++{
++ if (txb) {
++ free_page((long)txb->data);
++ kfree(txb);
++ }
++}
++
++/**************************************************************
++ Utility functions for the tpm_private structure
++**************************************************************/
++static void tpm_private_init(struct tpm_private *tp)
++{
++ spin_lock_init(&tp->tx_lock);
++ init_waitqueue_head(&tp->wait_q);
++ atomic_set(&tp->refcnt, 1);
++}
++
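++/* Drop a reference on the single shared tpm_private instance,
++ * freeing it (and its tx buffers) once the last user is gone. */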
++static void tpm_private_put(void)
++{
++ if (!atomic_dec_and_test(&my_priv->refcnt))
++ return;
++
++ tpmif_free_tx_buffers(my_priv);
++ kfree(my_priv);
++ my_priv = NULL;
++}
++
++static struct tpm_private *tpm_private_get(void)
++{
++ int err;
++
++ if (my_priv) {
++ atomic_inc(&my_priv->refcnt);
++ return my_priv;
++ }
++
++ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
++ if (!my_priv)
++ return NULL;
++
++ tpm_private_init(my_priv);
++ err = tpmif_allocate_tx_buffers(my_priv);
++ if (err < 0)
++ tpm_private_put();
++
++ return my_priv;
++}
++
++/**************************************************************
++
++ The interface to let the tpm plugin register its callback
++ function and send data to another partition using this module
++
++**************************************************************/
++
++static DEFINE_MUTEX(suspend_lock);
++/*
++ * Send data via this module by calling this function
++ */
++int vtpm_vd_send(struct tpm_private *tp,
++ const u8 * buf, size_t count, void *ptr)
++{
++ int sent;
++
++ mutex_lock(&suspend_lock);
++ sent = tpm_xmit(tp, buf, count, 0, ptr);
++ mutex_unlock(&suspend_lock);
++
++ return sent;
++}
++
++/**************************************************************
++ XENBUS support code
++**************************************************************/
++
++static int setup_tpmring(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ tpmif_tx_interface_t *sring;
++ int err;
++
++ tp->ring_ref = GRANT_INVALID_REF;
++
++ sring = (void *)__get_free_page(GFP_KERNEL);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ tp->tx = sring;
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ tp->tx = NULL;
++ xenbus_dev_fatal(dev, err, "allocating grant reference");
++ goto fail;
++ }
++ tp->ring_ref = err;
++
++ err = tpmif_connect(dev, tp, dev->otherend_id);
++ if (err)
++ goto fail;
++
++ return 0;
++fail:
++ destroy_tpmring(tp);
++ return err;
++}
++
++
++static void destroy_tpmring(struct tpm_private *tp)
++{
++ tpmif_set_connected_state(tp, 0);
++
++ if (tp->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(tp->ring_ref, (unsigned long)tp->tx);
++ tp->ring_ref = GRANT_INVALID_REF;
++ tp->tx = NULL;
++ }
++
++ if (tp->irq)
++ unbind_from_irqhandler(tp->irq, tp);
++
++ tp->irq = 0;
++}
++
++
++static int talk_to_backend(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ const char *message = NULL;
++ int err;
++ struct xenbus_transaction xbt;
++
++ err = setup_tpmring(dev, tp);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "setting up ring");
++ goto out;
++ }
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_tpmring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", tp->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(tp->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_tpmring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_error(dev, err, "%s", message);
++destroy_tpmring:
++ destroy_tpmring(tp);
++out:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ DPRINTK("\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ break;
++
++ case XenbusStateConnected:
++ tpmif_set_connected_state(tp, 1);
++ break;
++
++ case XenbusStateClosing:
++ tpmif_set_connected_state(tp, 0);
++ xenbus_frontend_closed(dev);
++ break;
++
++ case XenbusStateClosed:
++ tpmif_set_connected_state(tp, 0);
++ if (tp->is_suspended == 0)
++ device_unregister(&dev->dev);
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
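++
++/*
++ * Only three backend states matter to this frontend: Connected marks the
++ * link up (and, via tpmif_set_connected_state(), finishes a pending
++ * resume), Closing marks it down, and Closed additionally unregisters the
++ * device unless the disconnect is part of a suspend/resume cycle.
++ */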
++
++static int tpmfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ int handle;
++ struct tpm_private *tp = tpm_private_get();
++
++ if (!tp)
++ return -ENOMEM;
++
++ tp->chip = init_vtpm(&dev->dev, tp);
++ if (IS_ERR(tp->chip)) {
++ tpm_private_put();
++ return PTR_ERR(tp->chip);
++ }
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "handle", "%i", &handle);
++ if (XENBUS_EXIST_ERR(err))
++ return err;
++
++ if (err < 0) {
++ xenbus_dev_fatal(dev, err, "reading handle");
++ return err;
++ }
++
++ tp->dev = dev;
++
++ err = talk_to_backend(dev, tp);
++ if (err) {
++ tpm_private_put();
++ return err;
++ }
++
++ return 0;
++}
++
++
++static int tpmfront_remove(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ cleanup_vtpm(&dev->dev);
++ return 0;
++}
++
++static int tpmfront_suspend(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ u32 ctr;
++
++ /* Take the lock, preventing any application from sending. */
++ mutex_lock(&suspend_lock);
++ tp->is_suspended = 1;
++
++ for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
++ if ((ctr % 10) == 0)
++ printk(KERN_INFO "TPM-FE [INFO]: Waiting for outstanding "
++ "request.\n");
++ /* Wait for a request to be responded to. */
++ interruptible_sleep_on_timeout(&tp->wait_q, 100);
++ }
++
++ return 0;
++}
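++
++/*
++ * interruptible_sleep_on_timeout() is the old, racy sleep primitive; it
++ * is tolerable here because it merely paces the polling of tx_busy (100
++ * jiffies per pass) and tpmif_rx_action() wakes tp->wait_q once the
++ * outstanding response arrives. wait_event_interruptible_timeout() would
++ * be the modern replacement.
++ */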
++
++static int tpmfront_suspend_finish(struct tpm_private *tp)
++{
++ tp->is_suspended = 0;
++ /* Allow applications to send again. */
++ mutex_unlock(&suspend_lock);
++ return 0;
++}
++
++static int tpmfront_suspend_cancel(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ return tpmfront_suspend_finish(tp);
++}
++
++static int tpmfront_resume(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ return talk_to_backend(dev, tp);
++}
++
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid)
++{
++ int err;
++
++ tp->backend_id = domid;
++
++ err = bind_listening_port_to_irqhandler(
++ domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
++ if (err <= 0) {
++ WPRINTK("bind_listening_port_to_irqhandler failed "
++ "(err=%d)\n", err);
++ return err;
++ }
++ tp->irq = err;
++
++ return 0;
++}
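++
++/*
++ * bind_listening_port_to_irqhandler() allocates an unbound event channel
++ * for the backend domain and binds it to a local IRQ in one step,
++ * returning the IRQ number (> 0) on success; talk_to_backend() later
++ * recovers the port with irq_to_evtchn_port(tp->irq) when writing the
++ * event-channel key to xenstore.
++ */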
++
++static struct xenbus_device_id tpmfront_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++static struct xenbus_driver tpmfront = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmfront_ids,
++ .probe = tpmfront_probe,
++ .remove = tpmfront_remove,
++ .resume = tpmfront_resume,
++ .otherend_changed = backend_changed,
++ .suspend = tpmfront_suspend,
++ .suspend_cancel = tpmfront_suspend_cancel,
++};
++
++static void __init init_tpm_xenbus(void)
++{
++ xenbus_register_frontend(&tpmfront);
++}
++
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
++ tp->tx_buffers[i] = tx_buffer_alloc();
++ if (!tp->tx_buffers[i]) {
++ tpmif_free_tx_buffers(tp);
++ return -ENOMEM;
++ }
++ }
++ return 0;
++}
++
++static void tpmif_free_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
++ tx_buffer_free(tp->tx_buffers[i]);
++}
++
++static void tpmif_rx_action(unsigned long priv)
++{
++ struct tpm_private *tp = (struct tpm_private *)priv;
++ int i = 0;
++ unsigned int received;
++ unsigned int offset = 0;
++ u8 *buffer;
++ tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
++
++ atomic_set(&tp->tx_busy, 0);
++ wake_up_interruptible(&tp->wait_q);
++
++ received = tx->size;
++
++ buffer = kmalloc(received, GFP_ATOMIC);
++ if (!buffer)
++ return;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ tpmif_tx_request_t *tx;
++ unsigned int tocopy;
++
++ tx = &tp->tx->ring[i].req;
++ tocopy = tx->size;
++ if (tocopy > PAGE_SIZE)
++ tocopy = PAGE_SIZE;
++
++ memcpy(&buffer[offset], txb->data, tocopy);
++
++ gnttab_release_grant_reference(&gref_head, tx->ref);
++
++ offset += tocopy;
++ }
++
++ vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
++ kfree(buffer);
++}
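++
++/*
++ * The response reuses the pages that carried the request: the backend
++ * rewrites them in place and reports the total length in ring slot 0, so
++ * the loop above gathers at most PAGE_SIZE per slot until 'received'
++ * bytes have been copied, releasing each grant reference back to
++ * gref_head as its page is consumed.
++ */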
++
++
++static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++{
++ struct tpm_private *tp = tpm_priv;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tp->tx_lock, flags);
++ tpmif_rx_tasklet.data = (unsigned long)tp;
++ tasklet_schedule(&tpmif_rx_tasklet);
++ spin_unlock_irqrestore(&tp->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int isuserbuffer,
++ void *remember)
++{
++ tpmif_tx_request_t *tx;
++ TPMIF_RING_IDX i;
++ unsigned int offset = 0;
++
++ spin_lock_irq(&tp->tx_lock);
++
++ if (unlikely(atomic_read(&tp->tx_busy))) {
++ printk(KERN_WARNING "tpm_xmit: There's an outstanding "
++ "request/response on the way!\n");
++ spin_unlock_irq(&tp->tx_lock);
++ return -EBUSY;
++ }
++
++ if (tp->is_connected != 1) {
++ spin_unlock_irq(&tp->tx_lock);
++ return -EIO;
++ }
++
++ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ int copied;
++
++ if (!txb) {
++ DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
++ "Not transmitting anything!\n", i);
++ spin_unlock_irq(&tp->tx_lock);
++ return -EFAULT;
++ }
++
++ copied = tx_buffer_copy(txb, &buf[offset], count,
++ isuserbuffer);
++ if (copied < 0) {
++ /* An error occurred */
++ spin_unlock_irq(&tp->tx_lock);
++ return copied;
++ }
++ count -= copied;
++ offset += copied;
++
++ tx = &tp->tx->ring[i].req;
++ tx->addr = virt_to_machine(txb->data);
++ tx->size = txb->len;
++ tx->unused = 0;
++
++ DPRINTK("First 4 characters sent by TPM-FE are "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++
++ /* Get the granttable reference for this page. */
++ tx->ref = gnttab_claim_grant_reference(&gref_head);
++ if (tx->ref == -ENOSPC) {
++ spin_unlock_irq(&tp->tx_lock);
++ DPRINTK("Grant table claim reference failed in "
++ "func:%s line:%d file:%s\n",
++ __FUNCTION__, __LINE__, __FILE__);
++ return -ENOSPC;
++ }
++ gnttab_grant_foreign_access_ref(tx->ref,
++ tp->backend_id,
++ virt_to_mfn(txb->data),
++ 0 /*RW*/);
++ wmb();
++ }
++
++ atomic_set(&tp->tx_busy, 1);
++ tp->tx_remember = remember;
++
++ mb();
++
++ notify_remote_via_irq(tp->irq);
++
++ spin_unlock_irq(&tp->tx_lock);
++ return offset;
++}
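++
++/*
++ * Per-page transmit flow, in sketch form (gref_head is the reference pool
++ * pre-allocated in tpmif_init()):
++ *
++ *	tx->ref = gnttab_claim_grant_reference(&gref_head);
++ *	gnttab_grant_foreign_access_ref(tx->ref, tp->backend_id,
++ *					virt_to_mfn(txb->data), 0);  (0 = read/write)
++ *
++ * The wmb()/mb() pair makes the ring entries and tx_busy visible before
++ * the backend is kicked via notify_remote_via_irq(); tx_busy enforces the
++ * one-request-in-flight rule that tpmif_rx_action() clears on completion.
++ */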
++
++
++static void tpmif_notify_upperlayer(struct tpm_private *tp)
++{
++ /* Notify upper layer about the state of the connection to the BE. */
++ vtpm_vd_status(tp->chip, (tp->is_connected
++ ? TPM_VD_STATUS_CONNECTED
++ : TPM_VD_STATUS_DISCONNECTED));
++}
++
++
++static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
++{
++ /*
++ * Don't notify the upper layer if we are in suspend mode and should
++ * disconnect - the assumption is that we will resume. The mutex keeps
++ * applications from sending.
++ */
++ if (is_connected == 0 && tp->is_suspended == 1)
++ return;
++
++ /*
++ * Unlock the mutex if we are connected again
++ * after being suspended - now resuming.
++ * This also removes the suspend state.
++ */
++ if (is_connected == 1 && tp->is_suspended == 1)
++ tpmfront_suspend_finish(tp);
++
++ if (is_connected != tp->is_connected) {
++ tp->is_connected = is_connected;
++ tpmif_notify_upperlayer(tp);
++ }
++}
++
++
++
++/* =================================================================
++ * Initialization function.
++ * =================================================================
++ */
++
++
++static int __init tpmif_init(void)
++{
++ struct tpm_private *tp;
++
++ if (is_initial_xendomain())
++ return -EPERM;
++
++ tp = tpm_private_get();
++ if (!tp)
++ return -ENOMEM;
++
++ IPRINTK("Initialising the vTPM driver.\n");
++ if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
++ &gref_head) < 0) {
++ tpm_private_put();
++ return -EFAULT;
++ }
++
++ init_tpm_xenbus();
++ return 0;
++}
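++
++/*
++ * Refusing to load in the initial domain (-EPERM) reflects that dom0
++ * hosts the backend, not this frontend. The TPMIF_TX_RING_SIZE grant
++ * references claimed here up front let tpm_xmit() take one per page
++ * without sleeping while it holds tx_lock.
++ */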
++
++
++module_init(tpmif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/char/tty_io.c linux-2.6.18-xen-3.3.0/drivers/char/tty_io.c
+--- linux-2.6.18.8/drivers/char/tty_io.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/char/tty_io.c 2008-08-21 11:36:07.000000000 +0200
+@@ -130,6 +130,8 @@ LIST_HEAD(tty_drivers); /* linked list
+ vt.c for deeply disgusting hack reasons */
+ DEFINE_MUTEX(tty_mutex);
+
++int console_use_vt = 1;
++
+ #ifdef CONFIG_UNIX98_PTYS
+ extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
+ extern int pty_limit; /* Config limit on Unix98 ptys */
+@@ -2483,7 +2485,7 @@ retry_open:
+ goto got_driver;
+ }
+ #ifdef CONFIG_VT
+- if (device == MKDEV(TTY_MAJOR,0)) {
++ if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
+ extern struct tty_driver *console_driver;
+ driver = console_driver;
+ index = fg_console;
+@@ -3909,6 +3911,8 @@ static int __init tty_init(void)
+ #endif
+
+ #ifdef CONFIG_VT
++ if (!console_use_vt)
++ goto out_vt;
+ cdev_init(&vc0_cdev, &console_fops);
+ if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
+@@ -3916,6 +3920,7 @@ static int __init tty_init(void)
+ class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+
+ vty_init();
++ out_vt:
+ #endif
+ return 0;
+ }
+diff -rpuN linux-2.6.18.8/drivers/cpufreq/cpufreq_ondemand.c linux-2.6.18-xen-3.3.0/drivers/cpufreq/cpufreq_ondemand.c
+--- linux-2.6.18.8/drivers/cpufreq/cpufreq_ondemand.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/cpufreq/cpufreq_ondemand.c 2008-08-21 11:36:07.000000000 +0200
+@@ -96,6 +96,7 @@ static inline cputime64_t get_cpu_idle_t
+ return retval;
+ }
+
++
+ /************************** sysfs interface ************************/
+ static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+ {
+@@ -222,17 +223,14 @@ static struct attribute_group dbs_attr_g
+
+ /************************** sysfs end ************************/
+
+-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
++#ifndef CONFIG_XEN
++static int dbs_calc_load(struct cpu_dbs_info_s *this_dbs_info)
+ {
+- unsigned int idle_ticks, total_ticks;
+- unsigned int load;
+- cputime64_t cur_jiffies;
+-
+ struct cpufreq_policy *policy;
++ cputime64_t cur_jiffies;
++ cputime64_t total_ticks, idle_ticks;
+ unsigned int j;
+-
+- if (!this_dbs_info->enable)
+- return;
++ int load;
+
+ policy = this_dbs_info->cur_policy;
+ cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+@@ -240,7 +238,7 @@ static void dbs_check_cpu(struct cpu_dbs
+ this_dbs_info->prev_cpu_wall);
+ this_dbs_info->prev_cpu_wall = cur_jiffies;
+ if (!total_ticks)
+- return;
++ return 200;
+ /*
+ * Every sampling_rate, we check, if current idle time is less
+ * than 20% (default), then we try to increase frequency
+@@ -270,6 +268,81 @@ static void dbs_check_cpu(struct cpu_dbs
+ idle_ticks = tmp_idle_ticks;
+ }
+ load = (100 * (total_ticks - idle_ticks)) / total_ticks;
++ return load;
++}
++#else
++
++#include <xen/interface/platform.h>
++static int dbs_calc_load(struct cpu_dbs_info_s *this_dbs_info)
++{
++ int load = 0;
++ struct xen_platform_op op;
++ uint64_t idletime[NR_CPUS];
++ struct cpufreq_policy *policy;
++ unsigned int j;
++ cpumask_t cpumap;
++
++ policy = this_dbs_info->cur_policy;
++ cpumap = policy->cpus;
++
++ op.cmd = XENPF_getidletime;
++ set_xen_guest_handle(op.u.getidletime.cpumap_bitmap, (uint8_t *) cpus_addr(cpumap));
++ op.u.getidletime.cpumap_nr_cpus = NR_CPUS;
++ set_xen_guest_handle(op.u.getidletime.idletime, idletime);
++ if (HYPERVISOR_platform_op(&op))
++ return 200;
++
++ for_each_cpu_mask(j, cpumap) {
++ cputime64_t total_idle_nsecs, tmp_idle_nsecs;
++ cputime64_t total_wall_nsecs, tmp_wall_nsecs;
++ struct cpu_dbs_info_s *j_dbs_info;
++ unsigned long tmp_load, tmp_wall_msecs, tmp_idle_msecs;
++
++ j_dbs_info = &per_cpu(cpu_dbs_info, j);
++ total_idle_nsecs = idletime[j];
++ tmp_idle_nsecs = cputime64_sub(total_idle_nsecs,
++ j_dbs_info->prev_cpu_idle);
++ total_wall_nsecs = op.u.getidletime.now;
++ tmp_wall_nsecs = cputime64_sub(total_wall_nsecs,
++ j_dbs_info->prev_cpu_wall);
++
++ if (tmp_wall_nsecs == 0)
++ return 200;
++
++ j_dbs_info->prev_cpu_wall = total_wall_nsecs;
++ j_dbs_info->prev_cpu_idle = total_idle_nsecs;
++
++ /* Convert nsecs to msecs and clamp times to sane values. */
++ do_div(tmp_wall_nsecs, 1000000);
++ tmp_wall_msecs = tmp_wall_nsecs;
++ do_div(tmp_idle_nsecs, 1000000);
++ tmp_idle_msecs = tmp_idle_nsecs;
++ if (tmp_wall_msecs == 0)
++ tmp_wall_msecs = 1;
++ if (tmp_idle_msecs > tmp_wall_msecs)
++ tmp_idle_msecs = tmp_wall_msecs;
++
++ tmp_load = (100 * (tmp_wall_msecs - tmp_idle_msecs)) /
++ tmp_wall_msecs;
++ load = max(load, min(100, (int) tmp_load));
++ }
++ return load;
++}
++#endif
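++
++/*
++ * Both dbs_calc_load() variants return a load percentage in [0, 100], or
++ * the out-of-band value 200 when no usable sample exists (zero wall-time
++ * delta, or a failed XENPF_getidletime hypercall); dbs_check_cpu() below
++ * treats anything above 100 as "skip this sampling period".
++ */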
++
++static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
++{
++ int load;
++
++ struct cpufreq_policy *policy;
++
++ if (!this_dbs_info->enable)
++ return;
++
++ policy = this_dbs_info->cur_policy;
++ load = dbs_calc_load(this_dbs_info);
++ if (load > 100)
++ return;
+
+ /* Check for frequency increase */
+ if (load > dbs_tuners_ins.up_threshold) {
+diff -rpuN linux-2.6.18.8/drivers/cpufreq/cpufreq_stats.c linux-2.6.18-xen-3.3.0/drivers/cpufreq/cpufreq_stats.c
+--- linux-2.6.18.8/drivers/cpufreq/cpufreq_stats.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/cpufreq/cpufreq_stats.c 2008-08-21 11:36:07.000000000 +0200
+@@ -292,6 +292,9 @@ cpufreq_stat_notifier_trans (struct noti
+ if (old_index == new_index)
+ return 0;
+
++ if ((old_index < 0) || (new_index < 0))
++ return 0;
++
+ spin_lock(&cpufreq_stats_lock);
+ stat->last_index = new_index;
+ #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+diff -rpuN linux-2.6.18.8/drivers/cpufreq/Kconfig linux-2.6.18-xen-3.3.0/drivers/cpufreq/Kconfig
+--- linux-2.6.18.8/drivers/cpufreq/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/cpufreq/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -1,5 +1,6 @@
+ config CPU_FREQ
+ bool "CPU Frequency scaling"
++ depends on !PROCESSOR_EXTERNAL_CONTROL
+ help
+ CPU Frequency scaling allows you to change the clock speed of
+ CPUs on the fly. This is a nice method to save power, because
+diff -rpuN linux-2.6.18.8/drivers/i2c/busses/i2c-i801.c linux-2.6.18-xen-3.3.0/drivers/i2c/busses/i2c-i801.c
+--- linux-2.6.18.8/drivers/i2c/busses/i2c-i801.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/i2c/busses/i2c-i801.c 2008-08-21 11:36:07.000000000 +0200
+@@ -33,6 +33,9 @@
+ ICH7 27DA
+ ESB2 269B
+ ICH8 283E
++ ICH9 2930
++ ICH10 3A30
++ ICH10 3A60
+ This driver supports several versions of Intel's I/O Controller Hubs (ICH).
+ For SMBus support, they are similar to the PIIX4 and are part
+ of Intel's '810' and other chipsets.
+@@ -457,6 +460,9 @@ static struct pci_device_id i801_ids[] =
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_17) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
+ { 0, }
+ };
+
+diff -rpuN linux-2.6.18.8/drivers/i2c/busses/Kconfig linux-2.6.18-xen-3.3.0/drivers/i2c/busses/Kconfig
+--- linux-2.6.18.8/drivers/i2c/busses/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/i2c/busses/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -125,6 +125,8 @@ config I2C_I801
+ ICH7
+ ESB2
+ ICH8
++ ICH9
++ ICH10
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff -rpuN linux-2.6.18.8/drivers/ide/ide-lib.c linux-2.6.18-xen-3.3.0/drivers/ide/ide-lib.c
+--- linux-2.6.18.8/drivers/ide/ide-lib.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/ide/ide-lib.c 2008-08-21 11:36:07.000000000 +0200
+@@ -408,10 +408,10 @@ void ide_toggle_bounce(ide_drive_t *driv
+ {
+ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
+
+- if (!PCI_DMA_BUS_IS_PHYS) {
+- addr = BLK_BOUNCE_ANY;
+- } else if (on && drive->media == ide_disk) {
+- if (HWIF(drive)->pci_dev)
++ if (on && drive->media == ide_disk) {
++ if (!PCI_DMA_BUS_IS_PHYS)
++ addr = BLK_BOUNCE_ANY;
++ else if (HWIF(drive)->pci_dev)
+ addr = HWIF(drive)->pci_dev->dma_mask;
+ }
+
+diff -rpuN linux-2.6.18.8/drivers/ide/pci/atiixp.c linux-2.6.18-xen-3.3.0/drivers/ide/pci/atiixp.c
+--- linux-2.6.18.8/drivers/ide/pci/atiixp.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/ide/pci/atiixp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -348,6 +348,7 @@ static struct pci_device_id atiixp_pci_t
+ { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
++ { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { 0, },
+ };
+ MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
+diff -rpuN linux-2.6.18.8/drivers/input/gameport/gameport.c linux-2.6.18-xen-3.3.0/drivers/input/gameport/gameport.c
+--- linux-2.6.18.8/drivers/input/gameport/gameport.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/input/gameport/gameport.c 2008-08-21 11:36:07.000000000 +0200
+@@ -37,8 +37,6 @@ EXPORT_SYMBOL(gameport_unregister_driver
+ EXPORT_SYMBOL(gameport_open);
+ EXPORT_SYMBOL(gameport_close);
+ EXPORT_SYMBOL(gameport_rescan);
+-EXPORT_SYMBOL(gameport_cooked_read);
+-EXPORT_SYMBOL(gameport_set_name);
+ EXPORT_SYMBOL(gameport_set_phys);
+ EXPORT_SYMBOL(gameport_start_polling);
+ EXPORT_SYMBOL(gameport_stop_polling);
+diff -rpuN linux-2.6.18.8/drivers/Makefile linux-2.6.18-xen-3.3.0/drivers/Makefile
+--- linux-2.6.18.8/drivers/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -31,6 +31,7 @@ obj-y += base/ block/ misc/ mfd/ net/
+ obj-$(CONFIG_NUBUS) += nubus/
+ obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_PPC_PMAC) += macintosh/
++obj-$(CONFIG_XEN) += xen/
+ obj-$(CONFIG_IDE) += ide/
+ obj-$(CONFIG_FC4) += fc4/
+ obj-$(CONFIG_SCSI) += scsi/
+diff -rpuN linux-2.6.18.8/drivers/net/Kconfig linux-2.6.18-xen-3.3.0/drivers/net/Kconfig
+--- linux-2.6.18.8/drivers/net/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -2399,6 +2399,8 @@ config MYRI10GE
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called myri10ge.
+
++source "drivers/net/sfc/Kconfig"
++
+ endmenu
+
+ source "drivers/net/tokenring/Kconfig"
+diff -rpuN linux-2.6.18.8/drivers/net/Makefile linux-2.6.18-xen-3.3.0/drivers/net/Makefile
+--- linux-2.6.18.8/drivers/net/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -219,3 +219,5 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o
+
+ obj-$(CONFIG_FS_ENET) += fs_enet/
+
++obj-$(CONFIG_SFC) += sfc/
++
+diff -rpuN linux-2.6.18.8/drivers/net/r8169.c linux-2.6.18-xen-3.3.0/drivers/net/r8169.c
+--- linux-2.6.18.8/drivers/net/r8169.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/r8169.c 2008-08-21 11:36:07.000000000 +0200
+@@ -6,26 +6,26 @@
+ History:
+ Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
+ May 20 2002 - Add link status force-mode and TBI mode support.
+- 2004 - Massive updates. See kernel SCM system for details.
++ 2004 - Massive updates. See kernel SCM system for details.
+ =========================================================================
+ 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
+ Command: 'insmod r8169 media = SET_MEDIA'
+ Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
+-
++
+ SET_MEDIA can be:
+ _10_Half = 0x01
+ _10_Full = 0x02
+ _100_Half = 0x04
+ _100_Full = 0x08
+ _1000_Full = 0x10
+-
++
+ 2. Support TBI mode.
+ =========================================================================
+ VERSION 1.1 <2002/10/4>
+
+ The bit4:0 of MII register 4 is called "selector field", and have to be
+ 00001b to indicate support of IEEE std 802.3 during NWay process of
+- exchanging Link Code Word (FLP).
++ exchanging Link Code Word (FLP).
+
+ VERSION 1.2 <2002/11/30>
+
+@@ -81,10 +81,10 @@ VERSION 2.2LK <2005/01/25>
+
+ #ifdef RTL8169_DEBUG
+ #define assert(expr) \
+- if(!(expr)) { \
+- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+- #expr,__FILE__,__FUNCTION__,__LINE__); \
+- }
++ if (!(expr)) { \
++ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
++ #expr,__FILE__,__FUNCTION__,__LINE__); \
++ }
+ #define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0)
+ #else
+ #define assert(expr) do {} while (0)
+@@ -150,11 +150,16 @@ static const int multicast_filter_limit
+ #define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+
+ enum mac_version {
+- RTL_GIGA_MAC_VER_B = 0x00,
+- /* RTL_GIGA_MAC_VER_C = 0x03, */
+- RTL_GIGA_MAC_VER_D = 0x01,
+- RTL_GIGA_MAC_VER_E = 0x02,
+- RTL_GIGA_MAC_VER_X = 0x04 /* Greater than RTL_GIGA_MAC_VER_E */
++ RTL_GIGA_MAC_VER_01 = 0x00,
++ RTL_GIGA_MAC_VER_02 = 0x01,
++ RTL_GIGA_MAC_VER_03 = 0x02,
++ RTL_GIGA_MAC_VER_04 = 0x03,
++ RTL_GIGA_MAC_VER_05 = 0x04,
++ RTL_GIGA_MAC_VER_11 = 0x0b,
++ RTL_GIGA_MAC_VER_12 = 0x0c,
++ RTL_GIGA_MAC_VER_13 = 0x0d,
++ RTL_GIGA_MAC_VER_14 = 0x0e,
++ RTL_GIGA_MAC_VER_15 = 0x0f
+ };
+
+ enum phy_version {
+@@ -166,7 +171,6 @@ enum phy_version {
+ RTL_GIGA_PHY_VER_H = 0x08, /* PHY Reg 0x03 bit0-3 == 0x0003 */
+ };
+
+-
+ #define _R(NAME,MAC,MASK) \
+ { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
+
+@@ -175,19 +179,45 @@ static const struct {
+ u8 mac_version;
+ u32 RxConfigMask; /* Clears the bits supported by this chip */
+ } rtl_chip_info[] = {
+- _R("RTL8169", RTL_GIGA_MAC_VER_B, 0xff7e1880),
+- _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_D, 0xff7e1880),
+- _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_E, 0xff7e1880),
+- _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_X, 0xff7e1880),
++ _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880),
++ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880),
++ _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880),
++ _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880),
++ _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880),
++ _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
++ _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
++ _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
++ _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
++ _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880) // PCI-E 8139
+ };
+ #undef _R
+
++enum cfg_version {
++ RTL_CFG_0 = 0x00,
++ RTL_CFG_1,
++ RTL_CFG_2
++};
++
++static const struct {
++ unsigned int region;
++ unsigned int align;
++} rtl_cfg_info[] = {
++ [RTL_CFG_0] = { 1, NET_IP_ALIGN },
++ [RTL_CFG_1] = { 2, NET_IP_ALIGN },
++ [RTL_CFG_2] = { 2, 8 }
++};
++
+ static struct pci_device_id rtl8169_pci_tbl[] = {
+- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
+- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), },
+- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
+- { PCI_DEVICE(0x16ec, 0x0116), },
+- { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
++ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
++ { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
++ { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
++ { PCI_VENDOR_ID_LINKSYS, 0x1032,
++ PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
+ {0,},
+ };
+
+@@ -257,10 +287,11 @@ enum RTL8169_register_content {
+ RxOK = 0x01,
+
+ /* RxStatusDesc */
+- RxRES = 0x00200000,
+- RxCRC = 0x00080000,
+- RxRUNT = 0x00100000,
+- RxRWT = 0x00400000,
++ RxFOVF = (1 << 23),
++ RxRWT = (1 << 22),
++ RxRES = (1 << 21),
++ RxRUNT = (1 << 20),
++ RxCRC = (1 << 19),
+
+ /* ChipCmdBits */
+ CmdReset = 0x10,
+@@ -326,30 +357,6 @@ enum RTL8169_register_content {
+ LinkStatus = 0x02,
+ FullDup = 0x01,
+
+- /* GIGABIT_PHY_registers */
+- PHY_CTRL_REG = 0,
+- PHY_STAT_REG = 1,
+- PHY_AUTO_NEGO_REG = 4,
+- PHY_1000_CTRL_REG = 9,
+-
+- /* GIGABIT_PHY_REG_BIT */
+- PHY_Restart_Auto_Nego = 0x0200,
+- PHY_Enable_Auto_Nego = 0x1000,
+-
+- /* PHY_STAT_REG = 1 */
+- PHY_Auto_Neco_Comp = 0x0020,
+-
+- /* PHY_AUTO_NEGO_REG = 4 */
+- PHY_Cap_10_Half = 0x0020,
+- PHY_Cap_10_Full = 0x0040,
+- PHY_Cap_100_Half = 0x0080,
+- PHY_Cap_100_Full = 0x0100,
+-
+- /* PHY_1000_CTRL_REG = 9 */
+- PHY_Cap_1000_Full = 0x0200,
+-
+- PHY_Cap_Null = 0x0,
+-
+ /* _MediaType */
+ _10_Half = 0x01,
+ _10_Full = 0x02,
+@@ -417,6 +424,7 @@ struct ring_info {
+ struct rtl8169_private {
+ void __iomem *mmio_addr; /* memory map physical address */
+ struct pci_dev *pci_dev; /* Index of PCI device */
++ struct net_device *dev;
+ struct net_device_stats stats; /* statistics of net device */
+ spinlock_t lock; /* spin lock flag */
+ u32 msg_enable;
+@@ -433,6 +441,7 @@ struct rtl8169_private {
+ dma_addr_t RxPhyAddr;
+ struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
++ unsigned align;
+ unsigned rx_buf_sz;
+ struct timer_list timer;
+ u16 cp_cmd;
+@@ -466,8 +475,7 @@ MODULE_VERSION(RTL8169_VERSION);
+
+ static int rtl8169_open(struct net_device *dev);
+ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
+-static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance,
+- struct pt_regs *regs);
++static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+ static int rtl8169_init_ring(struct net_device *dev);
+ static void rtl8169_hw_start(struct net_device *dev);
+ static int rtl8169_close(struct net_device *dev);
+@@ -488,12 +496,7 @@ static const u16 rtl8169_intr_mask =
+ static const u16 rtl8169_napi_event =
+ RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
+ static const unsigned int rtl8169_rx_config =
+- (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
+-
+-#define PHY_Cap_10_Half_Or_Less PHY_Cap_10_Half
+-#define PHY_Cap_10_Full_Or_Less PHY_Cap_10_Full | PHY_Cap_10_Half_Or_Less
+-#define PHY_Cap_100_Half_Or_Less PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less
+-#define PHY_Cap_100_Full_Or_Less PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less
++ (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
+
+ static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
+ {
+@@ -503,7 +506,7 @@ static void mdio_write(void __iomem *ioa
+
+ for (i = 20; i > 0; i--) {
+ /* Check if the RTL8169 has completed writing to the specified MII register */
+- if (!(RTL_R32(PHYAR) & 0x80000000))
++ if (!(RTL_R32(PHYAR) & 0x80000000))
+ break;
+ udelay(25);
+ }
+@@ -547,7 +550,7 @@ static unsigned int rtl8169_tbi_reset_pe
+
+ static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
+ {
+- return mdio_read(ioaddr, 0) & 0x8000;
++ return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
+ }
+
+ static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
+@@ -569,8 +572,8 @@ static void rtl8169_xmii_reset_enable(vo
+ {
+ unsigned int val;
+
+- val = (mdio_read(ioaddr, PHY_CTRL_REG) | 0x8000) & 0xffff;
+- mdio_write(ioaddr, PHY_CTRL_REG, val);
++ mdio_write(ioaddr, MII_BMCR, BMCR_RESET);
++ val = mdio_read(ioaddr, MII_BMCR);
+ }
+
+ static void rtl8169_check_link_status(struct net_device *dev,
+@@ -608,7 +611,7 @@ static void rtl8169_link_option(int idx,
+ { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
+ }, *p;
+ unsigned char option;
+-
++
+ option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
+
+ if ((option != 0xff) && !idx && netif_msg_drv(&debug))
+@@ -650,9 +653,9 @@ static void rtl8169_get_wol(struct net_d
+ if (options & UWF)
+ wol->wolopts |= WAKE_UCAST;
+ if (options & BWF)
+- wol->wolopts |= WAKE_BCAST;
++ wol->wolopts |= WAKE_BCAST;
+ if (options & MWF)
+- wol->wolopts |= WAKE_MCAST;
++ wol->wolopts |= WAKE_MCAST;
+
+ out_unlock:
+ spin_unlock_irq(&tp->lock);
+@@ -745,38 +748,57 @@ static int rtl8169_set_speed_xmii(struct
+ void __iomem *ioaddr = tp->mmio_addr;
+ int auto_nego, giga_ctrl;
+
+- auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
+- auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_10_Full |
+- PHY_Cap_100_Half | PHY_Cap_100_Full);
+- giga_ctrl = mdio_read(ioaddr, PHY_1000_CTRL_REG);
+- giga_ctrl &= ~(PHY_Cap_1000_Full | PHY_Cap_Null);
++ auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
++ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
++ ADVERTISE_100HALF | ADVERTISE_100FULL);
++ giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
++ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+ if (autoneg == AUTONEG_ENABLE) {
+- auto_nego |= (PHY_Cap_10_Half | PHY_Cap_10_Full |
+- PHY_Cap_100_Half | PHY_Cap_100_Full);
+- giga_ctrl |= PHY_Cap_1000_Full;
++ auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
++ ADVERTISE_100HALF | ADVERTISE_100FULL);
++ giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+ } else {
+ if (speed == SPEED_10)
+- auto_nego |= PHY_Cap_10_Half | PHY_Cap_10_Full;
++ auto_nego |= ADVERTISE_10HALF | ADVERTISE_10FULL;
+ else if (speed == SPEED_100)
+- auto_nego |= PHY_Cap_100_Half | PHY_Cap_100_Full;
++ auto_nego |= ADVERTISE_100HALF | ADVERTISE_100FULL;
+ else if (speed == SPEED_1000)
+- giga_ctrl |= PHY_Cap_1000_Full;
++ giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
+
+ if (duplex == DUPLEX_HALF)
+- auto_nego &= ~(PHY_Cap_10_Full | PHY_Cap_100_Full);
++ auto_nego &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
+
+ if (duplex == DUPLEX_FULL)
+- auto_nego &= ~(PHY_Cap_10_Half | PHY_Cap_100_Half);
++ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
++
++ /* This tweak comes straight from Realtek's driver. */
++ if ((speed == SPEED_100) && (duplex == DUPLEX_HALF) &&
++ (tp->mac_version == RTL_GIGA_MAC_VER_13)) {
++ auto_nego = ADVERTISE_100HALF | ADVERTISE_CSMA;
++ }
+ }
+
++ /* The 8100e/8101e do Fast Ethernet only. */
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
++ if ((giga_ctrl & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
++ netif_msg_link(tp)) {
++ printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
++ dev->name);
++ }
++ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
++ }
++
++ auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
++
+ tp->phy_auto_nego_reg = auto_nego;
+ tp->phy_1000_ctrl_reg = giga_ctrl;
+
+- mdio_write(ioaddr, PHY_AUTO_NEGO_REG, auto_nego);
+- mdio_write(ioaddr, PHY_1000_CTRL_REG, giga_ctrl);
+- mdio_write(ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego |
+- PHY_Restart_Auto_Nego);
++ mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
++ mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
++ mdio_write(ioaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
+ return 0;
+ }
+
+@@ -788,7 +810,7 @@ static int rtl8169_set_speed(struct net_
+
+ ret = tp->set_speed(dev, autoneg, speed, duplex);
+
+- if (netif_running(dev) && (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
++ if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
+ mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
+
+ return ret;
+@@ -803,7 +825,7 @@ static int rtl8169_set_settings(struct n
+ spin_lock_irqsave(&tp->lock, flags);
+ ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+ spin_unlock_irqrestore(&tp->lock, flags);
+-
++
+ return ret;
+ }
+
+@@ -936,20 +958,20 @@ static void rtl8169_gset_xmii(struct net
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+- SUPPORTED_TP;
++ SUPPORTED_TP;
+
+ cmd->autoneg = 1;
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+
+- if (tp->phy_auto_nego_reg & PHY_Cap_10_Half)
++ if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+- if (tp->phy_auto_nego_reg & PHY_Cap_10_Full)
++ if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+- if (tp->phy_auto_nego_reg & PHY_Cap_100_Half)
++ if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+- if (tp->phy_auto_nego_reg & PHY_Cap_100_Full)
++ if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+- if (tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full)
++ if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ status = RTL_R8(PHYstatus);
+@@ -961,6 +983,11 @@ static void rtl8169_gset_xmii(struct net
+ else if (status & _10bps)
+ cmd->speed = SPEED_10;
+
++ if (status & TxFlowCtrl)
++ cmd->advertising |= ADVERTISED_Asym_Pause;
++ if (status & RxFlowCtrl)
++ cmd->advertising |= ADVERTISED_Pause;
++
+ cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+@@ -981,15 +1008,15 @@ static int rtl8169_get_settings(struct n
+ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+ {
+- struct rtl8169_private *tp = netdev_priv(dev);
+- unsigned long flags;
++ struct rtl8169_private *tp = netdev_priv(dev);
++ unsigned long flags;
+
+- if (regs->len > R8169_REGS_SIZE)
+- regs->len = R8169_REGS_SIZE;
++ if (regs->len > R8169_REGS_SIZE)
++ regs->len = R8169_REGS_SIZE;
+
+- spin_lock_irqsave(&tp->lock, flags);
+- memcpy_fromio(p, tp->mmio_addr, regs->len);
+- spin_unlock_irqrestore(&tp->lock, flags);
++ spin_lock_irqsave(&tp->lock, flags);
++ memcpy_fromio(p, tp->mmio_addr, regs->len);
++ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ static u32 rtl8169_get_msglevel(struct net_device *dev)
+@@ -1071,7 +1098,7 @@ static void rtl8169_get_ethtool_stats(st
+ RTL_W32(CounterAddrLow, 0);
+ RTL_W32(CounterAddrHigh, 0);
+
+- data[0] = le64_to_cpu(counters->tx_packets);
++ data[0] = le64_to_cpu(counters->tx_packets);
+ data[1] = le64_to_cpu(counters->rx_packets);
+ data[2] = le64_to_cpu(counters->tx_errors);
+ data[3] = le32_to_cpu(counters->rx_errors);
+@@ -1098,7 +1125,7 @@ static void rtl8169_get_strings(struct n
+ }
+
+
+-static struct ethtool_ops rtl8169_ethtool_ops = {
++static /*const*/ struct ethtool_ops rtl8169_ethtool_ops = {
+ .get_drvinfo = rtl8169_get_drvinfo,
+ .get_regs_len = rtl8169_get_regs_len,
+ .get_link = ethtool_op_get_link,
+@@ -1131,7 +1158,7 @@ static void rtl8169_write_gmii_reg_bit(v
+ val = mdio_read(ioaddr, reg);
+ val = (bitval == 1) ?
+ val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
+- mdio_write(ioaddr, reg, val & 0xffff);
++ mdio_write(ioaddr, reg, val & 0xffff);
+ }
+
+ static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
+@@ -1140,10 +1167,16 @@ static void rtl8169_get_mac_version(stru
+ u32 mask;
+ int mac_version;
+ } mac_info[] = {
+- { 0x1 << 28, RTL_GIGA_MAC_VER_X },
+- { 0x1 << 26, RTL_GIGA_MAC_VER_E },
+- { 0x1 << 23, RTL_GIGA_MAC_VER_D },
+- { 0x00000000, RTL_GIGA_MAC_VER_B } /* Catch-all */
++ { 0x38800000, RTL_GIGA_MAC_VER_15 },
++ { 0x38000000, RTL_GIGA_MAC_VER_12 },
++ { 0x34000000, RTL_GIGA_MAC_VER_13 },
++ { 0x30800000, RTL_GIGA_MAC_VER_14 },
++ { 0x30000000, RTL_GIGA_MAC_VER_11 },
++ { 0x18000000, RTL_GIGA_MAC_VER_05 },
++ { 0x10000000, RTL_GIGA_MAC_VER_04 },
++ { 0x04000000, RTL_GIGA_MAC_VER_03 },
++ { 0x00800000, RTL_GIGA_MAC_VER_02 },
++ { 0x00000000, RTL_GIGA_MAC_VER_01 } /* Catch-all */
+ }, *p = mac_info;
+ u32 reg;
+
+@@ -1155,24 +1188,7 @@ static void rtl8169_get_mac_version(stru
+
+ static void rtl8169_print_mac_version(struct rtl8169_private *tp)
+ {
+- struct {
+- int version;
+- char *msg;
+- } mac_print[] = {
+- { RTL_GIGA_MAC_VER_E, "RTL_GIGA_MAC_VER_E" },
+- { RTL_GIGA_MAC_VER_D, "RTL_GIGA_MAC_VER_D" },
+- { RTL_GIGA_MAC_VER_B, "RTL_GIGA_MAC_VER_B" },
+- { 0, NULL }
+- }, *p;
+-
+- for (p = mac_print; p->msg; p++) {
+- if (tp->mac_version == p->version) {
+- dprintk("mac_version == %s (%04d)\n", p->msg,
+- p->version);
+- return;
+- }
+- }
+- dprintk("mac_version == Unknown\n");
++ dprintk("mac_version = 0x%02x\n", tp->mac_version);
+ }
+
+ static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
+@@ -1189,7 +1205,7 @@ static void rtl8169_get_phy_version(stru
+ }, *p = phy_info;
+ u16 reg;
+
+- reg = mdio_read(ioaddr, 3) & 0xffff;
++ reg = mdio_read(ioaddr, MII_PHYSID2) & 0xffff;
+ while ((reg & p->mask) != p->set)
+ p++;
+ tp->phy_version = p->phy_version;
+@@ -1257,7 +1273,7 @@ static void rtl8169_hw_phy_config(struct
+ rtl8169_print_mac_version(tp);
+ rtl8169_print_phy_version(tp);
+
+- if (tp->mac_version <= RTL_GIGA_MAC_VER_B)
++ if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
+ return;
+ if (tp->phy_version >= RTL_GIGA_PHY_VER_H)
+ return;
+@@ -1267,12 +1283,7 @@ static void rtl8169_hw_phy_config(struct
+
+ /* Shazam ! */
+
+- if (tp->mac_version == RTL_GIGA_MAC_VER_X) {
+- mdio_write(ioaddr, 31, 0x0001);
+- mdio_write(ioaddr, 9, 0x273a);
+- mdio_write(ioaddr, 14, 0x7bfb);
+- mdio_write(ioaddr, 27, 0x841e);
+-
++ if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
+ mdio_write(ioaddr, 31, 0x0002);
+ mdio_write(ioaddr, 1, 0x90d0);
+ mdio_write(ioaddr, 31, 0x0000);
+@@ -1306,16 +1317,16 @@ static void rtl8169_phy_timer(unsigned l
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long timeout = RTL8169_PHY_TIMEOUT;
+
+- assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
++ assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
+ assert(tp->phy_version < RTL_GIGA_PHY_VER_H);
+
+- if (!(tp->phy_1000_ctrl_reg & PHY_Cap_1000_Full))
++ if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
+ return;
+
+ spin_lock_irq(&tp->lock);
+
+ if (tp->phy_reset_pending(ioaddr)) {
+- /*
++ /*
+ * A busy loop could burn quite a few cycles on nowadays CPU.
+ * Let's delay the execution of the timer for a few ticks.
+ */
+@@ -1342,7 +1353,7 @@ static inline void rtl8169_delete_timer(
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+
+- if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
++ if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
+ (tp->phy_version >= RTL_GIGA_PHY_VER_H))
+ return;
+
+@@ -1354,7 +1365,7 @@ static inline void rtl8169_request_timer
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+
+- if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
++ if ((tp->mac_version <= RTL_GIGA_MAC_VER_01) ||
+ (tp->phy_version >= RTL_GIGA_PHY_VER_H))
+ return;
+
+@@ -1391,28 +1402,111 @@ static void rtl8169_release_board(struct
+ free_netdev(dev);
+ }
+
++static void rtl8169_phy_reset(struct net_device *dev,
++ struct rtl8169_private *tp)
++{
++ void __iomem *ioaddr = tp->mmio_addr;
++ int i;
++
++ tp->phy_reset_enable(ioaddr);
++ for (i = 0; i < 100; i++) {
++ if (!tp->phy_reset_pending(ioaddr))
++ return;
++ msleep(1);
++ }
++ if (netif_msg_link(tp))
++ printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
++}
++
++static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
++{
++ void __iomem *ioaddr = tp->mmio_addr;
++ static int board_idx = -1;
++ u8 autoneg, duplex;
++ u16 speed;
++
++ board_idx++;
++
++ rtl8169_hw_phy_config(dev);
++
++ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
++ RTL_W8(0x82, 0x01);
++
++ if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
++ dprintk("Set PCI Latency=0x40\n");
++ pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
++ }
++
++ if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
++ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
++ RTL_W8(0x82, 0x01);
++ dprintk("Set PHY Reg 0x0bh = 0x00h\n");
++ mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
++ }
++
++ rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
++
++ rtl8169_phy_reset(dev, tp);
++
++ rtl8169_set_speed(dev, autoneg, speed, duplex);
++
++ if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
++ printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
++}
++
++static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++ struct mii_ioctl_data *data = if_mii(ifr);
++
++ if (!netif_running(dev))
++ return -ENODEV;
++
++ switch (cmd) {
++ case SIOCGMIIPHY:
++ data->phy_id = 32; /* Internal PHY */
++ return 0;
++
++ case SIOCGMIIREG:
++ data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
++ return 0;
++
++ case SIOCSMIIREG:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
++ return 0;
++ }
++ return -EOPNOTSUPP;
++}
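++
++/*
++ * Standard MII ioctl plumbing: SIOCGMIIPHY reports the fixed internal PHY
++ * address (32) and SIOCGMIIREG/SIOCSMIIREG proxy register access through
++ * mdio_read()/mdio_write(), which is what the mii-tool utility relies on.
++ */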
++
+ static int __devinit
+-rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
+- void __iomem **ioaddr_out)
++rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+- void __iomem *ioaddr;
+- struct net_device *dev;
++ const unsigned int region = rtl_cfg_info[ent->driver_data].region;
+ struct rtl8169_private *tp;
+- int rc = -ENOMEM, i, acpi_idle_state = 0, pm_cap;
++ struct net_device *dev;
++ void __iomem *ioaddr;
++ unsigned int pm_cap;
++ int i, rc;
+
+- assert(ioaddr_out != NULL);
++ if (netif_msg_drv(&debug)) {
++ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
++ MODULENAME, RTL8169_VERSION);
++ }
+
+- /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev(sizeof (*tp));
+- if (dev == NULL) {
++ if (!dev) {
+ if (netif_msg_drv(&debug))
+ dev_err(&pdev->dev, "unable to alloc new ethernet\n");
+- goto err_out;
++ rc = -ENOMEM;
++ goto out;
+ }
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ tp = netdev_priv(dev);
++ tp->dev = dev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+@@ -1420,48 +1514,53 @@ rtl8169_init_board(struct pci_dev *pdev,
+ if (rc < 0) {
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev, "enable failure\n");
+- goto err_out_free_dev;
++ goto err_out_free_dev_1;
+ }
+
+ rc = pci_set_mwi(pdev);
+ if (rc < 0)
+- goto err_out_disable;
++ goto err_out_disable_2;
+
+ /* save power state before pci_enable_device overwrites it */
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap) {
+- u16 pwr_command;
++ u16 pwr_command, acpi_idle_state;
+
+ pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
+ acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+ } else {
+- if (netif_msg_probe(tp))
++ if (netif_msg_probe(tp)) {
+ dev_err(&pdev->dev,
+- "PowerManagement capability not found.\n");
++ "PowerManagement capability not found.\n");
++ }
+ }
+
+ /* make sure PCI base addr 1 is MMIO */
+- if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+- if (netif_msg_probe(tp))
++ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
++ if (netif_msg_probe(tp)) {
+ dev_err(&pdev->dev,
+- "region #1 not an MMIO resource, aborting\n");
++ "region #%d not an MMIO resource, aborting\n",
++ region);
++ }
+ rc = -ENODEV;
+- goto err_out_mwi;
++ goto err_out_mwi_3;
+ }
++
+ /* check for weird/broken PCI region reporting */
+- if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
+- if (netif_msg_probe(tp))
++ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
++ if (netif_msg_probe(tp)) {
+ dev_err(&pdev->dev,
+- "Invalid PCI region size(s), aborting\n");
++ "Invalid PCI region size(s), aborting\n");
++ }
+ rc = -ENODEV;
+- goto err_out_mwi;
++ goto err_out_mwi_3;
+ }
+
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc < 0) {
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev, "could not request regions.\n");
+- goto err_out_mwi;
++ goto err_out_mwi_3;
+ }
+
+ tp->cp_cmd = PCIMulRW | RxChkSum;
+@@ -1473,22 +1572,23 @@ rtl8169_init_board(struct pci_dev *pdev,
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc < 0) {
+- if (netif_msg_probe(tp))
++ if (netif_msg_probe(tp)) {
+ dev_err(&pdev->dev,
+- "DMA configuration failed.\n");
+- goto err_out_free_res;
++ "DMA configuration failed.\n");
++ }
++ goto err_out_free_res_4;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* ioremap MMIO region */
+- ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
+- if (ioaddr == NULL) {
++ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
++ if (!ioaddr) {
+ if (netif_msg_probe(tp))
+ dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+- goto err_out_free_res;
++ goto err_out_free_res_4;
+ }
+
+ /* Unneeded ? Don't mess with Mrs. Murphy. */
+@@ -1498,10 +1598,10 @@ rtl8169_init_board(struct pci_dev *pdev,
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+- for (i = 1000; i > 0; i--) {
++ for (i = 100; i > 0; i--) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+- udelay(10);
++ msleep_interruptible(1);
+ }
+
+ /* Identify chip attached to board */
+@@ -1519,8 +1619,8 @@ rtl8169_init_board(struct pci_dev *pdev,
+ /* Unknown chip: assume array element #0, original RTL-8169 */
+ if (netif_msg_probe(tp)) {
+ dev_printk(KERN_DEBUG, &pdev->dev,
+- "unknown chip version, assuming %s\n",
+- rtl_chip_info[0].name);
++ "unknown chip version, assuming %s\n",
++ rtl_chip_info[0].name);
+ }
+ i++;
+ }
+@@ -1531,56 +1631,6 @@ rtl8169_init_board(struct pci_dev *pdev,
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+- *ioaddr_out = ioaddr;
+- *dev_out = dev;
+-out:
+- return rc;
+-
+-err_out_free_res:
+- pci_release_regions(pdev);
+-
+-err_out_mwi:
+- pci_clear_mwi(pdev);
+-
+-err_out_disable:
+- pci_disable_device(pdev);
+-
+-err_out_free_dev:
+- free_netdev(dev);
+-err_out:
+- *ioaddr_out = NULL;
+- *dev_out = NULL;
+- goto out;
+-}
+-
+-static int __devinit
+-rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+-{
+- struct net_device *dev = NULL;
+- struct rtl8169_private *tp;
+- void __iomem *ioaddr = NULL;
+- static int board_idx = -1;
+- u8 autoneg, duplex;
+- u16 speed;
+- int i, rc;
+-
+- assert(pdev != NULL);
+- assert(ent != NULL);
+-
+- board_idx++;
+-
+- if (netif_msg_drv(&debug)) {
+- printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+- MODULENAME, RTL8169_VERSION);
+- }
+-
+- rc = rtl8169_init_board(pdev, &dev, &ioaddr);
+- if (rc)
+- return rc;
+-
+- tp = netdev_priv(dev);
+- assert(ioaddr != NULL);
+-
+ if (RTL_R8(PHYstatus) & TBI_Enable) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+@@ -1588,13 +1638,15 @@ rtl8169_init_one(struct pci_dev *pdev, c
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+
+- tp->phy_1000_ctrl_reg = PHY_Cap_1000_Full; /* Implied by TBI */
++ tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
++
++ dev->do_ioctl = rtl8169_ioctl;
+ }
+
+ /* Get MAC address. FIXME: read EEPROM */
+@@ -1632,19 +1684,13 @@ rtl8169_init_one(struct pci_dev *pdev, c
+ tp->intr_mask = 0xffff;
+ tp->pci_dev = pdev;
+ tp->mmio_addr = ioaddr;
++ tp->align = rtl_cfg_info[ent->driver_data].align;
+
+ spin_lock_init(&tp->lock);
+
+ rc = register_netdev(dev);
+- if (rc) {
+- rtl8169_release_board(pdev, dev, ioaddr);
+- return rc;
+- }
+-
+- if (netif_msg_probe(tp)) {
+- printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n",
+- dev->name, rtl_chip_info[tp->chipset].name);
+- }
++ if (rc < 0)
++ goto err_out_unmap_5;
+
+ pci_set_drvdata(pdev, dev);
+
+@@ -1653,38 +1699,29 @@ rtl8169_init_one(struct pci_dev *pdev, c
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+ "IRQ %d\n",
+ dev->name,
+- rtl_chip_info[ent->driver_data].name,
++ rtl_chip_info[tp->chipset].name,
+ dev->base_addr,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5], dev->irq);
+ }
+
+- rtl8169_hw_phy_config(dev);
+-
+- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+- RTL_W8(0x82, 0x01);
+-
+- if (tp->mac_version < RTL_GIGA_MAC_VER_E) {
+- dprintk("Set PCI Latency=0x40\n");
+- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+- }
+-
+- if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
+- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+- RTL_W8(0x82, 0x01);
+- dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+- mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
+- }
+-
+- rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
++ rtl8169_init_phy(dev, tp);
+
+- rtl8169_set_speed(dev, autoneg, speed, duplex);
+-
+- if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
+- printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
++out:
++ return rc;
+
+- return 0;
++err_out_unmap_5:
++ iounmap(ioaddr);
++err_out_free_res_4:
++ pci_release_regions(pdev);
++err_out_mwi_3:
++ pci_clear_mwi(pdev);
++err_out_disable_2:
++ pci_disable_device(pdev);
++err_out_free_dev_1:
++ free_netdev(dev);
++ goto out;
+ }
+
+ static void __devexit
+@@ -1775,62 +1812,120 @@ static void rtl8169_hw_reset(void __iome
+ RTL_R8(ChipCmd);
+ }
+
+-static void
+-rtl8169_hw_start(struct net_device *dev)
++static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
++{
++ void __iomem *ioaddr = tp->mmio_addr;
++ u32 cfg = rtl8169_rx_config;
++
++ cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
++ RTL_W32(RxConfig, cfg);
++
++ /* Set DMA burst size and Interframe Gap Time */
++ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
++ (InterFrameGap << TxInterFrameGapShift));
++}
++
++static void rtl8169_hw_start(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
++ struct pci_dev *pdev = tp->pci_dev;
++ u16 cmd;
+ u32 i;
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+- for (i = 1000; i > 0; i--) {
++ for (i = 100; i > 0; i--) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+- udelay(10);
++ msleep_interruptible(1);
++ }
++
++ if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
++ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
++ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
++ }
++
++ if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
++ pci_write_config_word(pdev, 0x68, 0x00);
++ pci_write_config_word(pdev, 0x69, 0x08);
++ }
++
++ /* Undocumented stuff. */
++ if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
++ /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
++ if ((RTL_R8(Config2) & 0x07) & 0x01)
++ RTL_W32(0x7c, 0x0007ffff);
++
++ RTL_W32(0x7c, 0x0007ff00);
++
++ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
++ cmd = cmd & 0xef;
++ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_04))
++ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
++
+ RTL_W8(EarlyTxThres, EarlyTxThld);
+
+ /* Low hurts. Let's disable the filtering. */
+ RTL_W16(RxMaxSize, 16383);
+
+- /* Set Rx Config register */
+- i = rtl8169_rx_config |
+- (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+- RTL_W32(RxConfig, i);
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_04))
++ rtl8169_set_rx_tx_config_registers(tp);
+
+- /* Set DMA burst size and Interframe Gap Time */
+- RTL_W32(TxConfig,
+- (TX_DMA_BURST << TxDMAShift) | (InterFrameGap <<
+- TxInterFrameGapShift));
+- tp->cp_cmd |= RTL_R16(CPlusCmd);
+- RTL_W16(CPlusCmd, tp->cp_cmd);
++ cmd = RTL_R16(CPlusCmd);
++ RTL_W16(CPlusCmd, cmd);
+
+- if ((tp->mac_version == RTL_GIGA_MAC_VER_D) ||
+- (tp->mac_version == RTL_GIGA_MAC_VER_E)) {
++ tp->cp_cmd |= cmd | PCIMulRW;
++
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
+ dprintk(KERN_INFO PFX "Set MAC Reg C+CR Offset 0xE0. "
+ "Bit-3 and bit-14 MUST be 1\n");
+- tp->cp_cmd |= (1 << 14) | PCIMulRW;
+- RTL_W16(CPlusCmd, tp->cp_cmd);
++ tp->cp_cmd |= (1 << 14);
+ }
+
++ RTL_W16(CPlusCmd, tp->cp_cmd);
++
+ /*
+ * Undocumented corner. Supposedly:
+ * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
+ */
+ RTL_W16(IntrMitigate, 0x0000);
+
+- RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
++ /*
++ * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
++ * register to be written before TxDescAddrLow to work.
++ * Switching from MMIO to I/O access fixes the issue as well.
++ */
+ RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
+- RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
++ RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
+ RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
++ RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
++
++ if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
++ (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
++ (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
++ (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
++ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
++ rtl8169_set_rx_tx_config_registers(tp);
++ }
++
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+- udelay(10);
++
++ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
++ RTL_R8(IntrMask);
+
+ RTL_W32(RxMissed, 0);
+
+@@ -1910,17 +2005,18 @@ static inline void rtl8169_map_to_asic(s
+ }
+
+ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+- struct RxDesc *desc, int rx_buf_sz)
++ struct RxDesc *desc, int rx_buf_sz,
++ unsigned int align)
+ {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret = 0;
+
+- skb = dev_alloc_skb(rx_buf_sz + NET_IP_ALIGN);
++ skb = dev_alloc_skb(rx_buf_sz + align);
+ if (!skb)
+ goto err_out;
+
+- skb_reserve(skb, NET_IP_ALIGN);
++ skb_reserve(skb, (align - 1) & (u32)skb->data);
+ *sk_buff = skb;
+
+ mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+@@ -1953,15 +2049,15 @@ static u32 rtl8169_rx_fill(struct rtl816
+ u32 start, u32 end)
+ {
+ u32 cur;
+-
++
+ for (cur = start; end - cur > 0; cur++) {
+ int ret, i = cur % NUM_RX_DESC;
+
+ if (tp->Rx_skbuff[i])
+ continue;
+-
++
+ ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+- tp->RxDescArray + i, tp->rx_buf_sz);
++ tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
+ if (ret < 0)
+ break;
+ }
+@@ -2190,8 +2286,8 @@ static int rtl8169_start_xmit(struct sk_
+ dma_addr_t mapping;
+ u32 status, len;
+ u32 opts1;
+- int ret = 0;
+-
++ int ret = NETDEV_TX_OK;
++
+ if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ if (netif_msg_drv(tp)) {
+ printk(KERN_ERR
+@@ -2255,7 +2351,7 @@ out:
+
+ err_stop:
+ netif_stop_queue(dev);
+- ret = 1;
++ ret = NETDEV_TX_BUSY;
+ err_update_stats:
+ tp->stats.tx_dropped++;
+ goto out;
+@@ -2280,12 +2376,17 @@ static void rtl8169_pcierr_interrupt(str
+ /*
+ * The recovery sequence below admits a very elaborated explanation:
+ * - it seems to work;
+- * - I did not see what else could be done.
++ * - I did not see what else could be done;
++ * - it makes iop3xx happy.
+ *
+ * Feel free to adjust to your needs.
+ */
+- pci_write_config_word(pdev, PCI_COMMAND,
+- pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
++ if (pdev->broken_parity_status)
++ pci_cmd &= ~PCI_COMMAND_PARITY;
++ else
++ pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
++
++ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+
+ pci_write_config_word(pdev, PCI_STATUS,
+ pci_status & (PCI_STATUS_DETECTED_PARITY |
+@@ -2299,10 +2400,11 @@ static void rtl8169_pcierr_interrupt(str
+ tp->cp_cmd &= ~PCIDAC;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ dev->features &= ~NETIF_F_HIGHDMA;
+- rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ }
+
+ rtl8169_hw_reset(ioaddr);
++
++ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ }
+
+ static void
+@@ -2372,16 +2474,17 @@ static inline void rtl8169_rx_csum(struc
+ }
+
+ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+- struct RxDesc *desc, int rx_buf_sz)
++ struct RxDesc *desc, int rx_buf_sz,
++ unsigned int align)
+ {
+ int ret = -1;
+
+ if (pkt_size < rx_copybreak) {
+ struct sk_buff *skb;
+
+- skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
++ skb = dev_alloc_skb(pkt_size + align);
+ if (skb) {
+- skb_reserve(skb, NET_IP_ALIGN);
++ skb_reserve(skb, (align - 1) & (u32)skb->data);
+ eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+ *sk_buff = skb;
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+@@ -2427,6 +2530,10 @@ rtl8169_rx_interrupt(struct net_device *
+ tp->stats.rx_length_errors++;
+ if (status & RxCRC)
+ tp->stats.rx_crc_errors++;
++ if (status & RxFOVF) {
++ rtl8169_schedule_work(dev, rtl8169_reset_task);
++ tp->stats.rx_fifo_errors++;
++ }
+ rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
+ } else {
+ struct sk_buff *skb = tp->Rx_skbuff[entry];
+@@ -2447,13 +2554,13 @@ rtl8169_rx_interrupt(struct net_device *
+ }
+
+ rtl8169_rx_csum(skb, desc);
+-
++
+ pci_dma_sync_single_for_cpu(tp->pci_dev,
+ le64_to_cpu(desc->addr), tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
+- tp->rx_buf_sz)) {
++ tp->rx_buf_sz, tp->align)) {
+ pci_action = pci_unmap_single;
+ tp->Rx_skbuff[entry] = NULL;
+ }
+@@ -2543,7 +2650,7 @@ rtl8169_interrupt(int irq, void *dev_ins
+ __netif_rx_schedule(dev);
+ else if (netif_msg_intr(tp)) {
+ printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
+- dev->name, status);
++ dev->name, status);
+ }
+ break;
+ #else
+@@ -2606,6 +2713,7 @@ static void rtl8169_down(struct net_devi
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int poll_locked = 0;
++ unsigned int intrmask;
+
+ rtl8169_delete_timer(dev);
+
+@@ -2644,8 +2752,11 @@ core_down:
+ * 2) dev->change_mtu
+ * -> rtl8169_poll can not be issued again and re-enable the
+ * interruptions. Let's simply issue the IRQ down sequence again.
++ *
++ * No loop if hotplugged or major error (0xffff).
+ */
+- if (RTL_R16(IntrMask))
++ intrmask = RTL_R16(IntrMask);
++ if (intrmask && (intrmask != 0xffff))
+ goto core_down;
+
+ rtl8169_tx_clear(tp);
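+
+ The 0xffff test above relies on a PCI property: reads from a device
+ that has been surprise-removed, or whose bus has died, return all ones
+ on most platforms. A sketch of the idiom in isolation:
+
+	u16 mask = RTL_R16(IntrMask);
+
+	if (mask == 0xffff) {
+		/* All ones from a 16-bit register: the device is gone,
+		 * so do not loop waiting for sources to drain. */
+	} else if (mask) {
+		/* genuinely pending sources: rerun the IRQ teardown */
+	}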
+@@ -2716,6 +2827,15 @@ rtl8169_set_rx_mode(struct net_device *d
+ tmp = rtl8169_rx_config | rx_mode |
+ (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_15)) {
++ mc_filter[0] = 0xffffffff;
++ mc_filter[1] = 0xffffffff;
++ }
++
+ RTL_W32(RxConfig, tmp);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+ RTL_W32(MAR0 + 4, mc_filter[1]);
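+
+ Writing all ones to both MAR registers above makes the listed chip
+ revisions accept every multicast frame, since their hash filter is not
+ usable. For reference, a sketch of the usual 64-bin hash setup that
+ the fallback replaces (2.6.18-era multicast list walk; ether_crc()
+ comes from <linux/crc32.h>):
+
+	u32 mc_filter[2] = { 0, 0 };
+	struct dev_mc_list *mclist;
+	int bit;
+
+	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
+		bit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+		mc_filter[bit >> 5] |= 1u << (bit & 31);
+	}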
+@@ -2741,7 +2861,7 @@ static struct net_device_stats *rtl8169_
+ RTL_W32(RxMissed, 0);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+-
++
+ return &tp->stats;
+ }
+
+@@ -2809,7 +2929,7 @@ static struct pci_driver rtl8169_pci_dri
+ static int __init
+ rtl8169_init_module(void)
+ {
+- return pci_module_init(&rtl8169_pci_driver);
++ return pci_register_driver(&rtl8169_pci_driver);
+ }
+
+ static void __exit
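+
+ pci_module_init() was a compatibility wrapper scheduled for removal;
+ pci_register_driver() is the direct replacement and keeps the same
+ zero-or-negative-errno convention. A sketch of the paired init/exit
+ hooks, with example_pci_driver standing in for a real struct
+ pci_driver:
+
+	static int __init example_init(void)
+	{
+		return pci_register_driver(&example_pci_driver);
+	}
+
+	static void __exit example_exit(void)
+	{
+		pci_unregister_driver(&example_pci_driver);
+	}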
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/alaska.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/alaska.c
+--- linux-2.6.18.8/drivers/net/sfc/alaska.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/alaska.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,159 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2006-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "net_driver.h"
++#include <linux/ethtool.h>
++#include "gmii.h"
++#include "phy.h"
++
++/* Marvell 88E1111 "Alaska" PHY control */
++#define ALASKA_PHY_SPECIFIC 16
++#define ALASKA_ALLOW_SLEEP 0x0200
++
++#define ALASKA_EXTENDED_CONTROL 20
++#define EXTENDED_LINE_LOOPBACK 0x8000
++
++#define ALASKA_LED_CONTROL 24
++#define LED_BLINK_MASK 0x0700
++#define LED_BLINK_FAST 0x0100
++#define LED_BLINK_SLOW 0x0300
++#define LED_TX_CTRL_MASK 0x0041
++#define LED_TX_CTRL_LINK_AND_ACTIVITY 0x0001
++
++#define ALASKA_LED_OVERRIDE 25
++#define LED_LINK1000_MASK 0x0030
++#define LED_LINK1000_BLINK 0x0010
++#define LED_TX_MASK 0x0003
++#define LED_TX_BLINK 0x0001
++
++static void alaska_reconfigure(struct efx_nic *efx)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ u32 bmcr, phy_ext;
++
++ /* Configure line loopback if requested */
++ phy_ext = gmii->mdio_read(gmii->dev, gmii->phy_id,
++ ALASKA_EXTENDED_CONTROL);
++ if (efx->loopback_mode == LOOPBACK_NETWORK)
++ phy_ext |= EXTENDED_LINE_LOOPBACK;
++ else
++ phy_ext &= ~EXTENDED_LINE_LOOPBACK;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, ALASKA_EXTENDED_CONTROL,
++ phy_ext);
++
++ /* Configure PHY loopback if requested */
++ bmcr = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_BMCR);
++ if (efx->loopback_mode == LOOPBACK_PHY)
++ bmcr |= BMCR_LOOPBACK;
++ else
++ bmcr &= ~BMCR_LOOPBACK;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, MII_BMCR, bmcr);
++
++ /* Read link up status */
++ if (efx->loopback_mode == LOOPBACK_NONE)
++ efx->link_up = mii_link_ok(gmii);
++ else
++ efx->link_up = 1;
++
++ /* Determine link options from PHY */
++ if (gmii->force_media) {
++ efx->link_options = gmii_forced_result(bmcr);
++ } else {
++ int lpa = gmii_lpa(gmii);
++ int adv = gmii_advertised(gmii);
++ efx->link_options = gmii_nway_result(adv & lpa);
++ }
++}
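++
++/* The sequences above share one read-modify-write idiom. A minimal
++ * sketch of it as a helper (not part of this driver; it assumes the
++ * same mii_if_info mdio hooks used throughout this file): */
++static inline void mii_update_bits(struct mii_if_info *gmii, int reg,
++				   u16 mask, u16 set)
++{
++	u32 v = gmii->mdio_read(gmii->dev, gmii->phy_id, reg);
++
++	v = (v & ~mask) | set;
++	gmii->mdio_write(gmii->dev, gmii->phy_id, reg, v);
++}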
++
++static void alaska_clear_interrupt(struct efx_nic *efx)
++{
++ struct mii_if_info *gmii = &efx->mii;
++
++ /* Read interrupt status register to clear */
++ gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_ISR);
++}
++
++static int alaska_init(struct efx_nic *efx)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ u32 ier, leds, ctrl_1g, phy_spec;
++
++ /* Read ISR to clear any outstanding PHY interrupts */
++ gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_ISR);
++
++ /* Enable PHY interrupts */
++ ier = gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_IER);
++ ier |= IER_LINK_CHG;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, GMII_IER, ier);
++
++ /* Remove 1G half-duplex, which the Mentor MAC does not support */
++ ctrl_1g = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
++ ctrl_1g &= ~(ADVERTISE_1000HALF);
++ gmii->mdio_write(gmii->dev, gmii->phy_id, MII_CTRL1000, ctrl_1g);
++
++ /*
++ * The PHY can save power when there is no external connection
++ * (sleep mode). However, this is incompatible with PHY
++ * loopback, and if we enable and disable it quickly the PHY can
++ * go to sleep even when sleep mode is disabled. (SFC bug
++ * 9309.) Therefore we disable it all the time.
++ */
++ phy_spec = gmii->mdio_read(gmii->dev, gmii->phy_id,
++ ALASKA_PHY_SPECIFIC);
++ phy_spec &= ~ALASKA_ALLOW_SLEEP;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, ALASKA_PHY_SPECIFIC,
++ phy_spec);
++
++ /* Configure LEDs */
++ leds = gmii->mdio_read(gmii->dev, gmii->phy_id, ALASKA_LED_CONTROL);
++ leds &= ~(LED_BLINK_MASK | LED_TX_CTRL_MASK);
++ leds |= (LED_BLINK_FAST | LED_TX_CTRL_LINK_AND_ACTIVITY);
++ gmii->mdio_write(gmii->dev, gmii->phy_id, ALASKA_LED_CONTROL, leds);
++
++ return 0;
++}
++
++static void alaska_fini(struct efx_nic *efx)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ u32 ier;
++
++ /* Disable PHY interrupts */
++ ier = gmii->mdio_read(gmii->dev, gmii->phy_id, GMII_IER);
++ ier &= ~IER_LINK_CHG;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, GMII_IER, ier);
++}
++
++
++struct efx_phy_operations alaska_phy_operations = {
++ .init = alaska_init,
++ .fini = alaska_fini,
++ .reconfigure = alaska_reconfigure,
++ .clear_interrupt = alaska_clear_interrupt,
++ .loopbacks = (1 << LOOPBACK_PHY) | (1 << LOOPBACK_NETWORK),
++ .startup_loopback = LOOPBACK_PHY,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/bitfield.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/bitfield.h
+--- linux-2.6.18.8/drivers/net/sfc/bitfield.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/bitfield.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,544 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_BITFIELD_H
++#define EFX_BITFIELD_H
++
++/*
++ * Efx bitfield access
++ *
++ * Efx NICs make extensive use of bitfields up to 128 bits
++ * wide. Since there is no native 128-bit datatype on most systems,
++ * and since 64-bit datatypes are inefficient on 32-bit systems and
++ * vice versa, we wrap accesses in a way that uses the most efficient
++ * datatype.
++ *
++ * The NICs are PCI devices and therefore little-endian. Since most
++ * of the quantities that we deal with are DMAed to/from host memory,
++ * we define our datatypes (efx_oword_t, efx_qword_t and
++ * efx_dword_t) to be little-endian.
++ */
++
++/* Lowest bit numbers and widths */
++#define EFX_DUMMY_FIELD_LBN 0
++#define EFX_DUMMY_FIELD_WIDTH 0
++#define EFX_DWORD_0_LBN 0
++#define EFX_DWORD_0_WIDTH 32
++#define EFX_DWORD_1_LBN 32
++#define EFX_DWORD_1_WIDTH 32
++#define EFX_DWORD_2_LBN 64
++#define EFX_DWORD_2_WIDTH 32
++#define EFX_DWORD_3_LBN 96
++#define EFX_DWORD_3_WIDTH 32
++
++#define EFX_BYTE 1
++#define EFX_WORD 2
++#define EFX_DWORD 4
++#define EFX_OWORD 8
++
++/* Specified attribute (e.g. LBN) of the specified field */
++#define EFX_VAL(field, attribute) field ## _ ## attribute
++/* Low bit number of the specified field */
++#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
++/* Bit width of the specified field */
++#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
++/* High bit number of the specified field */
++#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
++/* Mask equal in width to the specified field.
++ *
++ * For example, a field with width 5 would have a mask of 0x1f.
++ *
++ * The maximum width mask that can be generated is 64 bits.
++ */
++#define EFX_MASK64(field) \
++ (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \
++ (((((u64) 1) << EFX_WIDTH(field))) - 1))
++
++/* Mask equal in width to the specified field.
++ *
++ * For example, a field with width 5 would have a mask of 0x1f.
++ *
++ * The maximum width mask that can be generated is 32 bits. Use
++ * EFX_MASK64 for higher width fields.
++ */
++#define EFX_MASK32(field) \
++ (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \
++ (((((u32) 1) << EFX_WIDTH(field))) - 1))
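++
++/* Why the full-width special case above: C leaves a shift by the full
++ * width of the type undefined, so a 64-bit (or 32-bit) all-ones mask
++ * cannot be built as (1 << width) - 1. The same rule as a function,
++ * for illustration only: */
++static inline u64 efx_example_width_mask(unsigned int width)
++{
++	return (width == 64) ? ~((u64) 0) : ((((u64) 1) << width) - 1);
++}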
++
++/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
++typedef union efx_dword {
++ __le32 u32[1];
++} efx_dword_t;
++
++/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
++typedef union efx_qword {
++ __le64 u64[1];
++ __le32 u32[2];
++ efx_dword_t dword[2];
++} efx_qword_t;
++
++/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
++typedef union efx_oword {
++ __le64 u64[2];
++ efx_qword_t qword[2];
++ __le32 u32[4];
++ efx_dword_t dword[4];
++} efx_oword_t;
++
++/* Format string and value expanders for printk */
++#define EFX_DWORD_FMT "%08x"
++#define EFX_QWORD_FMT "%08x:%08x"
++#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
++#define EFX_DWORD_VAL(dword) \
++ ((unsigned int) le32_to_cpu((dword).u32[0]))
++#define EFX_QWORD_VAL(qword) \
++ ((unsigned int) le32_to_cpu((qword).u32[1])), \
++ ((unsigned int) le32_to_cpu((qword).u32[0]))
++#define EFX_OWORD_VAL(oword) \
++ ((unsigned int) le32_to_cpu((oword).u32[3])), \
++ ((unsigned int) le32_to_cpu((oword).u32[2])), \
++ ((unsigned int) le32_to_cpu((oword).u32[1])), \
++ ((unsigned int) le32_to_cpu((oword).u32[0]))
++
++/*
++ * Extract the bit field portion [low,high] from the native-endian element
++ * which contains bits [min,max] (all bounds inclusive).
++ *
++ * For example, suppose "element" represents the high 32 bits of a
++ * 64-bit value, and we wish to extract the bits belonging to the bit
++ * field occupying bits 28-45 of this 64-bit value.
++ *
++ * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
++ *
++ * ( element ) << 4
++ *
++ * The result will contain the relevant bits filled in over the range
++ * [0,high-low], with garbage in the bits above that.
++ */
++#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
++ (((low > max) || (high < min)) ? 0 : \
++ ((low > min) ? \
++ ((native_element) >> (low - min)) : \
++ ((native_element) << (min - low))))
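++
++/* A worked instance of the macro above (illustrative only, not part of
++ * this header): extracting a field that occupies bits 28-45 of a
++ * 128-bit register held as four 32-bit words. Word 0 (bits 0-31)
++ * contributes (w[0] >> 28), word 1 (bits 32-63) contributes
++ * (w[1] << 4), and words 2 and 3 fail the range test and contribute
++ * zero; masking to the 18-bit field width gives the value. */
++static inline u32 efx_example_extract_28_45(const u32 *w)
++{
++	return ((w[0] >> 28) | (w[1] << 4)) & ((1u << 18) - 1);
++}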
++
++/*
++ * Extract the bit field portion [low,high] from the 64-bit little-endian
++ * element which contains bits [min,max] (bounds inclusive)
++ */
++#define EFX_EXTRACT64(element, min, max, low, high) \
++ EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
++
++/*
++ * Extract the bit field portion [low,high] from the 32-bit little-endian
++ * element which contains bits [min,max] (bounds inclusive)
++ */
++#define EFX_EXTRACT32(element, min, max, low, high) \
++ EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
++
++#define EFX_EXTRACT_OWORD64(oword, low, high) \
++ (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
++ EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
++
++#define EFX_EXTRACT_QWORD64(qword, low, high) \
++ EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
++
++#define EFX_EXTRACT_OWORD32(oword, low, high) \
++ (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
++ EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
++ EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
++ EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
++
++#define EFX_EXTRACT_QWORD32(qword, low, high) \
++ (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
++ EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
++
++#define EFX_EXTRACT_DWORD(dword, low, high) \
++ EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
++
++#define EFX_OWORD_FIELD64(oword, field) \
++ (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
++ & EFX_MASK64(field))
++
++#define EFX_QWORD_FIELD64(qword, field) \
++ (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
++ & EFX_MASK64(field))
++
++#define EFX_OWORD_FIELD32(oword, field) \
++ (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
++ & EFX_MASK32(field))
++
++#define EFX_QWORD_FIELD32(qword, field) \
++ (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
++ & EFX_MASK32(field))
++
++#define EFX_DWORD_FIELD(dword, field) \
++ (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
++ & EFX_MASK32(field))
++
++#define EFX_OWORD_IS_ZERO64(oword) \
++ (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
++
++#define EFX_QWORD_IS_ZERO64(qword) \
++ (((qword).u64[0]) == (__force __le64) 0)
++
++#define EFX_OWORD_IS_ZERO32(oword) \
++ (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
++ == (__force __le32) 0)
++
++#define EFX_QWORD_IS_ZERO32(qword) \
++ (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
++
++#define EFX_DWORD_IS_ZERO(dword) \
++ (((dword).u32[0]) == (__force __le32) 0)
++
++#define EFX_OWORD_IS_ALL_ONES64(oword) \
++ (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
++
++#define EFX_QWORD_IS_ALL_ONES64(qword) \
++ ((qword).u64[0] == ~((__force __le64) 0))
++
++#define EFX_OWORD_IS_ALL_ONES32(oword) \
++ (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
++ == ~((__force __le32) 0))
++
++#define EFX_QWORD_IS_ALL_ONES32(qword) \
++ (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
++
++#define EFX_DWORD_IS_ALL_ONES(dword) \
++ ((dword).u32[0] == ~((__force __le32) 0))
++
++#if BITS_PER_LONG == 64
++#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
++#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
++#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
++#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
++#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
++#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
++#else
++#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
++#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
++#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
++#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
++#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
++#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
++#endif
++
++/*
++ * Construct bit field portion
++ *
++ * Creates the portion of the bit field [low,high] that lies within
++ * the range [min,max] (bounds inclusive).
++ */
++#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
++ (((low > max) || (high < min)) ? 0 : \
++ ((low > min) ? \
++ (((u64) (value)) << (low - min)) : \
++ (((u64) (value)) >> (min - low))))
++
++#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
++ (((low > max) || (high < min)) ? 0 : \
++ ((low > min) ? \
++ (((u32) (value)) << (low - min)) : \
++ (((u32) (value)) >> (min - low))))
++
++#define EFX_INSERT_NATIVE(min, max, low, high, value) \
++ ((((max - min) >= 32) || ((high - low) >= 32)) ? \
++ EFX_INSERT_NATIVE64(min, max, low, high, value) : \
++ EFX_INSERT_NATIVE32(min, max, low, high, value))
++
++/*
++ * Construct bit field portion
++ *
++ * Creates the portion of the named bit field that lies within the
++ * range [min,max] (bounds inclusive).
++ */
++#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
++ EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
++ EFX_HIGH_BIT(field), value)
++
++/*
++ * Construct bit field
++ *
++ * Creates the portion of the named bit fields that lie within the
++ * range [min,max] (bounds inclusive).
++ */
++#define EFX_INSERT_FIELDS_NATIVE(min, max, \
++ field1, value1, \
++ field2, value2, \
++ field3, value3, \
++ field4, value4, \
++ field5, value5, \
++ field6, value6, \
++ field7, value7, \
++ field8, value8, \
++ field9, value9, \
++ field10, value10) \
++ (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
++ EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
++
++#define EFX_INSERT_FIELDS64(...) \
++ cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
++
++#define EFX_INSERT_FIELDS32(...) \
++ cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
++
++#define EFX_POPULATE_OWORD64(oword, ...) do { \
++ (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
++ (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
++ } while (0)
++
++#define EFX_POPULATE_QWORD64(qword, ...) do { \
++ (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
++ } while (0)
++
++#define EFX_POPULATE_OWORD32(oword, ...) do { \
++ (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
++ (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
++ (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
++ (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
++ } while (0)
++
++#define EFX_POPULATE_QWORD32(qword, ...) do { \
++ (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
++ (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
++ } while (0)
++
++#define EFX_POPULATE_DWORD(dword, ...) do { \
++ (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
++ } while (0)
++
++#if BITS_PER_LONG == 64
++#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
++#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
++#else
++#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
++#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
++#endif
++
++/* Populate an octword field with various numbers of arguments */
++#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
++#define EFX_POPULATE_OWORD_9(oword, ...) \
++ EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_8(oword, ...) \
++ EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_7(oword, ...) \
++ EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_6(oword, ...) \
++ EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_5(oword, ...) \
++ EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_4(oword, ...) \
++ EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_3(oword, ...) \
++ EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_2(oword, ...) \
++ EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_OWORD_1(oword, ...) \
++ EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_ZERO_OWORD(oword) \
++ EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
++#define EFX_SET_OWORD(oword) \
++ EFX_POPULATE_OWORD_4(oword, \
++ EFX_DWORD_0, 0xffffffff, \
++ EFX_DWORD_1, 0xffffffff, \
++ EFX_DWORD_2, 0xffffffff, \
++ EFX_DWORD_3, 0xffffffff)
++
++/* Populate a quadword field with various numbers of arguments */
++#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
++#define EFX_POPULATE_QWORD_9(qword, ...) \
++ EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_8(qword, ...) \
++ EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_7(qword, ...) \
++ EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_6(qword, ...) \
++ EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_5(qword, ...) \
++ EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_4(qword, ...) \
++ EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_3(qword, ...) \
++ EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_2(qword, ...) \
++ EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_QWORD_1(qword, ...) \
++ EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_ZERO_QWORD(qword) \
++ EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
++#define EFX_SET_QWORD(qword) \
++ EFX_POPULATE_QWORD_2(qword, \
++ EFX_DWORD_0, 0xffffffff, \
++ EFX_DWORD_1, 0xffffffff)
++
++/* Populate a dword field with various numbers of arguments */
++#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
++#define EFX_POPULATE_DWORD_9(dword, ...) \
++ EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_8(dword, ...) \
++ EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_7(dword, ...) \
++ EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_6(dword, ...) \
++ EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_5(dword, ...) \
++ EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_4(dword, ...) \
++ EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_3(dword, ...) \
++ EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_2(dword, ...) \
++ EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_POPULATE_DWORD_1(dword, ...) \
++ EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
++#define EFX_ZERO_DWORD(dword) \
++ EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
++#define EFX_SET_DWORD(dword) \
++ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
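++
++/* Usage sketch for the populate family (FOO and BAR are hypothetical
++ * fields, i.e. FOO_LBN/FOO_WIDTH and BAR_LBN/BAR_WIDTH are assumed to
++ * be defined): each _N variant pads itself out to the fixed ten-pair
++ * form with EFX_DUMMY_FIELD, whose zero width makes it contribute
++ * nothing.
++ *
++ *	efx_dword_t reg;
++ *	EFX_POPULATE_DWORD_2(reg, FOO, 1, BAR, 7);
++ */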
++
++/*
++ * Modify a named field within an already-populated structure. Used
++ * for read-modify-write operations.
++ */
++
++#define EFX_INVERT_OWORD(oword) do { \
++ (oword).u64[0] = ~((oword).u64[0]); \
++ (oword).u64[1] = ~((oword).u64[1]); \
++ } while (0)
++
++#define EFX_INSERT_FIELD64(...) \
++ cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
++
++#define EFX_INSERT_FIELD32(...) \
++ cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
++
++#define EFX_INPLACE_MASK64(min, max, field) \
++ EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
++
++#define EFX_INPLACE_MASK32(min, max, field) \
++ EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
++
++#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \
++ (oword).u64[0] = (((oword).u64[0] \
++ & ~EFX_INPLACE_MASK64(0, 63, field)) \
++ | EFX_INSERT_FIELD64(0, 63, field, value)); \
++ (oword).u64[1] = (((oword).u64[1] \
++ & ~EFX_INPLACE_MASK64(64, 127, field)) \
++ | EFX_INSERT_FIELD64(64, 127, field, value)); \
++ } while (0)
++
++#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \
++ (qword).u64[0] = (((qword).u64[0] \
++ & ~EFX_INPLACE_MASK64(0, 63, field)) \
++ | EFX_INSERT_FIELD64(0, 63, field, value)); \
++ } while (0)
++
++#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \
++ (oword).u32[0] = (((oword).u32[0] \
++ & ~EFX_INPLACE_MASK32(0, 31, field)) \
++ | EFX_INSERT_FIELD32(0, 31, field, value)); \
++ (oword).u32[1] = (((oword).u32[1] \
++ & ~EFX_INPLACE_MASK32(32, 63, field)) \
++ | EFX_INSERT_FIELD32(32, 63, field, value)); \
++ (oword).u32[2] = (((oword).u32[2] \
++ & ~EFX_INPLACE_MASK32(64, 95, field)) \
++ | EFX_INSERT_FIELD32(64, 95, field, value)); \
++ (oword).u32[3] = (((oword).u32[3] \
++ & ~EFX_INPLACE_MASK32(96, 127, field)) \
++ | EFX_INSERT_FIELD32(96, 127, field, value)); \
++ } while (0)
++
++#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \
++ (qword).u32[0] = (((qword).u32[0] \
++ & ~EFX_INPLACE_MASK32(0, 31, field)) \
++ | EFX_INSERT_FIELD32(0, 31, field, value)); \
++ (qword).u32[1] = (((qword).u32[1] \
++ & ~EFX_INPLACE_MASK32(32, 63, field)) \
++ | EFX_INSERT_FIELD32(32, 63, field, value)); \
++ } while (0)
++
++#define EFX_SET_DWORD_FIELD(dword, field, value) do { \
++ (dword).u32[0] = (((dword).u32[0] \
++ & ~EFX_INPLACE_MASK32(0, 31, field)) \
++ | EFX_INSERT_FIELD32(0, 31, field, value)); \
++ } while (0)
++
++#if BITS_PER_LONG == 64
++#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
++#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
++#else
++#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
++#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
++#endif
++
++#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
++ if (FALCON_REV(efx) == FALCON_REV_B0) { \
++ EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
++ } else { \
++ EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
++ } \
++} while (0)
++
++#define EFX_QWORD_FIELD_VER(efx, qword, field) \
++ (FALCON_REV(efx) == FALCON_REV_B0 ? \
++ EFX_QWORD_FIELD((qword), field##_B0) : \
++ EFX_QWORD_FIELD((qword), field##_A1))
++
++/* Used to avoid compiler warnings about shift range exceeding width
++ * of the data types when dma_addr_t is only 32 bits wide.
++ */
++#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
++#define EFX_DMA_TYPE_WIDTH(width) \
++ (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
++#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
++ ~((u64) 0) : ~((u32) 0))
++#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
++
++/*
++ * Determine if a DMA address is over the 4GB threshold
++ *
++ * Defined in a slightly tortuous way to avoid compiler warnings.
++ */
++static inline int efx_is_over_4gb(dma_addr_t address)
++{
++ if (DMA_ADDR_T_WIDTH > 32)
++ return (((u64) address) >> 32) ? 1 : 0;
++ else
++ /* Can never be true */
++ return 0;
++}
++
++#endif /* EFX_BITFIELD_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/boards.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/boards.c
+--- linux-2.6.18.8/drivers/net/sfc/boards.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/boards.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,528 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "net_driver.h"
++#include "phy.h"
++#include "lm87_support.h"
++#include "boards.h"
++#include "efx.h"
++
++/* Macros for unpacking the board revision */
++/* The revision info is in host byte order. */
++#define BOARD_TYPE(_rev) (_rev >> 8)
++#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
++#define BOARD_MINOR(_rev) (_rev & 0xf)
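++
++/* Worked example with a hypothetical value: revision_info = 0x0213
++ * unpacks as BOARD_TYPE 0x02 (SFE4002), BOARD_MAJOR 1 and
++ * BOARD_MINOR 3, which efx_set_board_info() below reports as
++ * "rev B3". */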
++
++/* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */
++#define BLINK_INTERVAL (HZ/2)
++
++static void blink_led_timer(unsigned long context)
++{
++ struct efx_nic *efx = (struct efx_nic *)context;
++ struct efx_blinker *bl = &efx->board_info.blinker;
++ efx->board_info.set_fault_led(efx, bl->state);
++ bl->state = !bl->state;
++ if (bl->resubmit) {
++ bl->timer.expires = jiffies + BLINK_INTERVAL;
++ add_timer(&bl->timer);
++ }
++}
++
++static void board_blink(struct efx_nic *efx, int blink)
++{
++ struct efx_blinker *blinker = &efx->board_info.blinker;
++
++ /* The rtnl mutex serialises all ethtool ioctls, so
++ * nothing special needs doing here. */
++ if (blink) {
++ blinker->resubmit = 1;
++ blinker->state = 0;
++ setup_timer(&blinker->timer, blink_led_timer,
++ (unsigned long)efx);
++ blinker->timer.expires = jiffies + BLINK_INTERVAL;
++ add_timer(&blinker->timer);
++ } else {
++ blinker->resubmit = 0;
++ if (blinker->timer.function)
++ del_timer_sync(&blinker->timer);
++ efx->board_info.set_fault_led(efx, 0);
++ }
++}
++
++
++struct sensor_conf {
++ const char *name;
++ const unsigned high;
++ const unsigned low;
++};
++
++#define NO_LIMIT ((unsigned)-1)
++
++#define LM87_SENSOR_BYTES (18)
++
++static int sensor_limits_to_bytes(const struct sensor_conf *limits,
++ int nlimits, u8 *bytes, int maxbytes)
++{
++ int i, nbytes;
++ nbytes = 0;
++ for (i = 0; i < nlimits; i++) {
++ bytes[nbytes++] = limits[i].high;
++ if (limits[i].low != NO_LIMIT)
++ bytes[nbytes++] = limits[i].low;
++ /* We may have overrun by one at this point, but this test
++ * should only trigger in development drivers as the sizes
++ * are not dynamic. */
++ if (nbytes > maxbytes) {
++ printk(KERN_ERR "%s: out of space!\n", __func__);
++ break;
++ }
++ }
++ return nbytes;
++}
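++
++/* Worked example (illustrative only): the entries
++ * {"1.8V line", 0x91, 0x83} and {"1.66V line", 0xb2, NO_LIMIT}
++ * flatten to the three bytes 0x91, 0x83, 0xb2. A NO_LIMIT low bound
++ * emits nothing, which is how single-value sensors such as AIN1/AIN2
++ * below stay at one byte each. */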
++
++/*****************************************************************************
++ * Support for the SFE4002
++ *
++ */
++/* LM87 configuration data for the sensor on the SFE4002 board */
++static const struct sensor_conf sfe4002_lm87_limits[] = {
++ {"1.8V line", 0x91, 0x83}, /* 2.5V sensor, scaled for 1.8V */
++ {"1.2V line", 0x5a, 0x51}, /* Vccp1 */
++ {"3.3V line", 0xca, 0xb6},
++ {"5V line", 0xc9, 0xb6},
++ {"12V line", 0xe0, 0xb0},
++ {"1V line", 0x4b, 0x44}, /* vccp2 */
++ {"Ext. temp.", 0x46, 0x0a}, /* ASIC temp. */
++ {"Int. temp.", 0x3c, 0x0a}, /* Board temp. */
++ {"1.66V line", 0xb2, NO_LIMIT}, /* AIN1 only takes 1 value */
++ {"1.5V line", 0xa1, NO_LIMIT} /* AIN2 only takes 1 value */
++};
++
++static const int sfe4002_lm87_nlimits = ARRAY_SIZE(sfe4002_lm87_limits);
++
++static u16 sfe4002_lm87_irq_mask = EFX_LM87_NO_INTS;
++
++/* I2C ID of the onboard LM87 chip. This is board-specific as the bottom two
++ * bits are set by strap pins */
++#define SFE4002_LM87_I2C_ID (0x2e)
++
++/****************************************************************************/
++/* LED allocations. Note that on rev A0 boards the schematic and the reality
++ * differ: red and green are swapped. Below is the fixed (A1) layout (there
++ * are only 3 A0 boards in existence, so no real reason to make this
++ * conditional).
++ */
++#define SFE4002_FAULT_LED (2) /* Red */
++#define SFE4002_RX_LED (0) /* Green */
++#define SFE4002_TX_LED (1) /* Amber */
++
++static int sfe4002_init_leds(struct efx_nic *efx)
++{
++ /* Set the TX and RX LEDs to reflect status and activity, and the
++ * fault LED off */
++ xfp_set_led(efx, SFE4002_TX_LED,
++ QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
++ xfp_set_led(efx, SFE4002_RX_LED,
++ QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
++ xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
++ efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
++ return 0;
++}
++
++static void sfe4002_fault_led(struct efx_nic *efx, int state)
++{
++ xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
++ QUAKE_LED_OFF);
++}
++
++static int sfe4002_sensor_meaning(struct efx_nic *efx, int limit_num,
++ unsigned val)
++{
++ const struct sensor_conf *lim = &sfe4002_lm87_limits[limit_num];
++ if (lim->low == NO_LIMIT)
++ EFX_ERR(efx, "%10s 0x%02x (nominal value 0x%02x)\n", lim->name,
++ val, lim->high);
++ else
++ EFX_ERR(efx, "%10s 0x%02x (nominal range 0x%02x - 0x%02x)\n",
++ lim->name, val, lim->high, lim->low);
++ return 1;
++}
++
++static int sfe4002_check_hw(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Rev. A0 SFE4002 boards report a temperature fault the whole time
++ * (bad sensor), so we mask it out. */
++ unsigned alarm_mask = (efx->board_info.minor > 0) ?
++ 0 : ~EFX_LM87_ETMP_INT;
++
++ /* Check the sensor (NOP if not present). */
++ rc = efx_check_lm87(efx, alarm_mask);
++
++ /* We treat both lm87 interrupts and failure to talk to the lm87
++ * as problems (since failure will only be reported if we did
++ * find the sensor at probe time). */
++ if (rc)
++ EFX_ERR(efx, "sensor alert!\n");
++ return rc;
++}
++
++static int sfe4002_init(struct efx_nic *efx)
++{
++ u8 lm87_bytes[LM87_SENSOR_BYTES];
++ int nbytes;
++ int rc;
++
++ efx->board_info.monitor = sfe4002_check_hw;
++ efx->board_info.interpret_sensor = sfe4002_sensor_meaning;
++ efx->board_info.init_leds = sfe4002_init_leds;
++ efx->board_info.set_fault_led = sfe4002_fault_led;
++ efx->board_info.blink = board_blink;
++ /* To clean up shut down the lm87 (NOP if not present) */
++ efx->board_info.fini = efx_remove_lm87;
++
++ nbytes = sensor_limits_to_bytes(sfe4002_lm87_limits,
++ sfe4002_lm87_nlimits, lm87_bytes,
++ LM87_SENSOR_BYTES);
++
++ /* Activate the lm87 sensor if present (succeeds if nothing there) */
++ rc = efx_probe_lm87(efx, SFE4002_LM87_I2C_ID,
++ lm87_bytes, nbytes, sfe4002_lm87_irq_mask);
++
++ return rc;
++}
++
++/*****************************************************************************
++ * Support for the SFE4003
++ *
++ */
++/* LM87 configuration data for the sensor on the SFE4003 board */
++static const struct sensor_conf sfe4003_lm87_limits[] = {
++ {"1.5V line", 0x78, 0x6d}, /* 2.5V input, values scaled for 1.5V */
++ {"1.2V line", 0x5a, 0x51}, /* Vccp1 */
++ {"3.3V line", 0xca, 0xb6},
++ {"5V line", 0xc0, 0x00}, /* Sensor not connected. */
++ {"12V line", 0xe0, 0xb0},
++ {"1V line", 0x4b, 0x44}, /* Vccp2 */
++ {"Ext. temp.", 0x46, 0x0a}, /* ASIC temp. */
++ {"Int. temp.", 0x3c, 0x0a}, /* Board temp. */
++ {"", 0xff, NO_LIMIT}, /* FAN1/AIN1 unused */
++ {"", 0xff, NO_LIMIT} /* FAN2/AIN2 unused */
++};
++
++static const int sfe4003_lm87_nlimits = ARRAY_SIZE(sfe4003_lm87_limits);
++
++static u16 sfe4003_lm87_irq_mask = EFX_LM87_NO_INTS;
++
++
++static int sfe4003_sensor_meaning(struct efx_nic *efx, int limit_num,
++ unsigned val)
++{
++ const struct sensor_conf *lim = &sfe4003_lm87_limits[limit_num];
++ if (lim->low == NO_LIMIT)
++ return 0; /* Neither AIN1 nor AIN2 mean anything to us */
++ else
++ EFX_ERR(efx, "%10s 0x%02x (nominal range 0x%02x - 0x%02x)\n",
++ lim->name, val, lim->high, lim->low);
++ return 1;
++}
++
++/* I2C ID of the onboard LM87 chip. This is board-specific as the bottom two
++ * bits are set by strap pins */
++#define SFE4003_LM87_I2C_ID (0x2e)
++
++/* Board-specific LED info. */
++#define SFE4003_RED_LED_GPIO (11)
++#define SFE4003_LED_ON (1)
++#define SFE4003_LED_OFF (0)
++
++static void sfe4003_fault_led(struct efx_nic *efx, int state)
++{
++ /* The LEDs were not wired to GPIOs before A3 */
++ if (efx->board_info.minor < 3 && efx->board_info.major == 0)
++ return;
++
++ txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO,
++ state ? SFE4003_LED_ON : SFE4003_LED_OFF);
++}
++
++static int sfe4003_init_leds(struct efx_nic *efx)
++{
++ /* The LEDs were not wired to GPIOs before A3 */
++ if (efx->board_info.minor < 3 && efx->board_info.major == 0)
++ return 0;
++
++ txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
++ txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
++ return 0;
++}
++
++static int sfe4003_check_hw(struct efx_nic *efx)
++{
++ int rc;
++ /* Rev. A0/A1/A2 SFE4003 boards report a temperature fault the whole
++ * time (bad sensor), so we mask it out. */
++ unsigned alarm_mask =
++ ~(EFX_LM87_ETMP_INT | EFX_LM87_FAN1_INT | EFX_LM87_FAN2_INT);
++
++ /* Check the sensor (NOP if not present). */
++
++ rc = efx_check_lm87(efx, alarm_mask);
++ /* We treat both lm87 interrupts and failure to talk to the lm87
++ * as problems (since failure will only be reported if we did
++ * find the sensor at probe time). */
++ if (rc)
++ EFX_ERR(efx, "sensor alert!\n");
++
++ return rc;
++}
++
++static int sfe4003_init(struct efx_nic *efx)
++{
++ u8 lm87_bytes[LM87_SENSOR_BYTES];
++ int nbytes;
++ int rc;
++ efx->board_info.monitor = sfe4003_check_hw;
++ efx->board_info.interpret_sensor = sfe4003_sensor_meaning;
++ efx->board_info.init_leds = sfe4003_init_leds;
++ efx->board_info.set_fault_led = sfe4003_fault_led;
++ efx->board_info.blink = board_blink;
++ /* To clean up shut down the lm87 (NOP if not present) */
++ efx->board_info.fini = efx_remove_lm87;
++
++ nbytes = sensor_limits_to_bytes(sfe4003_lm87_limits,
++ sfe4003_lm87_nlimits, lm87_bytes,
++ LM87_SENSOR_BYTES);
++
++ /* Activate the lm87 sensor if present (succeeds if nothing there) */
++ rc = efx_probe_lm87(efx, SFE4003_LM87_I2C_ID,
++ lm87_bytes, nbytes, sfe4003_lm87_irq_mask);
++
++ if (rc < 0)
++ EFX_ERR(efx, "Temperature sensor probe failure: "
++ "please check the jumper position\n");
++ return rc;
++}
++
++/*****************************************************************************
++ * Support for the SFE4005
++ *
++ */
++/* LM87 configuration data for the sensor on the SFE4005 board */
++static const u8 sfe4005_lm87_limits[] = {
++ 0x51, /* 2.5V high lim. (actually monitors the 1.0V line, so 1050mV) */
++ 0x49, /* 2.5V low lim. (950mV) */
++ 0xf6, /* Vccp1 high lim. (3.3V rail, 3465 mV) */
++ 0xde, /* Vccp1 low lim. (3.3V rail, 3135 mV) */
++ 0xca, /* 3.3V AUX high lim. (3465 mV) */
++ 0xb6, /* 3.3V AUX low lim. (3135mV) */
++ 0xc0, /* 5V high lim. (not connected) */
++ 0x00, /* 5V low lim. (not connected) */
++ 0xd0, /* 12V high lim. (13000mV) */
++ 0xb0, /* 12V low lim. (11000mV) */
++ 0xc0, /* Vccp2 high lim. (unused) */
++ 0x00, /* Vccp2 low lim. (unused) */
++ 0x46, /* Ext temp 1 (ASIC) high lim. */
++ 0x0a, /* Ext temp 1 low lim. */
++ 0x3c, /* Int temp (board) high lim. */
++ 0x0a, /* Int temp low lim. */
++ 0xff, /* Fan 1 high (unused) */
++ 0xff, /* Fan 2 high (unused) */
++};
++
++#define SFE4005_LM87_I2C_ID (0x2e)
++
++/* Mask all LM87 interrupts until the monitoring is interrupt driven. */
++#define SFE4005_LM87_IRQMASK EFX_LM87_NO_INTS
++
++#define SFE4005_PCF8575_I2C_ID (0x20)
++/* Definitions for the I/O expander that controls the CX4 chip:
++ * which PCF8575 pin maps to which function */
++#define SFE4005_PORT0_EXTLOOP (1 << 0)
++#define SFE4005_PORT1_EXTLOOP (1 << 1)
++#define SFE4005_HOSTPROT_LOOP (1 << 2)
++#define SFE4005_BCAST (1 << 3) /* TX on both ports */
++#define SFE4005_PORT0_EQ (1 << 4)
++#define SFE4005_PORT1_EQ (1 << 5)
++#define SFE4005_HOSTPORT_EQ (1 << 6)
++#define SFE4005_PORTSEL (1 << 7) /* Which port (for RX in BCAST mode) */
++#define SFE4005_PORT0_PRE_LBN (8) /* Preemphasis on port 0 (2 bits)*/
++#define SFE4005_PORT1_PRE_LBN (10) /* Preemphasis on port 1 (2 bits)*/
++#define SFE4005_HOSTPORT_PRE_LBN (12) /* Preemphasis on host port (2 bits) */
++#define SFE4005_UNUSED (1 << 14)
++#define SFE4005_CX4uC_nRESET (1 << 15) /* Reset the controller on CX4 chip */
++
++
++/* By default only turn on host port EQ. Can also OR in SFE4005_PORT0_EQ,
++ * SFE4005_PORT1_EQ but this hasn't been seen to make a difference. */
++#define SFE4005_CX4_DEFAULTS (SFE4005_CX4uC_nRESET | SFE4005_HOSTPORT_EQ)
++
++static int sfe4005_write_ioexpander(struct efx_nic *efx)
++{
++ unsigned long iobits = (unsigned long)efx->phy_data;
++ struct efx_i2c_interface *i2c = &efx->i2c;
++ u8 send[2], check[2];
++ int rc;
++ /* Do not, EVER, deassert nRESET as that will reset Falcon too,
++ * and the driver won't know to repush the configuration, so
++ * nothing will work until the next power cycle. */
++ BUG_ON(!(iobits & SFE4005_CX4uC_nRESET));
++ send[0] = (iobits & 0xff);
++ send[1] = ((iobits >> 8) & 0xff);
++ rc = efx_i2c_send_bytes(i2c, SFE4005_PCF8575_I2C_ID, send, 2);
++ if (rc) {
++ EFX_ERR(efx, "failed to write to I/O expander: %d\n", rc);
++ return rc;
++ }
++ /* Paranoia: just check what the I/O expander reads back */
++ rc = efx_i2c_recv_bytes(i2c, SFE4005_PCF8575_I2C_ID, check, 2);
++ if (rc)
++ EFX_ERR(efx, "failed to read back from I/O expander: %d\n", rc);
++ else if (check[0] != send[0] || check[1] != send[1])
++ EFX_ERR(efx, "read back wrong value from I/O expander: "
++ "wanted %.2x%.2x, got %.2x%.2x\n",
++ send[1], send[0], check[1], check[0]);
++ return rc;
++}
++
++static int sfe4005_init(struct efx_nic *efx)
++{
++ unsigned long iobits = SFE4005_CX4_DEFAULTS;
++ int rc;
++
++ /* There is no PHY as such on the SFE4005 so phy_data is ours. */
++ efx->phy_data = (void *)iobits;
++
++ /* Push the values */
++ rc = sfe4005_write_ioexpander(efx);
++ if (rc)
++ return rc;
++
++ /* Activate the lm87 sensor if present (succeeds if nothing there) */
++ rc = efx_probe_lm87(efx, SFE4005_LM87_I2C_ID,
++ sfe4005_lm87_limits,
++ sizeof(sfe4005_lm87_limits), SFE4005_LM87_IRQMASK);
++
++ /* To clean up shut down the lm87 (NOP if not present) */
++ efx->board_info.fini = efx_remove_lm87;
++
++ return rc;
++}
++
++/* This will get expanded as board-specific details get moved out of the
++ * PHY drivers. */
++struct efx_board_data {
++ const char *ref_model;
++ const char *gen_type;
++ int (*init) (struct efx_nic *nic);
++ unsigned mwatts;
++};
++
++static void dummy_fini(struct efx_nic *nic)
++{
++}
++
++static int dummy_init(struct efx_nic *nic)
++{
++ nic->board_info.fini = dummy_fini;
++ return 0;
++}
++
++/* Maximum board power (mW)
++ * Falcon controller ASIC accounts for 2.2W
++ * 10Xpress PHY accounts for 12W
++ *
++ */
++#define SFE4001_POWER 18000
++#define SFE4002_POWER 7500
++#define SFE4003_POWER 4500
++#define SFE4005_POWER 4500
++
++static struct efx_board_data board_data[] = {
++ [EFX_BOARD_INVALID] =
++ {NULL, NULL, dummy_init, 0},
++ [EFX_BOARD_SFE4001] =
++ {"SFE4001", "10GBASE-T adapter", sfe4001_poweron, SFE4001_POWER },
++ [EFX_BOARD_SFE4002] =
++ {"SFE4002", "XFP adapter", sfe4002_init, SFE4002_POWER },
++ [EFX_BOARD_SFE4003] =
++ {"SFE4003", "10GBASE-CX4 adapter", sfe4003_init, SFE4003_POWER },
++ [EFX_BOARD_SFE4005] =
++ {"SFE4005", "10G blade adapter", sfe4005_init, SFE4005_POWER },
++};
++
++int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
++{
++ int rc = 0;
++ struct efx_board_data *data;
++
++ if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
++ EFX_ERR(efx, "squashing unknown board type %d\n",
++ BOARD_TYPE(revision_info));
++ revision_info = 0;
++ }
++
++ if (BOARD_TYPE(revision_info) == 0) {
++ efx->board_info.major = 0;
++ efx->board_info.minor = 0;
++ /* For early boards that don't have revision info, there is
++ * only 1 board for each PHY type, so we can work it out, with
++ * the exception of the PHY-less boards. */
++ switch (efx->phy_type) {
++ case PHY_TYPE_10XPRESS:
++ efx->board_info.type = EFX_BOARD_SFE4001;
++ break;
++ case PHY_TYPE_XFP:
++ efx->board_info.type = EFX_BOARD_SFE4002;
++ break;
++ case PHY_TYPE_CX4_RTMR:
++ efx->board_info.type = EFX_BOARD_SFE4003;
++ break;
++ default:
++ efx->board_info.type = 0;
++ break;
++ }
++ } else {
++ efx->board_info.type = BOARD_TYPE(revision_info);
++ efx->board_info.major = BOARD_MAJOR(revision_info);
++ efx->board_info.minor = BOARD_MINOR(revision_info);
++ }
++
++ data = &board_data[efx->board_info.type];
++
++ /* Report the board model number or generic type for recognisable
++ * boards. */
++ if (efx->board_info.type != 0)
++ EFX_INFO(efx, "board is %s rev %c%d\n",
++ (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
++ ? data->ref_model : data->gen_type,
++ 'A' + efx->board_info.major, efx->board_info.minor);
++
++ efx->board_info.init = data->init;
++ efx->board_info.mwatts = data->mwatts;
++
++ return rc;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/boards.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/boards.h
+--- linux-2.6.18.8/drivers/net/sfc/boards.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/boards.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,51 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_BOARDS_H
++#define EFX_BOARDS_H
++
++/* Board IDs (must fit in 8 bits). Note that 0 must never be assigned because
++ * on early boards it means there is no revision info. Board types pre 400x
++ * are not covered here, but this is not a problem because:
++ * - the early Falcon boards (FPGA, 401, 403) don't have any extra H/W we
++ * need care about and aren't being updated.
++ */
++enum efx_board_type {
++ EFX_BOARD_INVALID = 0, /* Early boards do not have board rev. info. */
++ EFX_BOARD_SFE4001 = 1,
++ EFX_BOARD_SFE4002 = 2,
++ EFX_BOARD_SFE4003 = 3,
++ EFX_BOARD_SFE4005 = 4,
++ /* Insert new types before here */
++ EFX_BOARD_MAX
++};
++
++extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
++
++/* SFE4001 (10GBASE-T) */
++extern int sfe4001_poweron(struct efx_nic *efx);
++extern void sfe4001_poweroff(struct efx_nic *efx);
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/config.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/config.h
+--- linux-2.6.18.8/drivers/net/sfc/config.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/config.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1 @@
++/* SFC config options can go here */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/debugfs.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/debugfs.c
+--- linux-2.6.18.8/drivers/net/sfc/debugfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/debugfs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,924 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/module.h>
++#include <linux/pci.h>
++/* For out-of-tree builds we always need procfs, if only for a compatibility
++ * symlink.
++ */
++#include <linux/proc_fs.h>
++#include <linux/dcache.h>
++#include <linux/seq_file.h>
++#include "net_driver.h"
++#include "efx.h"
++#include "debugfs.h"
++#include "falcon.h"
++
++/* EFX_USE_DEBUGFS is defined by kernel_compat.h so we can't decide whether to
++ * include this earlier.
++ */
++#ifdef EFX_USE_DEBUGFS
++#include <linux/debugfs.h>
++#endif
++
++#ifndef PRIu64
++# if (BITS_PER_LONG == 64)
++# define PRIu64 "lu"
++# else
++# define PRIu64 "llu"
++# endif
++#endif
++
++#ifndef EFX_USE_DEBUGFS
++
++static void efx_debugfs_remove(struct proc_dir_entry *entry)
++{
++ if (entry)
++ remove_proc_entry(entry->name, entry->parent);
++}
++#define debugfs_remove efx_debugfs_remove
++
++#define debugfs_create_dir proc_mkdir
++#define debugfs_create_symlink proc_symlink
++
++#endif /* !EFX_USE_DEBUGFS */
++
++/* Parameter definition bound to a structure - each file has one of these */
++struct efx_debugfs_bound_param {
++ const struct efx_debugfs_parameter *param;
++ void *structure;
++};
++
++
++/* Maximum length for a name component or symlink target */
++#define EFX_DEBUGFS_NAME_LEN 32
++
++
++/* Top-level debug directory ([/sys/kernel]/debug/sfc) */
++static struct dentry *efx_debug_root;
++
++/* "cards" directory ([/sys/kernel]/debug/sfc/cards) */
++static struct dentry *efx_debug_cards;
++
++
++/* Sequential file interface to bound parameters */
++
++#if defined(EFX_USE_DEBUGFS)
++
++static int efx_debugfs_seq_show(struct seq_file *file, void *v)
++{
++ struct efx_debugfs_bound_param *binding =
++ (struct efx_debugfs_bound_param *)file->private;
++
++ return binding->param->reader(file,
++ binding->structure +
++ binding->param->offset);
++}
++
++static int efx_debugfs_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, efx_debugfs_seq_show, inode->i_private);
++}
++
++#else /* !EFX_USE_DEBUGFS */
++
++static int efx_debugfs_seq_show(struct seq_file *file, void *v)
++{
++ struct proc_dir_entry *entry = (struct proc_dir_entry *)file->private;
++ struct efx_debugfs_parameter *param =
++ (struct efx_debugfs_parameter *)entry->data;
++ void *structure = (void *)entry->read_proc;
++
++ if (!structure)
++ return -EIO;
++
++ return param->reader(file, structure + param->offset);
++}
++
++static int efx_debugfs_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, efx_debugfs_seq_show, PROC_I(inode)->pde);
++}
++
++#endif /* EFX_USE_DEBUGFS */
++
++
++static struct file_operations efx_debugfs_file_ops = {
++ .owner = THIS_MODULE,
++ .open = efx_debugfs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release
++};
++
++
++#if defined(EFX_USE_DEBUGFS)
++
++/**
++ * efx_fini_debugfs_child - remove a named child of a debugfs directory
++ * @dir: Directory
++ * @name: Name of child
++ *
++ * This removes the named child from the directory, if it exists.
++ */
++void efx_fini_debugfs_child(struct dentry *dir, const char *name)
++{
++ struct qstr child_name;
++ struct dentry *child;
++
++ child_name.len = strlen(name);
++ child_name.name = name;
++ child_name.hash = full_name_hash(child_name.name, child_name.len);
++ child = d_lookup(dir, &child_name);
++ if (child) {
++ /* If it's a "regular" file, free its parameter binding */
++ if (S_ISREG(child->d_inode->i_mode))
++ kfree(child->d_inode->i_private);
++ debugfs_remove(child);
++ dput(child);
++ }
++}
++
++#else /* !EFX_USE_DEBUGFS */
++
++void efx_fini_debugfs_child(struct proc_dir_entry *dir, const char *name)
++{
++ remove_proc_entry(name, dir);
++}
++
++#endif /* EFX_USE_DEBUGFS */
++
++/*
++ * Remove a debugfs directory.
++ *
++ * This removes the named parameter-files and sym-links from the
++ * directory, and the directory itself. It does not do any recursion
++ * to subdirectories.
++ */
++static void efx_fini_debugfs_dir(struct dentry *dir,
++ struct efx_debugfs_parameter *params,
++ const char *const *symlink_names)
++{
++ if (!dir)
++ return;
++
++ while (params->name) {
++ efx_fini_debugfs_child(dir, params->name);
++ params++;
++ }
++ while (symlink_names && *symlink_names) {
++ efx_fini_debugfs_child(dir, *symlink_names);
++ symlink_names++;
++ }
++ debugfs_remove(dir);
++}
++
++/* Functions for printing various types of parameter. */
++
++int efx_debugfs_read_uint(struct seq_file *file, void *data)
++{
++ return seq_printf(file, "%#x\n", *(unsigned int *)data);
++}
++
++int efx_debugfs_read_int(struct seq_file *file, void *data)
++{
++ return seq_printf(file, "%d\n", *(int *)data);
++}
++
++int efx_debugfs_read_atomic(struct seq_file *file, void *data)
++{
++ unsigned int value = atomic_read((atomic_t *) data);
++
++ return seq_printf(file, "%#x\n", value);
++}
++
++int efx_debugfs_read_dword(struct seq_file *file, void *data)
++{
++ unsigned int value = EFX_DWORD_FIELD(*(efx_dword_t *) data,
++ EFX_DWORD_0);
++
++ return seq_printf(file, "%#x\n", value);
++}
++
++static int efx_debugfs_read_int_mode(struct seq_file *file, void *data)
++{
++ unsigned int value = *(enum efx_int_mode *) data;
++
++ return seq_printf(file, "%d => %s\n", value,
++ STRING_TABLE_LOOKUP(value, efx_interrupt_mode));
++}
++
++#define EFX_INT_MODE_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ enum efx_int_mode, efx_debugfs_read_int_mode)
++
++static int efx_debugfs_read_loop_mode(struct seq_file *file, void *data)
++{
++ unsigned int value = *(enum efx_loopback_mode *)data;
++
++ return seq_printf(file, "%d => %s\n", value,
++ STRING_TABLE_LOOKUP(value, efx_loopback_mode));
++}
++
++#define EFX_LOOPBACK_MODE_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ enum efx_loopback_mode, efx_debugfs_read_loop_mode)
++
++static int efx_debugfs_read_phy_type(struct seq_file *file, void *data)
++{
++ unsigned int value = *(enum phy_type *) data;
++
++ return seq_printf(file, "%d => %s\n", value,
++ STRING_TABLE_LOOKUP(value, efx_phy_type));
++}
++
++#define EFX_PHY_TYPE_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ enum phy_type, efx_debugfs_read_phy_type)
++
++int efx_debugfs_read_string(struct seq_file *file, void *data)
++{
++ return seq_puts(file, (const char *)data);
++}
++
++
++/**
++ * efx_init_debugfs_files - create parameter-files in a debugfs directory
++ * @parent: Containing directory
++ * @params: Pointer to zero-terminated parameter definition array
++ * @structure: Structure containing parameters
++ *
++ * Add parameter-files to the given debugfs directory. Return a
++ * negative error code or 0 on success.
++ */
++static int efx_init_debugfs_files(struct dentry *parent,
++ struct efx_debugfs_parameter *params,
++ void *structure)
++{
++ struct efx_debugfs_parameter *param = params;
++
++ while (param->name) {
++ struct dentry *entry;
++#if defined(EFX_USE_DEBUGFS)
++ struct efx_debugfs_bound_param *binding;
++
++ binding = kmalloc(sizeof(*binding), GFP_KERNEL);
++ if (!binding)
++ goto err;
++ binding->param = param;
++ binding->structure = structure;
++
++ entry = debugfs_create_file(param->name, S_IRUGO, parent,
++ binding, &efx_debugfs_file_ops);
++ if (!entry) {
++ kfree(binding);
++ goto err;
++ }
++#else
++ entry = create_proc_entry(param->name, S_IRUGO, parent);
++ if (!entry)
++ goto err;
++ /*
++ * We have no good way to free a binding created here.
++ * However, once we install our file_operations the
++ * read_proc pointer becomes redundant and we can
++ * abuse it as a structure pointer.
++ */
++ entry->data = param;
++ entry->read_proc = NULL;
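++		/* The write barriers pair with the read in
++		 * efx_debugfs_seq_show(): a reader that finds the
++		 * structure pointer in read_proc is guaranteed to see
++		 * the new proc_fops and the param binding, and until
++		 * the final store below lands, efx_debugfs_seq_show()
++		 * sees NULL and returns -EIO. */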
++ smp_wmb();
++ entry->proc_fops = &efx_debugfs_file_ops;
++ smp_wmb();
++ entry->read_proc = (read_proc_t *) structure;
++#endif
++
++ param++;
++ }
++
++ return 0;
++
++ err:
++ while (param != params) {
++ param--;
++ efx_fini_debugfs_child(parent, param->name);
++ }
++ return -ENOMEM;
++}
++
++/**
++ * efx_init_debugfs_netdev - create debugfs sym-links for net device
++ * @net_dev: Net device
++ *
++ * Create sym-links named after @net_dev to the debugfs directories for
++ * the corresponding NIC and port. Return a negative error code or 0 on
++ * success. The sym-links must be cleaned up using
++ * efx_fini_debugfs_netdev().
++ */
++int efx_init_debugfs_netdev(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ char name[EFX_DEBUGFS_NAME_LEN];
++ char target[EFX_DEBUGFS_NAME_LEN];
++ size_t len;
++
++ if (snprintf(name, sizeof(name), "nic_%s", net_dev->name) >=
++ sizeof(name))
++ return -ENAMETOOLONG;
++ if (snprintf(target, sizeof(target), "cards/%s", pci_name(efx->pci_dev))
++ >= sizeof(target))
++ return -ENAMETOOLONG;
++ efx->debug_symlink = debugfs_create_symlink(name,
++ efx_debug_root, target);
++ if (!efx->debug_symlink)
++ return -ENOMEM;
++
++ if (snprintf(name, sizeof(name), "if_%s", net_dev->name) >=
++ sizeof(name))
++ return -ENAMETOOLONG;
++ len = snprintf(target, sizeof(target),
++ "cards/%s/port0", pci_name(efx->pci_dev));
++ if (len >= sizeof(target))
++ return -ENAMETOOLONG;
++ efx->debug_port_symlink = debugfs_create_symlink(name,
++ efx_debug_root,
++ target);
++ if (!efx->debug_port_symlink)
++ return -ENOMEM;
++
++ return 0;
++}
++
++/**
++ * efx_fini_debugfs_netdev - remove debugfs sym-links for net device
++ * @net_dev: Net device
++ *
++ * Remove sym-links created for @net_dev by efx_init_debugfs_netdev().
++ */
++void efx_fini_debugfs_netdev(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ debugfs_remove(efx->debug_port_symlink);
++ efx->debug_port_symlink = NULL;
++ debugfs_remove(efx->debug_symlink);
++ efx->debug_symlink = NULL;
++}
++
++/* Per-port parameters */
++static struct efx_debugfs_parameter efx_debugfs_port_parameters[] = {
++ EFX_NAMED_PARAMETER(enabled, struct efx_nic, port_enabled,
++ int, efx_debugfs_read_int),
++ EFX_INT_PARAMETER(struct efx_nic, net_dev_registered),
++ EFX_INT_PARAMETER(struct efx_nic, rx_checksum_enabled),
++ EFX_ATOMIC_PARAMETER(struct efx_nic, netif_stop_count),
++ EFX_INT_PARAMETER(struct efx_nic, link_up),
++ EFX_UINT_PARAMETER(struct efx_nic, link_options),
++ EFX_INT_PARAMETER(struct efx_nic, promiscuous),
++ EFX_UINT_PARAMETER(struct efx_nic, loopback_modes),
++ EFX_LOOPBACK_MODE_PARAMETER(struct efx_nic, loopback_mode),
++ EFX_PHY_TYPE_PARAMETER(struct efx_nic, phy_type),
++ EFX_NAMED_PARAMETER(phy_id, struct efx_nic, mii.phy_id,
++ int, efx_debugfs_read_int),
++ EFX_UINT_PARAMETER(struct efx_nic, n_link_state_changes),
++ {NULL},
++};
++
++/**
++ * efx_init_debugfs_port - create debugfs directory for port
++ * @efx: Efx NIC
++ *
++ * Create a debugfs directory containing parameter-files for @efx.
++ * Return a negative error code or 0 on success. The directory must be
++ * cleaned up using efx_fini_debugfs_port().
++ */
++int efx_init_debugfs_port(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Create directory */
++ efx->debug_port_dir = debugfs_create_dir("port0", efx->debug_dir);
++ if (!efx->debug_port_dir)
++ return -ENOMEM;
++
++ /* Create files */
++ rc = efx_init_debugfs_files(efx->debug_port_dir,
++ efx_debugfs_port_parameters,
++ (void *)efx);
++ if (rc)
++ efx_fini_debugfs_port(efx);
++
++ return rc;
++}
++
++/**
++ * efx_fini_debugfs_port - remove debugfs directory for port
++ * @efx: Efx NIC
++ *
++ * Remove directory created for @efx by efx_init_debugfs_port().
++ */
++void efx_fini_debugfs_port(struct efx_nic *efx)
++{
++ efx_fini_debugfs_dir(efx->debug_port_dir,
++ efx_debugfs_port_parameters, NULL);
++ efx->debug_port_dir = NULL;
++}
++
++/**
++ * efx_extend_debugfs_port - add parameter-files to directory for port
++ * @efx: Efx NIC
++ * @structure: Structure containing parameters
++ * @params: Pointer to zero-terminated parameter definition array
++ *
++ * Add parameter-files to the debugfs directory for @efx. Return
++ * a negative error code or 0 on success. This is intended for
++ * PHY-specific parameters. The files must be cleaned up using
++ * efx_trim_debugfs_port().
++ */
++int efx_extend_debugfs_port(struct efx_nic *efx,
++ void *structure,
++ struct efx_debugfs_parameter *params)
++{
++ return efx_init_debugfs_files(efx->debug_port_dir, params, structure);
++}
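++
++/*
++ * Illustrative sketch (not part of this patch): a PHY driver could
++ * publish its own counters through the port directory.  The
++ * struct xfp_phy_data type, the efx->phy_data pointer used to reach
++ * it and the probe hook shown here are all hypothetical.
++ *
++ * static struct efx_debugfs_parameter xfp_phy_parameters[] = {
++ *	EFX_UINT_PARAMETER(struct xfp_phy_data, resets),
++ *	{NULL},
++ * };
++ *
++ * static int xfp_phy_probe(struct efx_nic *efx)
++ * {
++ *	return efx_extend_debugfs_port(efx, efx->phy_data,
++ *				       xfp_phy_parameters);
++ * }
++ */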
++
++/**
++ * efx_trim_debugfs_port - remove parameter-files from directory for port
++ * @efx: Efx NIC
++ * @params: Pointer to zero-terminated parameter definition array
++ *
++ * Remove parameter-files previously added to the debugfs directory
++ * for @efx using efx_extend_debugfs_port().
++ */
++void efx_trim_debugfs_port(struct efx_nic *efx,
++ struct efx_debugfs_parameter *params)
++{
++ struct dentry *dir = efx->debug_port_dir;
++
++ if (dir) {
++ struct efx_debugfs_parameter *field;
++ for (field = params; field->name; field++)
++ efx_fini_debugfs_child(dir, field->name);
++ }
++}
++
++/* Per-TX-queue parameters */
++static struct efx_debugfs_parameter efx_debugfs_tx_queue_parameters[] = {
++ EFX_UINT_PARAMETER(struct efx_tx_queue, insert_count),
++ EFX_UINT_PARAMETER(struct efx_tx_queue, write_count),
++ EFX_UINT_PARAMETER(struct efx_tx_queue, read_count),
++ EFX_INT_PARAMETER(struct efx_tx_queue, stopped),
++ {NULL},
++};
++
++static void efx_fini_debugfs_tx_queue(struct efx_tx_queue *tx_queue);
++
++/**
++ * efx_init_debugfs_tx_queue - create debugfs directory for TX queue
++ * @tx_queue: Efx TX queue
++ *
++ * Create a debugfs directory containing parameter-files for @tx_queue.
++ * Return a negative error code or 0 on success. The directory must be
++ * cleaned up using efx_fini_debugfs_tx_queue().
++ */
++static int efx_init_debugfs_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ char name[EFX_DEBUGFS_NAME_LEN];
++ char target[EFX_DEBUGFS_NAME_LEN];
++ int rc;
++
++ /* Create directory */
++ if (snprintf(name, sizeof(name), EFX_TX_QUEUE_NAME(tx_queue))
++ >= sizeof(name))
++ goto err_len;
++ tx_queue->debug_dir = debugfs_create_dir(name,
++ tx_queue->efx->debug_dir);
++ if (!tx_queue->debug_dir)
++ goto err_mem;
++
++ /* Create files */
++ rc = efx_init_debugfs_files(tx_queue->debug_dir,
++ efx_debugfs_tx_queue_parameters,
++ (void *)tx_queue);
++ if (rc)
++ goto err;
++
++ /* Create symlink to channel */
++ if (snprintf(target, sizeof(target),
++ "../" EFX_CHANNEL_NAME(tx_queue->channel)) >=
++ sizeof(target))
++ goto err_len;
++ if (!debugfs_create_symlink("channel", tx_queue->debug_dir, target))
++ goto err_mem;
++
++ /* Create symlink to port */
++ if (!debugfs_create_symlink("port", tx_queue->debug_dir, "../port0"))
++ goto err_mem;
++
++ return 0;
++
++ err_len:
++ rc = -ENAMETOOLONG;
++ goto err;
++ err_mem:
++ rc = -ENOMEM;
++ err:
++ efx_fini_debugfs_tx_queue(tx_queue);
++ return rc;
++}
++
++/**
++ * efx_fini_debugfs_tx_queue - remove debugfs directory for TX queue
++ * @tx_queue: Efx TX queue
++ *
++ * Remove directory created for @tx_queue by efx_init_debugfs_tx_queue().
++ */
++static void efx_fini_debugfs_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ static const char *const symlink_names[] = {
++ "channel", "port", NULL
++ };
++
++ efx_fini_debugfs_dir(tx_queue->debug_dir,
++ efx_debugfs_tx_queue_parameters, symlink_names);
++ tx_queue->debug_dir = NULL;
++}
++
++/* Per-RX-queue parameters */
++static struct efx_debugfs_parameter efx_debugfs_rx_queue_parameters[] = {
++ EFX_INT_PARAMETER(struct efx_rx_queue, added_count),
++ EFX_INT_PARAMETER(struct efx_rx_queue, removed_count),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, max_fill),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, fast_fill_trigger),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, fast_fill_limit),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, min_fill),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, min_overfill),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, alloc_page_count),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, alloc_skb_count),
++ EFX_UINT_PARAMETER(struct efx_rx_queue, slow_fill_count),
++ {NULL},
++};
++
++static void efx_fini_debugfs_rx_queue(struct efx_rx_queue *rx_queue);
++
++/**
++ * efx_init_debugfs_rx_queue - create debugfs directory for RX queue
++ * @rx_queue: Efx RX queue
++ *
++ * Create a debugfs directory containing parameter-files for @rx_queue.
++ * Return a negative error code or 0 on success. The directory must be
++ * cleaned up using efx_fini_debugfs_rx_queue().
++ */
++static int efx_init_debugfs_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ char name[EFX_DEBUGFS_NAME_LEN];
++ char target[EFX_DEBUGFS_NAME_LEN];
++ int rc;
++
++ /* Create directory */
++ if (snprintf(name, sizeof(name), EFX_RX_QUEUE_NAME(rx_queue))
++ >= sizeof(name))
++ goto err_len;
++ rx_queue->debug_dir = debugfs_create_dir(name,
++ rx_queue->efx->debug_dir);
++ if (!rx_queue->debug_dir)
++ goto err_mem;
++
++ /* Create files */
++ rc = efx_init_debugfs_files(rx_queue->debug_dir,
++ efx_debugfs_rx_queue_parameters,
++ (void *)rx_queue);
++ if (rc)
++ goto err;
++
++ /* Create symlink to channel */
++ if (snprintf(target, sizeof(target),
++ "../" EFX_CHANNEL_NAME(rx_queue->channel)) >=
++ sizeof(target))
++ goto err_len;
++ if (!debugfs_create_symlink("channel", rx_queue->debug_dir, target))
++ goto err_mem;
++
++ return 0;
++
++ err_len:
++ rc = -ENAMETOOLONG;
++ goto err;
++ err_mem:
++ rc = -ENOMEM;
++ err:
++ efx_fini_debugfs_rx_queue(rx_queue);
++ return rc;
++}
++
++/**
++ * efx_fini_debugfs_rx_queue - remove debugfs directory for RX queue
++ * @rx_queue: Efx RX queue
++ *
++ * Remove directory created for @rx_queue by efx_init_debugfs_rx_queue().
++ */
++static void efx_fini_debugfs_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ const char *const symlink_names[] = {
++ "channel", NULL
++ };
++
++ efx_fini_debugfs_dir(rx_queue->debug_dir,
++ efx_debugfs_rx_queue_parameters, symlink_names);
++ rx_queue->debug_dir = NULL;
++}
++
++/* Per-channel parameters */
++static struct efx_debugfs_parameter efx_debugfs_channel_parameters[] = {
++ EFX_INT_PARAMETER(struct efx_channel, enabled),
++ EFX_INT_PARAMETER(struct efx_channel, irq),
++ EFX_UINT_PARAMETER(struct efx_channel, has_interrupt),
++ EFX_UINT_PARAMETER(struct efx_channel, irq_moderation),
++ EFX_UINT_PARAMETER(struct efx_channel, eventq_read_ptr),
++ EFX_UINT_PARAMETER(struct efx_channel, n_rx_tobe_disc),
++ EFX_UINT_PARAMETER(struct efx_channel, n_rx_ip_frag_err),
++ EFX_UINT_PARAMETER(struct efx_channel, n_rx_ip_hdr_chksum_err),
++ EFX_UINT_PARAMETER(struct efx_channel, n_rx_tcp_udp_chksum_err),
++ EFX_UINT_PARAMETER(struct efx_channel, n_rx_frm_trunc),
++ EFX_UINT_PARAMETER(struct efx_channel, n_rx_overlength),
++ EFX_UINT_PARAMETER(struct efx_channel, n_skbuff_leaks),
++ EFX_INT_PARAMETER(struct efx_channel, rx_alloc_level),
++ EFX_INT_PARAMETER(struct efx_channel, rx_alloc_push_pages),
++ EFX_INT_PARAMETER(struct efx_channel, rx_alloc_pop_pages),
++ {NULL},
++};
++
++static void efx_fini_debugfs_channel(struct efx_channel *channel);
++
++/**
++ * efx_init_debugfs_channel - create debugfs directory for channel
++ * @channel: Efx channel
++ *
++ * Create a debugfs directory containing parameter-files for @channel.
++ * Return a negative error code or 0 on success. The directory must be
++ * cleaned up using efx_fini_debugfs_channel().
++ */
++static int efx_init_debugfs_channel(struct efx_channel *channel)
++{
++ char name[EFX_DEBUGFS_NAME_LEN];
++ int rc;
++
++ /* Create directory */
++ if (snprintf(name, sizeof(name), EFX_CHANNEL_NAME(channel))
++ >= sizeof(name))
++ goto err_len;
++ channel->debug_dir = debugfs_create_dir(name, channel->efx->debug_dir);
++ if (!channel->debug_dir)
++ goto err_mem;
++
++ /* Create files */
++ rc = efx_init_debugfs_files(channel->debug_dir,
++ efx_debugfs_channel_parameters,
++ (void *)channel);
++ if (rc)
++ goto err;
++
++ return 0;
++
++ err_len:
++ rc = -ENAMETOOLONG;
++ goto err;
++ err_mem:
++ rc = -ENOMEM;
++ err:
++ efx_fini_debugfs_channel(channel);
++ return rc;
++}
++
++/**
++ * efx_fini_debugfs_channel - remove debugfs directory for channel
++ * @channel: Efx channel
++ *
++ * Remove directory created for @channel by efx_init_debugfs_channel().
++ */
++static void efx_fini_debugfs_channel(struct efx_channel *channel)
++{
++ efx_fini_debugfs_dir(channel->debug_dir,
++ efx_debugfs_channel_parameters, NULL);
++ channel->debug_dir = NULL;
++}
++
++/* Per-NIC parameters */
++static struct efx_debugfs_parameter efx_debugfs_nic_parameters[] = {
++ EFX_INT_PARAMETER(struct efx_nic, legacy_irq),
++ EFX_INT_PARAMETER(struct efx_nic, rss_queues),
++ EFX_UINT_PARAMETER(struct efx_nic, rx_buffer_len),
++ EFX_INT_MODE_PARAMETER(struct efx_nic, interrupt_mode),
++ {.name = "hardware_desc",
++ .offset = 0,
++ .reader = falcon_debugfs_read_hardware_desc},
++ {NULL},
++};
++
++/* Per-NIC error counts */
++static struct efx_debugfs_parameter efx_debugfs_nic_error_parameters[] = {
++ EFX_ATOMIC_PARAMETER(struct efx_nic_errors, missing_event),
++ EFX_ATOMIC_PARAMETER(struct efx_nic_errors, rx_reset),
++ EFX_ATOMIC_PARAMETER(struct efx_nic_errors, rx_desc_fetch),
++ EFX_ATOMIC_PARAMETER(struct efx_nic_errors, tx_desc_fetch),
++ EFX_ATOMIC_PARAMETER(struct efx_nic_errors, spurious_tx),
++ {NULL},
++};
++
++/**
++ * efx_init_debugfs_channels - create debugfs directories for NIC channels
++ * @efx: Efx NIC
++ *
++ * Create subdirectories of @efx's debugfs directory for all the
++ * channels, RX queues and TX queues used by this driver. Return a
++ * negative error code or 0 on success. The subdirectories must be
++ * cleaned up using efx_fini_debugfs_channels().
++ */
++int efx_init_debugfs_channels(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ struct efx_rx_queue *rx_queue;
++ struct efx_tx_queue *tx_queue;
++ int rc;
++
++ efx_for_each_channel(channel, efx) {
++ rc = efx_init_debugfs_channel(channel);
++ if (rc)
++ goto err;
++ }
++
++ efx_for_each_rx_queue(rx_queue, efx) {
++ rc = efx_init_debugfs_rx_queue(rx_queue);
++ if (rc)
++ goto err;
++ }
++
++ efx_for_each_tx_queue(tx_queue, efx) {
++ rc = efx_init_debugfs_tx_queue(tx_queue);
++ if (rc)
++ goto err;
++ }
++
++ return 0;
++
++ err:
++ efx_fini_debugfs_channels(efx);
++ return rc;
++}
++
++/**
++ * efx_fini_debugfs_channels - remove debugfs directories for NIC queues
++ * @efx: Efx NIC
++ *
++ * Remove subdirectories of @efx's debugfs directory created by
++ * efx_init_debugfs_channels().
++ */
++void efx_fini_debugfs_channels(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ struct efx_rx_queue *rx_queue;
++ struct efx_tx_queue *tx_queue;
++
++ efx_for_each_tx_queue(tx_queue, efx)
++ efx_fini_debugfs_tx_queue(tx_queue);
++
++ efx_for_each_rx_queue(rx_queue, efx)
++ efx_fini_debugfs_rx_queue(rx_queue);
++
++ efx_for_each_channel(channel, efx)
++ efx_fini_debugfs_channel(channel);
++}
++
++/**
++ * efx_init_debugfs_nic - create debugfs directory for NIC
++ * @efx: Efx NIC
++ *
++ * Create a debugfs directory containing parameter-files for @efx,
++ * and a subdirectory "errors" containing per-NIC error counts.
++ * Return a negative error code or 0 on success. The directories
++ * must be cleaned up using efx_fini_debugfs_nic().
++ */
++int efx_init_debugfs_nic(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Create directory */
++ efx->debug_dir = debugfs_create_dir(pci_name(efx->pci_dev),
++ efx_debug_cards);
++ if (!efx->debug_dir)
++ goto err_mem;
++
++ /* Create errors directory */
++ efx->errors.debug_dir = debugfs_create_dir("errors", efx->debug_dir);
++ if (!efx->errors.debug_dir)
++ goto err_mem;
++
++ /* Create files */
++ rc = efx_init_debugfs_files(efx->debug_dir,
++ efx_debugfs_nic_parameters, (void *)efx);
++ if (rc)
++ goto err;
++ rc = efx_init_debugfs_files(efx->errors.debug_dir,
++ efx_debugfs_nic_error_parameters,
++ (void *)&efx->errors);
++ if (rc)
++ goto err;
++
++ return 0;
++
++ err_mem:
++ rc = -ENOMEM;
++ err:
++ efx_fini_debugfs_nic(efx);
++ return rc;
++}
++
++/**
++ * efx_fini_debugfs_nic - remove debugfs directories for NIC
++ * @efx: Efx NIC
++ *
++ * Remove debugfs directories created for @efx by efx_init_debugfs_nic().
++ */
++void efx_fini_debugfs_nic(struct efx_nic *efx)
++{
++ efx_fini_debugfs_dir(efx->errors.debug_dir,
++ efx_debugfs_nic_error_parameters, NULL);
++ efx->errors.debug_dir = NULL;
++ efx_fini_debugfs_dir(efx->debug_dir, efx_debugfs_nic_parameters, NULL);
++ efx->debug_dir = NULL;
++}
++
++/**
++ * efx_init_debugfs - create debugfs directories for sfc driver
++ *
++ * Create debugfs directories "sfc" and "sfc/cards". This must be
++ * called before any of the other functions that create debugfs
++ * directories. Return a negative error code or 0 on success. The
++ * directories must be cleaned up using efx_fini_debugfs().
++ */
++int efx_init_debugfs(void)
++{
++ /* Create top-level directory */
++#if defined(EFX_USE_DEBUGFS)
++ efx_debug_root = debugfs_create_dir("sfc", NULL);
++#else
++ efx_debug_root = proc_mkdir("sfc", proc_root_driver);
++#endif
++ if (!efx_debug_root)
++ goto err;
++
++ /* Create "cards" directory */
++ efx_debug_cards = debugfs_create_dir("cards", efx_debug_root);
++ if (!efx_debug_cards)
++ goto err;
++
++#if defined(EFX_USE_DEBUGFS)
++ /* Create compatibility sym-link */
++ if (!proc_symlink("sfc", proc_root_driver, "/sys/kernel/debug/sfc"))
++ goto err;
++#endif
++ return 0;
++
++ err:
++ efx_fini_debugfs();
++ return -ENOMEM;
++}
++
++/**
++ * efx_fini_debugfs - remove debugfs directories for sfc driver
++ *
++ * Remove directories created by efx_init_debugfs().
++ */
++void efx_fini_debugfs(void)
++{
++#if defined(EFX_USE_DEBUGFS)
++ remove_proc_entry("sfc", proc_root_driver);
++#endif
++ debugfs_remove(efx_debug_cards);
++ efx_debug_cards = NULL;
++ debugfs_remove(efx_debug_root);
++ efx_debug_root = NULL;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/debugfs.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/debugfs.h
+--- linux-2.6.18.8/drivers/net/sfc/debugfs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/debugfs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,172 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_DEBUGFS_H
++#define EFX_DEBUGFS_H
++
++#ifdef CONFIG_SFC_DEBUGFS
++
++struct seq_file;
++
++struct efx_debugfs_parameter {
++ const char *name;
++ size_t offset;
++ int (*reader)(struct seq_file *, void *);
++};
++
++extern void efx_fini_debugfs_child(struct dentry *dir, const char *name);
++extern int efx_init_debugfs_netdev(struct net_device *net_dev);
++extern void efx_fini_debugfs_netdev(struct net_device *net_dev);
++extern int efx_init_debugfs_port(struct efx_nic *efx);
++extern void efx_fini_debugfs_port(struct efx_nic *efx);
++extern int efx_init_debugfs_nic(struct efx_nic *efx);
++extern void efx_fini_debugfs_nic(struct efx_nic *efx);
++extern int efx_init_debugfs_channels(struct efx_nic *efx);
++extern void efx_fini_debugfs_channels(struct efx_nic *efx);
++extern int efx_init_debugfs(void);
++extern void efx_fini_debugfs(void);
++extern int efx_extend_debugfs_port(struct efx_nic *efx,
++ void *context,
++ struct efx_debugfs_parameter *params);
++extern void efx_trim_debugfs_port(struct efx_nic *efx,
++ struct efx_debugfs_parameter *params);
++
++/* Helpers for handling debugfs entry reads */
++extern int efx_debugfs_read_uint(struct seq_file *, void *);
++extern int efx_debugfs_read_string(struct seq_file *, void *);
++extern int efx_debugfs_read_int(struct seq_file *, void *);
++extern int efx_debugfs_read_atomic(struct seq_file *, void *);
++extern int efx_debugfs_read_dword(struct seq_file *, void *);
++
++/* Handy macros for filling out parameters */
++
++/* Initialiser for a struct efx_debugfs_parameter with type-checking.
++ * Both arms of the conditional expression are identical; the pointer
++ * comparison exists purely to force a compile-time warning if
++ * @parameter is not of @field_type.
++ */
++#define EFX_PARAMETER(container_type, parameter, field_type, \
++ reader_function) { \
++ .name = #parameter, \
++ .offset = ((((field_type *) 0) == \
++ &((container_type *) 0)->parameter) ? \
++ offsetof(container_type, parameter) : \
++ offsetof(container_type, parameter)), \
++ .reader = reader_function, \
++}
++
++/* Likewise, but the file name is not taken from the field name */
++#define EFX_NAMED_PARAMETER(_name, container_type, parameter, field_type, \
++ reader_function) { \
++ .name = #_name, \
++ .offset = ((((field_type *) 0) == \
++ &((container_type *) 0)->parameter) ? \
++ offsetof(container_type, parameter) : \
++ offsetof(container_type, parameter)), \
++ .reader = reader_function, \
++}
++
++/* Likewise, but with one file for each of 4 lanes */
++#define EFX_PER_LANE_PARAMETER(prefix, suffix, container_type, parameter, \
++ field_type, reader_function) { \
++ .name = prefix "0" suffix, \
++ .offset = ((((field_type *) 0) == \
++ ((container_type *) 0)->parameter) ? \
++ offsetof(container_type, parameter[0]) : \
++ offsetof(container_type, parameter[0])), \
++ .reader = reader_function, \
++}, { \
++ .name = prefix "1" suffix, \
++ .offset = offsetof(container_type, parameter[1]), \
++ .reader = reader_function, \
++}, { \
++ .name = prefix "2" suffix, \
++ .offset = offsetof(container_type, parameter[2]), \
++ .reader = reader_function, \
++}, { \
++ .name = prefix "3" suffix, \
++ .offset = offsetof(container_type, parameter[3]), \
++ .reader = reader_function, \
++}
++
++/* A string parameter (string embedded in the structure) */
++#define EFX_STRING_PARAMETER(container_type, parameter) { \
++ .name = #parameter, \
++ .offset = ((((char *) 0) == \
++ ((container_type *) 0)->parameter) ? \
++ offsetof(container_type, parameter) : \
++ offsetof(container_type, parameter)), \
++ .reader = efx_debugfs_read_string, \
++}
++
++/* An unsigned integer parameter */
++#define EFX_UINT_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ unsigned int, efx_debugfs_read_uint)
++
++/* A dword parameter */
++#define EFX_DWORD_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ efx_dword_t, efx_debugfs_read_dword)
++
++/* An atomic_t parameter */
++#define EFX_ATOMIC_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ atomic_t, efx_debugfs_read_atomic)
++
++/* An integer parameter */
++#define EFX_INT_PARAMETER(container_type, parameter) \
++ EFX_PARAMETER(container_type, parameter, \
++ int, efx_debugfs_read_int)
++
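++/*
++ * Example (sketch, not part of this patch; struct foo_stats and its
++ * rx_events field are hypothetical).  A counter embedded in a
++ * structure is exposed as a read-only debugfs file named after the
++ * field:
++ *
++ * struct foo_stats {
++ *	unsigned int rx_events;
++ * };
++ *
++ * static struct efx_debugfs_parameter foo_parameters[] = {
++ *	EFX_UINT_PARAMETER(struct foo_stats, rx_events),
++ *	{NULL},
++ * };
++ */
++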
++#else /* !CONFIG_SFC_DEBUGFS */
++
++static inline int efx_init_debugfs_netdev(struct net_device *net_dev)
++{
++ return 0;
++}
++static inline void efx_fini_debugfs_netdev(struct net_device *net_dev) {}
++static inline int efx_init_debugfs_port(struct efx_nic *efx)
++{
++ return 0;
++}
++static inline void efx_fini_debugfs_port(struct efx_nic *efx) {}
++static inline int efx_init_debugfs_nic(struct efx_nic *efx)
++{
++ return 0;
++}
++static inline void efx_fini_debugfs_nic(struct efx_nic *efx) {}
++static inline int efx_init_debugfs_channels(struct efx_nic *efx)
++{
++ return 0;
++}
++static inline void efx_fini_debugfs_channels(struct efx_nic *efx) {}
++static inline int efx_init_debugfs(void)
++{
++ return 0;
++}
++static inline void efx_fini_debugfs(void) {}
++
++#endif /* CONFIG_SFC_DEBUGFS */
++
++#endif /* EFX_DEBUGFS_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/driverlink_api.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/driverlink_api.h
+--- linux-2.6.18.8/drivers/net/sfc/driverlink_api.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/driverlink_api.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,612 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_DRIVERLINK_API_H
++#define EFX_DRIVERLINK_API_H
++
++#include <linux/list.h> /* for struct list_head */
++#if !defined(EFX_USE_FASTCALL)
++ #include <linux/version.h>
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ #define EFX_USE_FASTCALL yes
++ #include <linux/linkage.h>
++ #endif
++#endif
++
++/**
++ * DOC: Efx driverlink API
++ *
++ * This file must be included by any driver that wishes to attach to
++ * devices claimed by the Solarflare NIC driver (sfc). It allows separate
++ * kernel modules to expose other functionality offered by the NIC, with
++ * the sfc driver remaining in overall control.
++ *
++ * Overview:
++ *
++ * Driverlink clients define a &struct efx_dl_driver, and register
++ * this structure with the driverlink layer using
++ * efx_dl_register_driver(), which is exported by the sfc driver.
++ *
++ * The probe() routine of each driverlink client driver is called by
++ * the driverlink layer for each physical port in the system, after
++ * the sfc driver has performed start-of-day hardware initialisation
++ * and self-test. If ports are added or removed via pci hotplug then
++ * the &struct efx_dl_driver probe() or remove() routines are called
++ * as appropriate.
++ *
++ * If the port doesn't provide the necessary hardware resources for a
++ * client, then that client can return failure from its probe()
++ * routine. Information provided to the client driver at probe time
++ * includes the relevant &struct net_device, a linked list of
++ * &struct efx_dl_device_info structures describing the available
++ * hardware resources, and the silicon revision name.
++ *
++ * Each probe() routine is given a unique &struct efx_dl_device per
++ * port, which means it can safely use the @priv member to store any
++ * useful state it needs. The probe routine also has the opportunity
++ * to provide a &struct efx_dl_callbacks via
++ * efx_dl_register_callbacks(), which allows the client to intercept
++ * the sfc driver's operations at strategic points.
++ *
++ * Occasionally, the underlying Efx device may need to be reset to
++ * recover from an error condition. The client's reset_suspend() and
++ * reset_resume() methods [if provided] will be called to enable the
++ * client to suspend operations and preserve any state before the
++ * reset. The client can itself request a reset using efx_dl_reset()
++ * or efx_dl_schedule_reset(), should it detect an error condition
++ * necessitating a reset.
++ *
++ * Example:
++ *
++ * The MTD driver (mtd.c) uses the driverlink layer.
++ */
++
++/* Forward declarations */
++struct pci_dev;
++struct net_device;
++struct sk_buff;
++struct efx_dl_device;
++struct efx_dl_device_info;
++
++/*
++ * This is used to guard against the registration of driverlink
++ * clients using an incorrect version of the API.
++ */
++#define EFX_DRIVERLINK_API_VERSION 1
++
++
++/**
++ * struct efx_dl_driver - An Efx driverlink device driver
++ *
++ * This is the analogue of a struct pci_driver for a normal PCI
++ * driver. Driverlink clients should register themselves using
++ * efx_dl_register_driver() at module initialisation, and deregister
++ * themselves using efx_dl_unregister_driver() at module exit.
++ *
++ * All calls to members of efx_dl_driver are serialised by a single
++ * mutex, so you are allowed to sleep in these functions. Take care
++ * not to call driverlink methods from within these callbacks;
++ * otherwise a deadlock is possible.
++ *
++ * @name: Name of the driver
++ * @probe: Called when device added
++ * @remove: Called when device removed
++ * @reset_suspend: Called before device is reset
++ * @reset_resume: Called after device is reset
++ */
++struct efx_dl_driver {
++ const char *name;
++
++ /*
++ * probe - Handle device addition.
++ * @efx_dev: Efx driverlink device
++ * @net_dev: The net_dev relevant to this port
++ * @dev_info: A linked list of device information.
++ * @silicon_rev: Silicon revision name.
++ *
++ * This will be called after driverlink client registration for
++ * every port on the system, and for every port that appears
++ * thereafter via hotplug.
++ *
++ * The client may use either @efx_dev->pci_dev, the dev_info linked
++ * list of available driver information, or the silicon revision
++ * name to determine if they can support this port. If they can,
++ * they should return 0 to indicate the probe was successful. Any
++ * other return code indicates that the probe failed, and the
++ * @efx_dl_dev will be invalidated.
++ *
++ * The client should perform whatever initialisation it
++ * requires, and store a pointer to its private data in
++ * @efx_dl_dev->priv (which is not shared between clients).
++ * It may also wish to hook in a callbacks table using
++ * efx_dl_register_callbacks().
++ *
++ * Return a negative error code or 0 on success.
++ */
++ int (*probe) (struct efx_dl_device *efx_dl_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char *silicon_rev);
++
++ /*
++ * remove - Handle device removal.
++ * @efx_dev: Efx driverlink device
++ *
++ * This will be called at driver exit (or hotplug removal) for
++ * each registered driverlink client.
++ *
++ * The client must ensure that it has finished all operations
++ * using this device before returning from this method. If it
++ * has hooked in a callbacks table using
++ * efx_dl_register_callbacks(), it must unhook it using
++ * efx_dl_unregister_callbacks(), and then ensure that all
++ * callback-triggered operations (e.g. scheduled tasklets)
++ * have completed before returning. (It does not need to
++ * explicitly wait for callback methods to finish executing,
++ * since efx_dl_unregister_callbacks() will sleep until all
++ * callbacks have returned anyway.)
++ *
++ * Note that the device itself may not have been removed; it
++ * may simply be that the client is being unloaded
++ * via efx_dl_unregister_driver(). In this case other clients
++ * (and the sfc driver itself) will still be using the device,
++ * so the client cannot assume that the device itself is quiescent.
++ * In particular, callbacks may continue to be triggered at any
++ * point until efx_dl_unregister_callbacks() is called.
++ */
++ void (*remove) (struct efx_dl_device *efx_dev);
++
++ /*
++ * reset_suspend - Suspend ready for reset.
++ * @efx_dev: Efx driverlink device
++ *
++ * This method will be called immediately before a hardware
++ * reset (which may or may not have been initiated by the
++ * driverlink client). This client must save any state that it
++ * will need to restore after the reset, and suspend all
++ * operations that might access the hardware. It must not
++ * return until the client can guarantee to have stopped
++ * touching the hardware.
++ *
++ * It is guaranteed that callbacks will be inactive by the
++ * time this method is called; the driverlink layer will
++ * already have prevented new callbacks being made and waited
++ * for all callbacks functions to return before calling
++ * reset_suspend(). However, any delayed work scheduled by
++ * the callback functions (e.g. tasklets) may not yet have
++ * completed.
++ *
++ * This method is allowed to sleep, so waiting on tasklets,
++ * work queues etc. is permitted. There will always be a
++ * corresponding call to the reset_resume() method, so it is
++ * safe to e.g. down a semaphore within reset_suspend() and up
++ * it within reset_resume(). (However, you obviously cannot
++ * do the same with a spinlock).
++ *
++ * Note that the reset operation may be being carried out in
++ * the context of scheduled work, so you cannot use
++ * flush_scheduled_work() to ensure that any work you may have
++ * scheduled has completed.
++ *
++ * During hardware reset, there is a chance of receiving
++ * spurious interrupts, so the client's ISR (if any) should be
++ * unhooked or otherwise disabled.
++ */
++ void (*reset_suspend) (struct efx_dl_device *efx_dev);
++
++ /*
++ * reset_resume - Restore after a reset.
++ * @efx_dev: Efx driverlink device
++ * @ok: Reset success indicator
++ *
++ * This method will be called after a hardware reset. There
++ * will always have been a corresponding call to the
++ * reset_suspend() method beforehand.
++ *
++ * If @ok is non-zero, the client should restore the state
++ * that it saved during the call to reset_suspend() and resume
++ * normal operations.
++ *
++ * If @ok is zero, the reset operation has failed and the
++ * hardware is currently in an unusable state. In this case,
++ * the client should release any locks taken out by
++ * reset_suspend(), but should not take any other action; in
++ * particular, it must not access the hardware, nor resume
++ * normal operations. The hardware is effectively dead at
++ * this point, and our sole aim is to avoid deadlocking or
++ * crashing the host.
++ *
++ * The driverlink layer will still be locked when
++ * reset_resume() is called, so the client may not call
++ * driverlink functions. In particular, if the reset failed,
++ * the client must not call efx_dl_unregister_callbacks() at
++ * this point; it should wait until remove() is called.
++ */
++ void (*reset_resume) (struct efx_dl_device *efx_dev, int ok);
++
++/* private: */
++ struct list_head node;
++ struct list_head device_list;
++};
++
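++/*
++ * Minimal client sketch (illustrative only, not part of this patch;
++ * all "example_*" names are hypothetical).  A client fills in a
++ * &struct efx_dl_driver and registers it at module load:
++ *
++ * static int example_probe(struct efx_dl_device *efx_dev,
++ *			     const struct net_device *net_dev,
++ *			     const struct efx_dl_device_info *dev_info,
++ *			     const char *silicon_rev)
++ * {
++ *	efx_dev->priv = NULL;
++ *	return 0;
++ * }
++ *
++ * static void example_remove(struct efx_dl_device *efx_dev)
++ * {
++ * }
++ *
++ * static struct efx_dl_driver example_driver = {
++ *	.name = "example",
++ *	.probe = example_probe,
++ *	.remove = example_remove,
++ * };
++ *
++ * static int __init example_init(void)
++ * {
++ *	return efx_dl_register_driver(&example_driver);
++ * }
++ * module_init(example_init);
++ *
++ * static void __exit example_exit(void)
++ * {
++ *	efx_dl_unregister_driver(&example_driver);
++ * }
++ * module_exit(example_exit);
++ */
++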
++/**
++ * DOC: Efx driverlink device information
++ *
++ * Each &struct efx_dl_device makes certain hardware resources visible
++ * to driverlink clients, and they describe which resources are
++ * available by passing a linked list of &struct efx_dl_device_info
++ * into the probe() routine.
++ *
++ * The driverlink client's probe function can iterate through the linked list,
++ * and provided that it understands the resources that are exported, it can
++ * choose to make use of them through an external interface.
++ */
++
++/**
++ * enum efx_dl_device_info_type - Device information identifier.
++ *
++ * Each distinct hardware resource API will have a member in this
++ * enumeration.
++ *
++ * @EFX_DL_FALCON_RESOURCES: Information type is &struct efx_dl_falcon_resources
++ */
++enum efx_dl_device_info_type {
++ /** Falcon resources available for export */
++ EFX_DL_FALCON_RESOURCES = 0,
++};
++
++/**
++ * struct efx_dl_device_info - device information structure
++ * @next: Link to next structure, if any
++ * @type: Type code for this structure
++ *
++ * This structure is embedded in other structures provided by the
++ * driverlink device provider, and implements a linked list of
++ * resources pertinent to a driverlink client.
++ *
++ * Example: &struct efx_dl_falcon_resources
++ */
++struct efx_dl_device_info {
++ struct efx_dl_device_info *next;
++ enum efx_dl_device_info_type type;
++};
++
++/**
++ * enum efx_dl_falcon_resource_flags - Falcon resource information flags.
++ *
++ * Flags that describe hardware variations for the described Falcon based port.
++ *
++ * @EFX_DL_FALCON_DUAL_FUNC: Port is dual-function.
++ * Certain silicon revisions have two pci functions, and require
++ * certain hardware resources to be accessed via the secondary
++ * function. See the discussion of @pci_dev in &struct efx_dl_device
++ * below.
++ * @EFX_DL_FALCON_USE_MSI: Port is initialised to use MSI/MSI-X interrupts.
++ * Falcon supports traditional legacy interrupts and MSI/MSI-X
++ * interrupts. Since the sfc driver supports either, as a run
++ * time configuration, driverlink drivers need to be aware of which
++ * one to use for their interrupting resources.
++ */
++enum efx_dl_falcon_resource_flags {
++ EFX_DL_FALCON_DUAL_FUNC = 0x1,
++ EFX_DL_FALCON_USE_MSI = 0x2,
++};
++
++/**
++ * struct efx_dl_falcon_resources - Falcon resource information.
++ *
++ * This structure describes Falcon hardware resources available for
++ * use by a driverlink driver.
++ *
++ * @hdr: Resource linked list header
++ * @biu_lock: Register access lock.
++ * Some Falcon revisions require register access for configuration
++ * registers to be serialised between ports and PCI functions.
++ * The sfc driver will provide the appropriate lock semantics for
++ * the underlying hardware.
++ * @buffer_table_min: First available buffer table entry
++ * @buffer_table_max: Last available buffer table entry + 1
++ * @evq_timer_min: First available event queue with timer
++ * @evq_timer_max: Last available event queue with timer + 1
++ * @evq_int_min: First available event queue with interrupt
++ * @evq_int_max: Last available event queue with interrupt + 1
++ * @rxq_min: First available RX queue
++ * @rxq_max: Last available RX queue + 1
++ * @txq_min: First available TX queue
++ * @txq_max: Last available TX queue + 1
++ * @flags: Hardware variation flags
++ */
++struct efx_dl_falcon_resources {
++ struct efx_dl_device_info hdr;
++ spinlock_t *biu_lock;
++ unsigned buffer_table_min, buffer_table_max;
++ unsigned evq_timer_min, evq_timer_max;
++ unsigned evq_int_min, evq_int_max;
++ unsigned rxq_min, rxq_max;
++ unsigned txq_min, txq_max;
++ enum efx_dl_falcon_resource_flags flags;
++};
++
++/**
++ * struct efx_dl_device - An Efx driverlink device.
++ *
++ * @pci_dev: Underlying PCI device.
++ * This is the PCI device used by the sfc driver. It will
++ * already have been enabled for bus-mastering DMA etc.
++ * @priv: Driver private data
++ * Driverlink clients can use this to store a pointer to their
++ * internal per-device data structure. Each (driver, device)
++ * tuple has a separate &struct efx_dl_device, so clients can use
++ * this @priv field independently.
++ * @driver: Efx driverlink driver for this device
++ */
++struct efx_dl_device {
++ struct pci_dev *pci_dev;
++ void *priv;
++ struct efx_dl_driver *driver;
++};
++
++/**
++ * enum efx_veto - Packet veto request flag.
++ *
++ * This is the return type for the rx_packet() and tx_packet() methods
++ * in &struct efx_dl_callbacks.
++ *
++ * @EFX_ALLOW_PACKET: Packet may be transmitted/received
++ * @EFX_VETO_PACKET: Packet must not be transmitted/received
++ */
++enum efx_veto {
++ EFX_ALLOW_PACKET = 0,
++ EFX_VETO_PACKET = 1,
++};
++
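++/*
++ * For example (sketch only; EXAMPLE_MAX_LEN and the handler name are
++ * hypothetical), a filtering client might veto oversized frames from
++ * its rx_packet() callback:
++ *
++ * static enum efx_veto example_rx_packet(struct efx_dl_device *efx_dev,
++ *					   const char *pkt_hdr, int pkt_len)
++ * {
++ *	if (pkt_len > EXAMPLE_MAX_LEN)
++ *		return EFX_VETO_PACKET;
++ *	return EFX_ALLOW_PACKET;
++ * }
++ */
++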
++/**
++ * struct efx_dl_callbacks - Efx callbacks
++ *
++ * These methods can be hooked in to the sfc driver via
++ * efx_dl_register_callbacks(). They allow clients to intercept and/or
++ * modify the behaviour of the sfc driver at predetermined points.
++ *
++ * For efficiency, only one client can hook each callback.
++ *
++ * Since these callbacks are called on packet transmit and reception
++ * paths, clients should avoid acquiring locks or allocating memory.
++ *
++ * @tx_packet: Called when packet is about to be transmitted
++ * @rx_packet: Called when packet is received
++ * @link_change: Called when link status has changed
++ * @request_mtu: Called to request MTU change
++ * @mtu_changed: Called when MTU has been changed
++ * @event: Called when a NIC event is not handled by the sfc driver
++ */
++struct efx_dl_callbacks {
++ /*
++ * tx_packet - Packet about to be transmitted.
++ * @efx_dev: Efx driverlink device
++ * @skb: Socket buffer containing the packet to be sent
++ *
++ * This method is called for every packet about to be
++ * transmitted. It allows the client to snoop on traffic sent
++ * via the kernel queues.
++ *
++ * The method may return %EFX_VETO_PACKET in order to prevent
++ * the sfc driver from transmitting the packet. The net
++ * driver will then discard the packet. If the client wishes
++ * to retain a reference to the packet data after returning
++ * %EFX_VETO_PACKET, it must obtain its own copy of the
++ * packet (e.g. by calling skb_get(), or by copying out the
++ * packet data to an external buffer).
++ *
++ * This method must return quickly, since it will have a
++ * direct performance impact upon the sfc driver. It will be
++ * called with interrupts disabled (and may be called in
++ * interrupt context), so may not sleep. Since the sfc driver
++ * may have multiple TX queues, running in parallel, please avoid
++ * the need for locking if it all possible.
++ */
++#if defined(EFX_USE_FASTCALL)
++ enum efx_veto fastcall (*tx_packet) (struct efx_dl_device *efx_dev,
++ struct sk_buff *skb);
++#else
++ enum efx_veto (*tx_packet) (struct efx_dl_device *efx_dev,
++ struct sk_buff *skb);
++#endif
++
++ /*
++ * rx_packet - Packet received.
++ * @efx_dev: Efx driverlink device
++ * @pkt_hdr: Pointer to received packet
++ * @pkt_len: Length of received packet
++ *
++ * This method is called for every received packet. It allows
++ * the client to snoop on traffic received by the kernel
++ * queues.
++ *
++ * The method may return %EFX_VETO_PACKET in order to prevent
++ * the sfc driver from passing the packet to the kernel. The net
++ * driver will then discard the packet.
++ *
++ * This method must return quickly, since it will have a
++ * direct performance impact upon the sfc driver. It is
++ * called in tasklet context, so may not sleep. Note that
++ * there are per-channel tasklets in the sfc driver, so
++ * rx_packet() may be called simultaneously on different CPUs
++ * and must lock appropriately. The design of the sfc driver
++ * allows for lockless operation between receive channels, so
++ * please avoid the need for locking if at all possible.
++ */
++#if defined(EFX_USE_FASTCALL)
++ enum efx_veto fastcall (*rx_packet) (struct efx_dl_device *efx_dev,
++ const char *pkt_hdr, int pkt_len);
++#else
++ enum efx_veto (*rx_packet) (struct efx_dl_device *efx_dev,
++ const char *pkt_hdr, int pkt_len);
++#endif
++
++ /*
++ * link_change - Link status change.
++ * @efx_dev: Efx driverlink device
++ * @link_up: Link up indicator
++ *
++ * This method is called to inform the driverlink client
++ * whenever the PHY link status changes. By the time this
++ * function is called, the MAC has already been reconfigured
++ * with the new autonegotiation settings from the PHY.
++ *
++ * This method is called from tasklet context and may not
++ * sleep.
++ */
++ void (*link_change) (struct efx_dl_device *efx_dev, int link_up);
++
++ /*
++ * request_mtu: Request MTU change.
++ * @efx_dev: Efx driverlink device
++ * @new_mtu: Requested new MTU
++ *
++ * This method is called whenever the user requests an MTU
++ * change on an interface. The client may return an error, in
++ * which case the MTU change request will be denied. If the
++ * client returns success, the MAC will be reconfigured with a
++ * new maximum frame length equal to
++ * EFX_MAX_FRAME_LEN(new_mtu). The client will be notified
++ * via the mtu_changed() method once the MAC has been
++ * reconfigured.
++ *
++ * The current MTU for the port can be obtained via
++ * efx_dl_get_netdev(efx_dl_device)->mtu.
++ *
++ * The sfc driver guarantees that no other callback functions
++ * are in progress when this method is called. This function
++ * is called in process context and may sleep.
++ *
++ * Return a negative error code or 0 on success.
++ */
++ int (*request_mtu) (struct efx_dl_device *efx_dev, int new_mtu);
++
++ /*
++ * mtu_changed - MTU has been changed.
++ * @efx_dev: Efx driverlink device
++ * @mtu: The new MTU
++ *
++ * This method is called once the MAC has been reconfigured
++ * with a new MTU. There will have been a preceding call to
++ * request_mtu().
++ *
++ * The sfc driver guarantees that no other callback functions
++ * are in progress when this method is called. This function
++ * is called in process context and may sleep.
++ */
++ void (*mtu_changed) (struct efx_dl_device *efx_dev, int mtu);
++
++ /*
++ * event - Event callback.
++ * @efx_dev: Efx driverlink device
++ * @p_event: Pointer to event
++ *
++ * This method is called for each event that is not handled by the
++ * sfc driver.
++ */
++ void (*event) (struct efx_dl_device *efx_dev, void *p_event);
++};
++
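++/*
++ * A client would typically hook its callbacks from its probe() method
++ * (sketch only; example_rx_packet is the hypothetical handler shown
++ * above):
++ *
++ * static struct efx_dl_callbacks example_callbacks = {
++ *	.rx_packet = example_rx_packet,
++ * };
++ *
++ * Then, inside example_probe():
++ *
++ *	rc = efx_dl_register_callbacks(efx_dev, &example_callbacks);
++ *	if (rc)
++ *		return rc;
++ */
++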
++/* Include API version number in symbol used for efx_dl_register_driver */
++#define efx_dl_stringify_1(x, y) x ## y
++#define efx_dl_stringify_2(x, y) efx_dl_stringify_1(x, y)
++#define efx_dl_register_driver \
++ efx_dl_stringify_2(efx_dl_register_driver_api_ver_, \
++ EFX_DRIVERLINK_API_VERSION)
++
++extern int efx_dl_register_driver(struct efx_dl_driver *driver);
++
++extern void efx_dl_unregister_driver(struct efx_dl_driver *driver);
++
++extern int efx_dl_register_callbacks(struct efx_dl_device *efx_dev,
++ struct efx_dl_callbacks *callbacks);
++
++extern void efx_dl_unregister_callbacks(struct efx_dl_device *efx_dev,
++ struct efx_dl_callbacks *callbacks);
++
++extern void efx_dl_schedule_reset(struct efx_dl_device *efx_dev);
++
++/**
++ * efx_dl_for_each_device_info_matching - iterate an efx_dl_device_info list
++ * @_dev_info: Pointer to first &struct efx_dl_device_info
++ * @_type: Type code to look for
++ * @_info_type: Structure type corresponding to type code
++ * @_field: Name of &struct efx_dl_device_info field in the type
++ * @_p: Iterator variable
++ *
++ * Example:
++ *
++ * static int driver_dl_probe(... const struct efx_dl_device_info *dev_info ...)
++ * {
++ * struct efx_dl_falcon_resources *res;
++ *
++ * efx_dl_for_each_device_info_matching(dev_info,EFX_DL_FALCON_RESOURCES,
++ * struct efx_dl_falcon_resources,
++ * hdr, res) {
++ * if (res->flags & EFX_DL_FALCON_DUAL_FUNC) {
++ * .....
++ * }
++ * }
++ * }
++ */
++#define efx_dl_for_each_device_info_matching(_dev_info, _type, \
++ _info_type, _field, _p) \
++ for ((_p) = container_of((_dev_info), _info_type, _field); \
++ (_p) != NULL; \
++ (_p) = container_of((_p)->_field.next, _info_type, _field))\
++ if ((_p)->_field.type != _type) \
++ continue; \
++ else
++
++/**
++ * efx_dl_search_device_info - search an efx_dl_device_info list
++ * @_dev_info: Pointer to first &struct efx_dl_device_info
++ * @_type: Type code to look for
++ * @_info_type: Structure type corresponding to type code
++ * @_field: Name of &struct efx_dl_device_info member in this type
++ * @_p: Result variable
++ *
++ * Example:
++ *
++ * static int driver_dl_probe(... const struct efx_dl_device_info *dev_info ...)
++ * {
++ * struct efx_dl_falcon_resources *res;
++ *
++ * efx_dl_search_device_info(dev_info, EFX_DL_FALCON_RESOURCES,
++ * struct efx_dl_falcon_resources, hdr, res);
++ * if (res != NULL) {
++ * ....
++ * }
++ * }
++ */
++#define efx_dl_search_device_info(_dev_info, _type, _info_type, \
++ _field, _p) \
++ efx_dl_for_each_device_info_matching((_dev_info), (_type), \
++ _info_type, _field, (_p)) \
++ break;
++
++#endif /* EFX_DRIVERLINK_API_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/driverlink.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/driverlink.c
+--- linux-2.6.18.8/drivers/net/sfc/driverlink.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/driverlink.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,544 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/skbuff.h>
++#include <linux/rtnetlink.h>
++#include "net_driver.h"
++#include "efx.h"
++#include "driverlink.h"
++
++/* Driverlink lock
++ * This mutex must be held for any operation that modifies any of
++ * the driverlink lists.
++ */
++static DEFINE_MUTEX(efx_driverlink_lock);
++
++/* List of all registered drivers */
++static LIST_HEAD(efx_driver_list);
++
++/* List of all registered Efx ports */
++static LIST_HEAD(efx_port_list);
++
++/* Driver link handle used internally to track devices */
++struct efx_dl_handle {
++ /* The efx_dl_device consumers see */
++ struct efx_dl_device efx_dev;
++ /* The efx_nic providers provide */
++ struct efx_nic *efx;
++ /* Per-device list */
++ struct list_head port_node;
++ /* Per-driver list */
++ struct list_head driver_node;
++};
++
++/* Get the handle for an efx_dl_device */
++static struct efx_dl_handle *efx_dl_handle(struct efx_dl_device *efx_dev)
++{
++ return container_of(efx_dev, struct efx_dl_handle, efx_dev);
++}
++
++/* Remove an Efx device
++ * You must hold the efx_driverlink_lock before calling this
++ * function.
++ */
++static void efx_dl_del_device(struct efx_dl_device *efx_dev)
++{
++ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
++
++ EFX_INFO(efx_handle->efx, "%s driverlink client unregistering\n",
++ efx_dev->driver->name);
++
++ /* Call driver's remove() routine */
++ if (efx_dev->driver->remove)
++ efx_dev->driver->remove(efx_dev);
++
++ /* Remove handle from per-driver and per-NIC lists */
++ list_del(&efx_handle->driver_node);
++ list_del(&efx_handle->port_node);
++
++ /* Free efx_handle structure */
++ kfree(efx_handle);
++}
++
++/* Try to add an Efx device
++ * Attempt to probe the given device with the driver, creating a
++ * new efx_dl_device. If the probe routine fails (e.g. because the
++ * driver doesn't support this port), the efx_dl_device is destroyed.
++ */
++static void efx_dl_try_add_device(struct efx_nic *efx,
++ struct efx_dl_driver *driver)
++{
++ struct efx_dl_handle *efx_handle;
++ struct efx_dl_device *efx_dev;
++ int rc;
++
++	/* Allocate and initialise new efx_dl_device structure */
++	efx_handle = kzalloc(sizeof(*efx_handle), GFP_KERNEL);
++	if (!efx_handle)
++		return;
++	efx_dev = &efx_handle->efx_dev;
++	efx_handle->efx = efx;
++ efx_dev->driver = driver;
++ efx_dev->pci_dev = efx->pci_dev;
++ INIT_LIST_HEAD(&efx_handle->port_node);
++ INIT_LIST_HEAD(&efx_handle->driver_node);
++
++ /* Attempt driver probe */
++ rc = driver->probe(efx_dev, efx->net_dev,
++ efx->dl_info, efx->silicon_rev);
++ if (rc)
++ goto fail;
++
++ /* Add device to per-driver and per-NIC lists */
++ list_add_tail(&efx_handle->driver_node, &driver->device_list);
++ list_add_tail(&efx_handle->port_node, &efx->dl_device_list);
++
++ EFX_INFO(efx, "%s driverlink client registered\n", driver->name);
++ return;
++
++ fail:
++ EFX_INFO(efx, "%s driverlink client skipped\n", driver->name);
++
++	kfree(efx_handle);
++}
++
++/**
++ * efx_dl_unregister_driver - unregister an Efx device driver
++ * @driver: Efx driverlink driver
++ *
++ * Unregisters an Efx driver. The driver's remove() method will be
++ * called for all Efx devices currently claimed by the driver.
++ */
++void efx_dl_unregister_driver(struct efx_dl_driver *driver)
++{
++ struct efx_dl_handle *efx_handle, *efx_handle_n;
++
++ printk(KERN_INFO "Efx driverlink unregistering %s driver\n",
++ driver->name);
++
++	/* Acquire lock. We can't return failure, so have to use
++	 * mutex_lock() instead of mutex_lock_interruptible()
++	 */
++ mutex_lock(&efx_driverlink_lock);
++
++ /* Remove all devices claimed by the driver */
++ list_for_each_entry_safe(efx_handle, efx_handle_n,
++ &driver->device_list, driver_node)
++ efx_dl_del_device(&efx_handle->efx_dev);
++
++ /* Remove driver from driver list */
++ list_del(&driver->node);
++
++ /* Release lock */
++ mutex_unlock(&efx_driverlink_lock);
++}
++EXPORT_SYMBOL(efx_dl_unregister_driver);
++
++/**
++ * efx_dl_register_driver - register an Efx device driver
++ * @driver: Efx driverlink driver
++ *
++ * Registers a new Efx driver. The driver's probe() method will be
++ * called for all Efx NICs currently registered.
++ *
++ * Return a negative error code or 0 on success.
++ */
++int efx_dl_register_driver(struct efx_dl_driver *driver)
++{
++ struct efx_nic *efx;
++ int rc;
++
++ printk(KERN_INFO "Efx driverlink registering %s driver\n",
++ driver->name);
++
++ /* Initialise driver list structures */
++ INIT_LIST_HEAD(&driver->node);
++ INIT_LIST_HEAD(&driver->device_list);
++
++ /* Acquire lock */
++ rc = mutex_lock_interruptible(&efx_driverlink_lock);
++ if (rc)
++ return rc;
++
++ /* Add driver to driver list */
++ list_add_tail(&driver->node, &efx_driver_list);
++
++ /* Feed all existing devices to driver */
++ list_for_each_entry(efx, &efx_port_list, dl_node)
++ efx_dl_try_add_device(efx, driver);
++
++ /* Release locks */
++ mutex_unlock(&efx_driverlink_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_dl_register_driver);
++
++void efx_dl_unregister_nic(struct efx_nic *efx)
++{
++ struct efx_dl_handle *efx_handle, *efx_handle_n;
++
++ if (!efx)
++ return;
++
++	/* Acquire lock. We can't return failure, so have to use
++	 * mutex_lock() instead of mutex_lock_interruptible()
++	 */
++ mutex_lock(&efx_driverlink_lock);
++
++ /* Remove all devices related to this NIC */
++ list_for_each_entry_safe_reverse(efx_handle, efx_handle_n,
++ &efx->dl_device_list,
++ port_node)
++ efx_dl_del_device(&efx_handle->efx_dev);
++
++ /* Remove port from port list */
++ list_del(&efx->dl_node);
++
++ /* Release lock */
++ mutex_unlock(&efx_driverlink_lock);
++}
++
++int efx_dl_register_nic(struct efx_nic *efx)
++{
++ struct efx_dl_driver *driver;
++ int rc;
++
++ /* Acquire lock */
++ rc = mutex_lock_interruptible(&efx_driverlink_lock);
++ if (rc)
++ return rc;
++
++ /* Add port to port list */
++ list_add_tail(&efx->dl_node, &efx_port_list);
++
++ /* Feed port to all existing drivers */
++ list_for_each_entry(driver, &efx_driver_list, node)
++ efx_dl_try_add_device(efx, driver);
++
++ /* Release lock */
++ mutex_unlock(&efx_driverlink_lock);
++
++ return 0;
++}
++
++/*
++ * Dummy callback implementations.
++ *
++ * To avoid a branch point on the fast-path, the callbacks are always
++ * implemented - they are never NULL.
++ */
++#if defined(EFX_USE_FASTCALL)
++static enum efx_veto fastcall
++#else
++static enum efx_veto
++#endif
++efx_dummy_tx_packet_callback(struct efx_dl_device *efx_dev, struct sk_buff *skb)
++{
++ /* Never veto the packet */
++ return EFX_ALLOW_PACKET;
++}
++
++#if defined(EFX_USE_FASTCALL)
++static enum efx_veto fastcall
++#else
++static enum efx_veto
++#endif
++efx_dummy_rx_packet_callback(struct efx_dl_device *efx_dev,
++ const char *pkt_buf, int len)
++{
++ /* Never veto the packet */
++ return EFX_ALLOW_PACKET;
++}
++
++static void
++efx_dummy_link_change_callback(struct efx_dl_device *efx_dev, int link_up)
++{
++}
++
++static int
++efx_dummy_request_mtu_callback(struct efx_dl_device *efx_dev, int new_mtu)
++{
++ /* Always allow */
++ return 0;
++}
++
++static void
++efx_dummy_mtu_changed_callback(struct efx_dl_device *efx_dev, int mtu)
++{
++}
++
++static void efx_dummy_event_callback(struct efx_dl_device *efx_dev, void *event)
++{
++}
++
++struct efx_dl_callbacks efx_default_callbacks = {
++ .tx_packet = efx_dummy_tx_packet_callback,
++ .rx_packet = efx_dummy_rx_packet_callback,
++ .link_change = efx_dummy_link_change_callback,
++ .request_mtu = efx_dummy_request_mtu_callback,
++ .mtu_changed = efx_dummy_mtu_changed_callback,
++ .event = efx_dummy_event_callback,
++};
++
++#define EFX_DL_UNREGISTER_CALLBACK(_port, _dev, _member) \
++ do { \
++ BUG_ON((_port)->dl_cb_dev._member != (_dev)); \
++ (_port)->dl_cb._member = \
++ efx_default_callbacks._member; \
++ (_port)->dl_cb_dev._member = NULL; \
++ } while (0)
++
++
++#define EFX_DL_REGISTER_CALLBACK(_port, _dev, _from, _member)	\
++	do {							\
++		if ((_from)->_member) {				\
++			BUG_ON((_port)->dl_cb_dev._member != NULL); \
++			(_port)->dl_cb._member = (_from)->_member; \
++			(_port)->dl_cb_dev._member = _dev;	\
++		}						\
++	} while (0)
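++
++/* For illustration, EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks,
++ * tx_packet) expands to (roughly):
++ *
++ *	if (callbacks->tx_packet) {
++ *		BUG_ON(efx->dl_cb_dev.tx_packet != NULL);
++ *		efx->dl_cb.tx_packet = callbacks->tx_packet;
++ *		efx->dl_cb_dev.tx_packet = efx_dev;
++ *	}
++ */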
++
++/**
++ * efx_dl_unregister_callbacks - unregister callbacks for an Efx NIC
++ * @efx_dev: Efx driverlink device
++ * @callbacks: Callback list
++ *
++ * This removes a set of callbacks registered with
++ * efx_dl_register_callbacks(). It should be called as part of the
++ * client's remove() method.
++ *
++ * The net driver will ensure that all callback functions have
++ * returned to the net driver before efx_dl_unregister_callbacks()
++ * returns. Note that the device itself may still be running when the
++ * client's remove() method is called. The client must therefore
++ * unhook its callbacks using efx_dl_unregister_callbacks() and only
++ * then ensure that any delayed tasks triggered by callback methods
++ * (e.g. scheduled tasklets) have completed.
++ */
++void efx_dl_unregister_callbacks(struct efx_dl_device *efx_dev,
++ struct efx_dl_callbacks *callbacks)
++{
++ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
++ struct efx_nic *efx = efx_handle->efx;
++
++ /* Suspend net driver operations */
++ efx_suspend(efx);
++
++ EFX_INFO(efx, "removing callback hooks into %s driver\n",
++ efx_dev->driver->name);
++
++ if (callbacks->tx_packet)
++ EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, tx_packet);
++
++ if (callbacks->rx_packet)
++ EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, rx_packet);
++
++ if (callbacks->link_change)
++ EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, link_change);
++
++ if (callbacks->request_mtu)
++ EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, request_mtu);
++
++ if (callbacks->mtu_changed)
++ EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, mtu_changed);
++
++ if (callbacks->event)
++ EFX_DL_UNREGISTER_CALLBACK(efx, efx_dev, event);
++
++ /* Resume net driver operations */
++ efx_resume(efx);
++}
++EXPORT_SYMBOL(efx_dl_unregister_callbacks);
++
++/**
++ * efx_dl_register_callbacks - register callbacks for an Efx NIC
++ * @efx_dev: Efx driverlink device
++ * @callbacks: Callback list
++ *
++ * This registers a set of callback functions with the net driver.
++ * These functions will be called at various key points to allow
++ * external code to monitor and/or modify the behaviour of the network
++ * driver. Any of the callback function pointers may be %NULL if a
++ * callback is not required. The intended user of this mechanism is
++ * the SFC char driver.
++ *
++ * The client should call efx_dl_register_callbacks() during its
++ * probe() method. The client must ensure that it also calls
++ * efx_dl_unregister_callbacks() as part of its remove() method.
++ *
++ * Only one function may be registered for each callback per NIC.
++ * If a requested callback is already registered for this NIC, this
++ * function will return -%EBUSY.
++ *
++ * The device may already be running, so the client must be prepared
++ * for callbacks to be triggered immediately after calling
++ * efx_dl_register_callbacks().
++ *
++ * Return a negative error code or 0 on success.
++ */
++int efx_dl_register_callbacks(struct efx_dl_device *efx_dev,
++ struct efx_dl_callbacks *callbacks)
++{
++ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
++ struct efx_nic *efx = efx_handle->efx;
++ int rc = 0;
++
++ /* Suspend net driver operations */
++ efx_suspend(efx);
++
++ /* Check that the requested callbacks are not already hooked. */
++ if ((callbacks->tx_packet && efx->dl_cb_dev.tx_packet) ||
++ (callbacks->rx_packet && efx->dl_cb_dev.rx_packet) ||
++ (callbacks->link_change && efx->dl_cb_dev.link_change) ||
++ (callbacks->request_mtu && efx->dl_cb_dev.request_mtu) ||
++ (callbacks->mtu_changed && efx->dl_cb_dev.mtu_changed) ||
++ (callbacks->event && efx->dl_cb_dev.event)) {
++ rc = -EBUSY;
++ goto out;
++ }
++
++ EFX_INFO(efx, "adding callback hooks to %s driver\n",
++ efx_dev->driver->name);
++
++ /* Hook in callbacks. For maximum speed, we never check to
++ * see whether these are NULL before calling; therefore we
++ * must ensure that they are never NULL. If the set we're
++ * being asked to hook in is sparse, we leave the default
++ * values in place for the empty hooks.
++ */
++ EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, tx_packet);
++ EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, rx_packet);
++ EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, link_change);
++ EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, request_mtu);
++ EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, mtu_changed);
++ EFX_DL_REGISTER_CALLBACK(efx, efx_dev, callbacks, event);
++
++ out:
++ /* Resume net driver operations */
++ efx_resume(efx);
++
++ return rc;
++}
++EXPORT_SYMBOL(efx_dl_register_callbacks);
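++
++/* A sketch, for illustration only, of a client hooking a sparse
++ * callback set from its probe() method; example_rx_callback is a
++ * hypothetical handler, not part of this driver:
++ *
++ *	static struct efx_dl_callbacks example_callbacks = {
++ *		.rx_packet = example_rx_callback,
++ *	};
++ *
++ *	rc = efx_dl_register_callbacks(efx_dev, &example_callbacks);
++ *
++ * Hooks left NULL here keep their default (dummy) implementations,
++ * and the same structure would later be passed to
++ * efx_dl_unregister_callbacks() from the client's remove() method.
++ */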
++
++/**
++ * efx_dl_schedule_reset - schedule an Efx NIC reset
++ * @efx_dev: Efx driverlink device
++ *
++ * This schedules a hardware reset for a short time in the future. It
++ * can be called from any context, and so can be used when
++ * efx_dl_reset() cannot be called.
++ */
++void efx_dl_schedule_reset(struct efx_dl_device *efx_dev)
++{
++ struct efx_dl_handle *efx_handle = efx_dl_handle(efx_dev);
++ struct efx_nic *efx = efx_handle->efx;
++
++ efx_schedule_reset(efx, RESET_TYPE_ALL);
++}
++EXPORT_SYMBOL(efx_dl_schedule_reset);
++
++/*
++ * Lock the driverlink layer before a reset
++ * To avoid deadlock, efx_driverlink_lock needs to be acquired before
++ * efx->suspend_lock.
++ */
++void efx_dl_reset_lock(void)
++{
++ /* Acquire lock */
++ mutex_lock(&efx_driverlink_lock);
++}
++
++/*
++ * Unlock the driverlink layer after a reset
++ * This call must be matched against efx_dl_reset_lock.
++ */
++void efx_dl_reset_unlock(void)
++{
++	/* Release lock */
++ mutex_unlock(&efx_driverlink_lock);
++}
++
++/*
++ * Suspend ready for reset
++ * This calls the reset_suspend method of all drivers registered to
++ * the specified NIC. It must only be called between
++ * efx_dl_reset_lock and efx_dl_reset_unlock.
++ */
++void efx_dl_reset_suspend(struct efx_nic *efx)
++{
++ struct efx_dl_handle *efx_handle;
++ struct efx_dl_device *efx_dev;
++
++ BUG_ON(!mutex_is_locked(&efx_driverlink_lock));
++
++ /* Call suspend method of each driver in turn */
++ list_for_each_entry_reverse(efx_handle,
++ &efx->dl_device_list,
++ port_node) {
++ efx_dev = &efx_handle->efx_dev;
++ if (efx_dev->driver->reset_suspend)
++ efx_dev->driver->reset_suspend(efx_dev);
++ }
++}
++
++/*
++ * Resume after a reset
++ * This calls the reset_resume method of all drivers registered to the
++ * specified NIC. It must only be called between efx_dl_reset_lock
++ * and efx_dl_reset_unlock.
++ */
++void efx_dl_reset_resume(struct efx_nic *efx, int ok)
++{
++ struct efx_dl_handle *efx_handle;
++ struct efx_dl_device *efx_dev;
++
++ BUG_ON(!mutex_is_locked(&efx_driverlink_lock));
++
++ /* Call resume method of each driver in turn */
++ list_for_each_entry(efx_handle, &efx->dl_device_list,
++ port_node) {
++ efx_dev = &efx_handle->efx_dev;
++ if (efx_dev->driver->reset_resume)
++ efx_dev->driver->reset_resume(efx_dev, ok);
++ }
++}
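++
++/* Taken together, the calling sequence expected from the net driver
++ * around a hardware reset is, in outline (a sketch rather than a
++ * verbatim quote of the reset path):
++ *
++ *	efx_dl_reset_lock();
++ *	efx_dl_reset_suspend(efx);
++ *	... perform the hardware reset ...
++ *	efx_dl_reset_resume(efx, ok);
++ *	efx_dl_reset_unlock();
++ */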
++
++/**
++ * efx_dl_get_nic - obtain the Efx NIC for the given driverlink device
++ * @efx_dev: Efx driverlink device
++ *
++ * Get a pointer to the &struct efx_nic corresponding to
++ * @efx_dev. This can be used by driverlink clients built along with
++ * the sfc driver, which may have intimate knowledge of its internals.
++ */
++struct efx_nic *efx_dl_get_nic(struct efx_dl_device *efx_dev)
++{
++ return efx_dl_handle(efx_dev)->efx;
++}
++EXPORT_SYMBOL(efx_dl_get_nic);
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/driverlink.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/driverlink.h
+--- linux-2.6.18.8/drivers/net/sfc/driverlink.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/driverlink.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,93 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_DRIVERLINK_H
++#define EFX_DRIVERLINK_H
++
++/* Forward declarations */
++struct efx_dl_device;
++struct efx_nic;
++
++/*
++ * Efx driverlink
++ *
++ * This header file defines the portions of the Efx driverlink
++ * interface that are used only within the sfc module. It also
++ * declares efx_dl_get_nic(), which may be used by sfc_mtd
++ * and any other module built along with sfc.
++ */
++
++
++/* Efx callback devices
++ *
++ * A list of the devices that own each callback. The partner to
++ * struct efx_dl_callbacks
++ */
++struct efx_dl_cb_devices {
++ /* Device owning the tx_packet callback */
++ struct efx_dl_device *tx_packet;
++ /* Device owning the rx_packet callback */
++ struct efx_dl_device *rx_packet;
++ /* Device owning the link_change callback. */
++ struct efx_dl_device *link_change;
++ /* Device owning the request_mtu callback. */
++ struct efx_dl_device *request_mtu;
++ /* Device owning the mtu_changed callback. */
++ struct efx_dl_device *mtu_changed;
++ /* Device owning the event callback. */
++ struct efx_dl_device *event;
++};
++
++/* No-op callbacks used for initialisation */
++extern struct efx_dl_callbacks efx_default_callbacks;
++
++/* Macro used to invoke callbacks */
++#define EFX_DL_CALLBACK(_port, _name, ...) \
++ (_port)->dl_cb._name((_port)->dl_cb_dev._name, __VA_ARGS__)
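++
++/* For example, the net driver's call
++ *	EFX_DL_CALLBACK(efx, link_change, efx->link_up)
++ * expands to
++ *	(efx)->dl_cb.link_change((efx)->dl_cb_dev.link_change, efx->link_up)
++ * which is why the function pointers in dl_cb are initialised from
++ * efx_default_callbacks and never left NULL.
++ */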
++
++/* Register an Efx NIC */
++extern int efx_dl_register_nic(struct efx_nic *efx);
++
++/* Unregister an Efx NIC */
++extern void efx_dl_unregister_nic(struct efx_nic *efx);
++
++/* Lock the driverlink layer prior to a reset */
++extern void efx_dl_reset_lock(void);
++
++/* Unlock the driverlink layer following a reset */
++extern void efx_dl_reset_unlock(void);
++
++/* Suspend all drivers prior to a hardware reset */
++extern void efx_dl_reset_suspend(struct efx_nic *efx);
++
++/* Resume all drivers after a hardware reset */
++extern void efx_dl_reset_resume(struct efx_nic *efx, int ok);
++
++/* Obtain the Efx NIC for the given driverlink device. */
++extern struct efx_nic *efx_dl_get_nic(struct efx_dl_device *efx_dev);
++
++#endif /* EFX_DRIVERLINK_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/efx.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/efx.c
+--- linux-2.6.18.8/drivers/net/sfc/efx.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/efx.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2783 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/delay.h>
++#include <linux/notifier.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/in.h>
++#include <linux/crc32.h>
++#include <linux/ethtool.h>
++#include <asm/uaccess.h>
++#include "net_driver.h"
++#include "gmii.h"
++#include "driverlink.h"
++#include "selftest.h"
++#include "debugfs.h"
++#include "ethtool.h"
++#include "tx.h"
++#include "rx.h"
++#include "efx.h"
++#include "mdio_10g.h"
++#include "falcon.h"
++#include "workarounds.h"
++
++/**************************************************************************
++ *
++ * Type name strings
++ *
++ **************************************************************************
++ */
++
++/* Loopback mode names (see LOOPBACK_MODE()) */
++const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
++const char *efx_loopback_mode_names[] = {
++ [LOOPBACK_NONE] = "NONE",
++ [LOOPBACK_MAC] = "MAC",
++ [LOOPBACK_XGMII] = "XGMII",
++ [LOOPBACK_XGXS] = "XGXS",
++ [LOOPBACK_XAUI] = "XAUI",
++ [LOOPBACK_PHY] = "PHY",
++ [LOOPBACK_PHYXS] = "PHY(XS)",
++ [LOOPBACK_PCS] = "PHY(PCS)",
++ [LOOPBACK_PMAPMD] = "PHY(PMAPMD)",
++ [LOOPBACK_NETWORK] = "NETWORK",
++};
++
++/* Interrupt mode names (see INT_MODE()) */
++const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
++const char *efx_interrupt_mode_names[] = {
++ [EFX_INT_MODE_MSIX] = "MSI-X",
++ [EFX_INT_MODE_MSI] = "MSI",
++ [EFX_INT_MODE_LEGACY] = "legacy",
++};
++
++/* PHY type names (see PHY_TYPE()) */
++const unsigned int efx_phy_type_max = PHY_TYPE_MAX;
++const char *efx_phy_type_names[] = {
++ [PHY_TYPE_NONE] = "none",
++ [PHY_TYPE_CX4_RTMR] = "Mysticom CX4",
++ [PHY_TYPE_1G_ALASKA] = "1G Alaska",
++ [PHY_TYPE_10XPRESS] = "SFC 10Xpress",
++ [PHY_TYPE_XFP] = "Quake XFP",
++ [PHY_TYPE_PM8358] = "PM8358 XAUI",
++};
++
++const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
++const char *efx_reset_type_names[] = {
++ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
++ [RESET_TYPE_ALL] = "ALL",
++ [RESET_TYPE_WORLD] = "WORLD",
++ [RESET_TYPE_DISABLE] = "DISABLE",
++ [RESET_TYPE_MONITOR] = "MONITOR",
++ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
++ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
++};
++
++const unsigned int efx_nic_state_max = STATE_MAX;
++const char *efx_nic_state_names[] = {
++ [STATE_INIT] = "INIT",
++ [STATE_RUNNING] = "RUNNING",
++ [STATE_FINI] = "FINI",
++ [STATE_RESETTING] = "RESETTING",
++ [STATE_DISABLED] = "DISABLED",
++};
++
++#define EFX_MAX_MTU (9 * 1024)
++
++
++/**************************************************************************
++ *
++ * Configurable values
++ *
++ *************************************************************************/
++
++/*
++ * Use separate channels for TX and RX events
++ *
++ * Set this to 1 to use separate channels for TX and RX. It allows us to
++ * apply a higher level of interrupt moderation to TX events.
++ *
++ * This is forced to 0 for MSI interrupt mode as the interrupt vector
++ * is not written.
++ */
++static unsigned int separate_tx_and_rx_channels = 1;
++
++/* This is the weight assigned to each of the (per-channel) virtual
++ * NAPI devices.
++ */
++static int napi_weight = 64;
++
++/* This is the time (in jiffies) between invocations of the hardware
++ * monitor, which checks for known hardware bugs and resets the
++ * hardware and driver as necessary.
++ */
++unsigned int efx_monitor_interval = 1 * HZ;
++
++/* This controls whether or not the hardware monitor will trigger a
++ * reset when it detects an error condition.
++ */
++static unsigned int monitor_reset = 1;
++
++/* This controls whether or not the driver will initialise devices
++ * with invalid MAC addresses stored in the EEPROM or flash. If true,
++ * such devices will be initialised with a random locally-generated
++ * MAC address. This allows for loading the efx_mtd driver to
++ * reprogram the flash, even if the flash contents (including the MAC
++ * address) have previously been erased.
++ */
++static unsigned int allow_bad_hwaddr;
++
++/* Initial interrupt moderation settings. They can be modified after
++ * module load with ethtool.
++ *
++ * The default for RX should strike a balance between increasing the
++ * round-trip latency and reducing overhead.
++ */
++static unsigned int rx_irq_mod_usec = 60;
++
++/* Initial interrupt moderation settings. They can be modified after
++ * module load with ethtool.
++ *
++ * This default is chosen to ensure that a 10G link does not go idle
++ * while a TX queue is stopped after it has become full. A queue is
++ * restarted when it drops below half full. The time this takes (assuming
++ * worst case 3 descriptors per packet and 1024 descriptors) is
++ * 512 / 3 * 1.2 = 205 usec.
++ */
++static unsigned int tx_irq_mod_usec = 150;
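++
++/* (Illustrative: after module load, both moderation values can be
++ * changed with a standard ethtool coalescing command along the lines
++ * of "ethtool -C <iface> rx-usecs 60 tx-usecs 150", assuming the
++ * driver's ethtool hooks expose interrupt coalescing.) */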
++
++/* Ignore online self-test failures at load
++ *
++ * If set to 1, then the driver will not fail to load
++ * if the online self-test fails. Useful only during testing.
++ */
++static unsigned int allow_load_on_failure;
++
++/* This is the highest-capability interrupt mode to use. Note that
++ * Message-Signalled Interrupts (MSI, and MSI-X) will not work on some
++ * motherboards due to limitations of the chipset.
++ *
++ * 0 => MSI-X (default)
++ * 1 => MSI
++ * 2 => legacy
++ */
++static unsigned int interrupt_mode;
++
++/* If set to 1, then the driver will perform an offline self test
++ * when each interface first comes up. This will appear as if the
++ * interface bounces up and down.
++ */
++static unsigned int onload_offline_selftest = 1;
++
++/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
++ * i.e. the number of CPUs among which we may distribute simultaneous
++ * interrupt handling.
++ *
++ * Cards without MSI-X will only target one CPU
++ *
++ * Default (0) means to use all CPUs in the system. This parameter
++ * can be set using "rss_cpus=xxx" when loading the module.
++ */
++static unsigned int rss_cpus;
++module_param(rss_cpus, uint, 0444);
++MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
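++
++/* For example, loading the module with "rss_cpus=4" (e.g. via
++ * "modprobe sfc rss_cpus=4", assuming the module is installed under
++ * the name sfc) limits RSS to four interrupt-handling CPUs. */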
++
++/**************************************************************************
++ *
++ * Utility functions and prototypes
++ *
++ *************************************************************************/
++static void efx_remove_channel(struct efx_channel *channel);
++static void efx_remove_port(struct efx_nic *efx);
++static void efx_fini_napi(struct efx_nic *efx);
++static void efx_fini_channels(struct efx_nic *efx);
++
++/**************************************************************************
++ *
++ * Event queue processing
++ *
++ *************************************************************************/
++
++/* Process channel's event queue
++ *
++ * This function is responsible for processing the event queue of a
++ * single channel. The caller must guarantee that this function will
++ * never be concurrently called more than once on the same channel,
++ * though different channels may be being processed concurrently.
++ */
++static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
++{
++ int rxdmaqs;
++ struct efx_rx_queue *rx_queue;
++
++ if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
++ !channel->enabled))
++ return rx_quota;
++
++ rxdmaqs = falcon_process_eventq(channel, &rx_quota);
++
++ /* Deliver last RX packet. */
++ if (channel->rx_pkt) {
++ __efx_rx_packet(channel, channel->rx_pkt,
++ channel->rx_pkt_csummed);
++ channel->rx_pkt = NULL;
++ }
++
++ efx_rx_strategy(channel);
++
++ /* Refill descriptor rings as necessary */
++ rx_queue = &channel->efx->rx_queue[0];
++ while (rxdmaqs) {
++ if (rxdmaqs & 0x01)
++ efx_fast_push_rx_descriptors(rx_queue);
++ rx_queue++;
++ rxdmaqs >>= 1;
++ }
++
++ return rx_quota;
++}
++
++/* Mark channel as finished processing
++ *
++ * Note that since we will not receive further interrupts for this
++ * channel before we finish processing and call the eventq_read_ack()
++ * method, there is no need to use the interrupt hold-off timers.
++ */
++static inline void efx_channel_processed(struct efx_channel *channel)
++{
++ /* Write to EVQ_RPTR_REG. If a new event arrived in a race
++ * with finishing processing, a new interrupt will be raised.
++ */
++ channel->work_pending = 0;
++ smp_wmb(); /* Ensure channel updated before any new interrupt. */
++ falcon_eventq_read_ack(channel);
++}
++
++/* NAPI poll handler
++ *
++ * NAPI guarantees serialisation of polls of the same device, which
++ * provides the guarantee required by efx_process_channel().
++ */
++#if !defined(EFX_HAVE_OLD_NAPI)
++static int efx_poll(struct napi_struct *napi, int budget)
++{
++ struct efx_channel *channel =
++ container_of(napi, struct efx_channel, napi_str);
++ struct net_device *napi_dev = channel->napi_dev;
++#else
++static int efx_poll(struct net_device *napi, int *budget_ret)
++{
++ struct net_device *napi_dev = napi;
++ struct efx_channel *channel = napi_dev->priv;
++ int budget = min(napi_dev->quota, *budget_ret);
++#endif
++ int unused;
++ int rx_packets;
++
++ EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
++ channel->channel, raw_smp_processor_id());
++
++ unused = efx_process_channel(channel, budget);
++ rx_packets = (budget - unused);
++#if defined(EFX_HAVE_OLD_NAPI)
++ napi_dev->quota -= rx_packets;
++ *budget_ret -= rx_packets;
++#endif
++
++ if (rx_packets < budget) {
++ /* There is no race here; although napi_disable() will
++ * only wait for netif_rx_complete(), this isn't a problem
++ * since efx_channel_processed() will have no effect if
++ * interrupts have already been disabled.
++ */
++ netif_rx_complete(napi_dev, napi);
++ efx_channel_processed(channel);
++ }
++
++#if !defined(EFX_HAVE_OLD_NAPI)
++ return rx_packets;
++#else
++ return (rx_packets >= budget);
++#endif
++}
++
++/* Process the eventq of the specified channel immediately on this CPU
++ *
++ * Disable hardware-generated interrupts, wait for any existing
++ * processing to finish, then directly poll (and ack) the eventq.
++ * Finally reenable NAPI and interrupts.
++ *
++ * Since we are touching interrupts, the caller should hold the suspend lock.
++ */
++void efx_process_channel_now(struct efx_channel *channel)
++{
++ struct efx_nic *efx = channel->efx;
++
++ BUG_ON(!channel->used_flags);
++ BUG_ON(!channel->enabled);
++
++ /* Disable interrupts and wait for ISRs to complete */
++ falcon_disable_interrupts(efx);
++ if (efx->legacy_irq)
++ synchronize_irq(efx->legacy_irq);
++ if (channel->has_interrupt && channel->irq)
++ synchronize_irq(channel->irq);
++
++ /* Wait for any NAPI processing to complete */
++ napi_disable(&channel->napi_str);
++
++ /* Poll the channel */
++ (void) efx_process_channel(channel, efx->type->evq_size);
++
++	/* Ack the eventq. This may cause an interrupt to be generated
++	 * when interrupts are reenabled */
++ efx_channel_processed(channel);
++
++ /* Reenable NAPI polling */
++ napi_enable(&channel->napi_str);
++
++ /* Reenable interrupts */
++ falcon_enable_interrupts(efx);
++}
++
++/* Create event queue
++ * Event queue memory allocations are done only once. If the channel
++ * is reset, the memory buffer will be reused; this guards against
++ * errors during channel reset and also simplifies interrupt handling.
++ */
++static int efx_probe_eventq(struct efx_channel *channel)
++{
++ EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
++
++ return falcon_probe_eventq(channel);
++}
++
++/* Prepare channel's event queue */
++static int efx_init_eventq(struct efx_channel *channel)
++{
++ EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
++
++ ASSERT_RTNL();
++
++ /* Initialise fields */
++ channel->eventq_read_ptr = 0;
++
++ return falcon_init_eventq(channel);
++}
++
++static void efx_fini_eventq(struct efx_channel *channel)
++{
++ EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
++
++ ASSERT_RTNL();
++
++ falcon_fini_eventq(channel);
++}
++
++static void efx_remove_eventq(struct efx_channel *channel)
++{
++ EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
++
++ falcon_remove_eventq(channel);
++}
++
++/**************************************************************************
++ *
++ * Channel handling
++ *
++ *************************************************************************/
++
++/* Set up per-NIC RX buffer parameters.
++ * Calculate the rx buffer allocation parameters required to support
++ * the current MTU, including padding for header alignment and overruns.
++ */
++static void efx_calc_rx_buffer_params(struct efx_nic *efx)
++{
++ unsigned int order, len;
++
++ len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
++ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
++ efx->type->rx_buffer_padding);
++
++ /* Page-based allocation page-order */
++ for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
++ ;
++
++ efx->rx_buffer_len = len;
++ efx->rx_buffer_order = order;
++}
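++
++/* A worked example of the calculation above (illustrative; it assumes
++ * 4KiB pages and that EFX_MAX_FRAME_LEN adds only modest framing
++ * overhead): with a 1500-byte MTU, len stays below 4096 bytes, so
++ * order 0 suffices; with a 9000-byte jumbo MTU, len exceeds 8192
++ * bytes, so order 2 is chosen (a 16KiB allocation). */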
++
++static int efx_probe_channel(struct efx_channel *channel)
++{
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++ int rc;
++
++ EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
++
++ rc = efx_probe_eventq(channel);
++ if (rc)
++ goto fail1;
++
++ efx_for_each_channel_tx_queue(tx_queue, channel) {
++ rc = efx_probe_tx_queue(tx_queue);
++ if (rc)
++ goto fail2;
++ }
++
++ efx_for_each_channel_rx_queue(rx_queue, channel) {
++ rc = efx_probe_rx_queue(rx_queue);
++ if (rc)
++ goto fail3;
++ }
++
++ channel->n_rx_frm_trunc = 0;
++
++ return 0;
++
++ fail3:
++ efx_for_each_channel_rx_queue(rx_queue, channel)
++ efx_remove_rx_queue(rx_queue);
++ fail2:
++ efx_for_each_channel_tx_queue(tx_queue, channel)
++ efx_remove_tx_queue(tx_queue);
++ fail1:
++ return rc;
++}
++
++
++/* Channels are shut down and reinitialised whilst the NIC is running
++ * to propagate configuration changes (mtu, checksum offload), or
++ * to clear hardware error conditions
++ */
++static int efx_init_channels(struct efx_nic *efx)
++{
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++ struct efx_channel *channel;
++ int rc = 0;
++
++ /* Recalculate the rx buffer parameters */
++ efx_calc_rx_buffer_params(efx);
++
++ /* Initialise the channels */
++ efx_for_each_channel(channel, efx) {
++ EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
++
++ rc = efx_init_eventq(channel);
++ if (rc)
++ goto err;
++
++ efx_for_each_channel_tx_queue(tx_queue, channel) {
++ rc = efx_init_tx_queue(tx_queue);
++ if (rc)
++ goto err;
++ }
++
++ /* The rx buffer allocation strategy is MTU dependent */
++ efx_rx_strategy(channel);
++
++ efx_for_each_channel_rx_queue(rx_queue, channel) {
++ rc = efx_init_rx_queue(rx_queue);
++ if (rc)
++ goto err;
++ }
++
++ WARN_ON(channel->rx_pkt != NULL);
++ efx_rx_strategy(channel);
++ }
++
++ return 0;
++
++ err:
++ EFX_ERR(efx, "failed to initialise channel %d\n",
++ channel ? channel->channel : -1);
++ efx_fini_channels(efx);
++ return rc;
++}
++
++/* This enables event queue processing and packet transmission.
++ *
++ * Note that this function is not allowed to fail, since that would
++ * introduce too much complexity into the suspend/resume path.
++ */
++static void efx_start_channel(struct efx_channel *channel)
++{
++ struct efx_rx_queue *rx_queue;
++
++ EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
++
++ if (!(channel->efx->net_dev->flags & IFF_UP))
++ netif_napi_add(channel->napi_dev, &channel->napi_str,
++ efx_poll, napi_weight);
++
++ /* Mark channel as enabled */
++ channel->work_pending = 0;
++ channel->enabled = 1;
++ smp_wmb(); /* ensure channel updated before first interrupt */
++
++ /* Enable NAPI poll handler */
++ napi_enable(&channel->napi_str);
++
++ /* Load up RX descriptors */
++ efx_for_each_channel_rx_queue(rx_queue, channel)
++ efx_fast_push_rx_descriptors(rx_queue);
++}
++
++/* This disables event queue processing and packet transmission.
++ * This function does not guarantee that all queue processing
++ * (e.g. RX refill) is complete.
++ */
++static void efx_stop_channel(struct efx_channel *channel)
++{
++ struct efx_rx_queue *rx_queue;
++
++ if (!channel->enabled)
++ return;
++
++ EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
++
++ /* Mark channel as disabled */
++ channel->enabled = 0;
++
++ /* Wait for any NAPI processing to complete */
++ napi_disable(&channel->napi_str);
++
++ /* Ensure that any worker threads have exited or will be
++ * no-ops.
++ */
++ efx_for_each_channel_rx_queue(rx_queue, channel) {
++ spin_lock_bh(&rx_queue->add_lock);
++ spin_unlock_bh(&rx_queue->add_lock);
++ }
++}
++
++static void efx_fini_channels(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++
++ ASSERT_RTNL();
++
++ efx_for_each_channel(channel, efx) {
++ EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
++
++ efx_for_each_channel_rx_queue(rx_queue, channel)
++ efx_fini_rx_queue(rx_queue);
++ efx_for_each_channel_tx_queue(tx_queue, channel)
++ efx_fini_tx_queue(tx_queue);
++ }
++
++ /* Do the event queues last so that we can handle flush events
++ * for all DMA queues. */
++ efx_for_each_channel(channel, efx) {
++ EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
++
++ efx_fini_eventq(channel);
++ }
++}
++
++static void efx_remove_channel(struct efx_channel *channel)
++{
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++
++ EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
++
++ efx_for_each_channel_rx_queue(rx_queue, channel)
++ efx_remove_rx_queue(rx_queue);
++ efx_for_each_channel_tx_queue(tx_queue, channel)
++ efx_remove_tx_queue(tx_queue);
++ efx_remove_eventq(channel);
++
++ channel->used_flags = 0;
++}
++
++/**************************************************************************
++ *
++ * Port handling
++ *
++ **************************************************************************/
++
++/* This ensures that the kernel is kept informed (via
++ * netif_carrier_on/off) of the link status, and also maintains the
++ * link-status-based stop on the port's TX queue.
++ */
++static void efx_link_status_changed(struct efx_nic *efx)
++{
++ unsigned long flags __attribute__ ((unused));
++ int carrier_ok;
++
++ /* Ensure no link status notifications get sent to the OS after the net
++ * device has been unregistered. */
++ if (!efx->net_dev_registered)
++ return;
++
++ carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
++ if (efx->link_up != carrier_ok) {
++ efx->n_link_state_changes++;
++
++ if (efx->link_up)
++ netif_carrier_on(efx->net_dev);
++ else
++ netif_carrier_off(efx->net_dev);
++ }
++
++ /* Inform driverlink client */
++ EFX_DL_CALLBACK(efx, link_change, efx->link_up);
++
++ /* Status message for kernel log */
++ if (efx->link_up) {
++ struct mii_if_info *gmii = &efx->mii;
++ unsigned adv, lpa;
++ /* NONE here means direct XAUI from the controller, with no
++ * MDIO-attached device we can query. */
++ if (efx->phy_type != PHY_TYPE_NONE) {
++ adv = gmii_advertised(gmii);
++ lpa = gmii_lpa(gmii);
++ } else {
++ lpa = GM_LPA_10000 | LPA_DUPLEX;
++ adv = lpa;
++ }
++ EFX_INFO(efx, "link up at %dMbps %s-duplex "
++ "(adv %04x lpa %04x) (MTU %d)%s%s%s%s\n",
++ (efx->link_options & GM_LPA_10000 ? 10000 :
++ (efx->link_options & GM_LPA_1000 ? 1000 :
++ (efx->link_options & GM_LPA_100 ? 100 :
++ 10))),
++ (efx->link_options & GM_LPA_DUPLEX ?
++ "full" : "half"),
++ adv, lpa,
++ efx->net_dev->mtu,
++ (efx->loopback_mode ? " [" : ""),
++ (efx->loopback_mode ? LOOPBACK_MODE(efx) : ""),
++ (efx->loopback_mode ? " LOOPBACK]" : ""),
++ (efx->promiscuous ? " [PROMISC]" : ""));
++ } else {
++ EFX_INFO(efx, "link down\n");
++ }
++
++}
++
++/* This call reinitialises the MAC to pick up new PHY settings.
++ * To call from a context that cannot sleep, use the reconfigure_work
++ * work item. For on_disabled=1 the caller must be serialised against
++ * efx_reset, ideally by holding the rtnl lock.
++ */
++void efx_reconfigure_port(struct efx_nic *efx, int on_disabled)
++{
++ mutex_lock(&efx->mac_lock);
++
++ EFX_LOG(efx, "reconfiguring MAC from PHY settings\n");
++
++ if (on_disabled)
++ ASSERT_RTNL();
++ else if (!efx->port_enabled)
++ goto out;
++
++ efx->mac_op->reconfigure(efx);
++
++out:
++ /* Inform kernel of loss/gain of carrier */
++ efx_link_status_changed(efx);
++
++ mutex_unlock(&efx->mac_lock);
++}
++
++static void efx_reconfigure_work(struct work_struct *data)
++{
++ struct efx_nic *efx = container_of(data, struct efx_nic,
++ reconfigure_work);
++
++ EFX_LOG(efx, "MAC reconfigure executing on CPU %d\n",
++ raw_smp_processor_id());
++
++ /* Reinitialise MAC to activate new PHY parameters */
++ efx_reconfigure_port(efx, 0);
++}
++
++static int efx_probe_port(struct efx_nic *efx)
++{
++ unsigned char *dev_addr;
++ int rc;
++
++ EFX_LOG(efx, "create port\n");
++
++ /* Connect up MAC/PHY operations table and read MAC address */
++ rc = falcon_probe_port(efx);
++ if (rc)
++ goto err;
++
++ /* Sanity check MAC address */
++ dev_addr = efx->mac_address;
++ if (!is_valid_ether_addr(dev_addr)) {
++ DECLARE_MAC_BUF(mac);
++
++ EFX_ERR(efx, "invalid MAC address %s\n",
++ print_mac(mac, dev_addr));
++ if (!allow_bad_hwaddr) {
++ rc = -EINVAL;
++ goto err;
++ }
++ random_ether_addr(dev_addr);
++ EFX_INFO(efx, "using locally-generated MAC %s\n",
++ print_mac(mac, dev_addr));
++ }
++
++ /* Register debugfs entries */
++ rc = efx_init_debugfs_port(efx);
++ if (rc)
++ goto err;
++
++ return 0;
++
++ err:
++ efx_remove_port(efx);
++ return rc;
++}
++
++static int efx_init_port(struct efx_nic *efx)
++{
++ int rc;
++
++ EFX_LOG(efx, "init port\n");
++
++ /* The default power state is ON */
++ efx->phy_powered = 1;
++
++ /* Initialise the MAC and PHY */
++ rc = efx->mac_op->init(efx);
++ if (rc)
++ return rc;
++
++ efx->port_initialized = 1;
++
++ /* Reconfigure port to program MAC registers */
++ efx->mac_op->reconfigure(efx);
++
++ return 0;
++}
++
++/* Allow efx_reconfigure_port() to run, and propagate delayed changes
++ * to the promiscuous flag to the MAC if needed */
++static void efx_start_port(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "start port\n");
++ ASSERT_RTNL();
++
++ BUG_ON(efx->port_enabled);
++
++ mutex_lock(&efx->mac_lock);
++ efx->port_enabled = 1;
++ mutex_unlock(&efx->mac_lock);
++
++ if (efx->net_dev_registered) {
++ int promiscuous;
++
++ netif_tx_lock_bh(efx->net_dev);
++ promiscuous = (efx->net_dev->flags & IFF_PROMISC) ? 1 : 0;
++ if (efx->promiscuous != promiscuous) {
++ efx->promiscuous = promiscuous;
++ queue_work(efx->workqueue, &efx->reconfigure_work);
++ }
++ netif_tx_unlock_bh(efx->net_dev);
++ }
++}
++
++/* Prevents efx_reconfigure_port() from executing, and prevents
++ * efx_set_multicast_list() from scheduling efx_reconfigure_work.
++ * efx_reconfigure_work can still be scheduled via NAPI processing
++ * until efx_flush_all() is called */
++static void efx_stop_port(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "stop port\n");
++ ASSERT_RTNL();
++
++ mutex_lock(&efx->mac_lock);
++ efx->port_enabled = 0;
++ mutex_unlock(&efx->mac_lock);
++
++ /* Serialise against efx_set_multicast_list() */
++ if (efx->net_dev_registered) {
++ netif_tx_lock_bh(efx->net_dev);
++ netif_tx_unlock_bh(efx->net_dev);
++ }
++}
++
++static void efx_fini_port(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "shut down port\n");
++
++ if (!efx->port_initialized)
++ return;
++
++ efx->mac_op->fini(efx);
++ efx->port_initialized = 0;
++
++ /* Mark the link down */
++ efx->link_up = 0;
++ efx_link_status_changed(efx);
++}
++
++static void efx_remove_port(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "destroying port\n");
++
++ efx_fini_debugfs_port(efx);
++ falcon_remove_port(efx);
++}
++
++/**************************************************************************
++ *
++ * NIC handling
++ *
++ **************************************************************************/
++
++/* This configures the PCI device to enable I/O and DMA. */
++static int efx_init_io(struct efx_nic *efx)
++{
++ struct pci_dev *pci_dev = efx->pci_dev;
++ int rc;
++
++ EFX_LOG(efx, "initialising I/O\n");
++
++ /* Generic device-enabling code */
++ rc = pci_enable_device(pci_dev);
++ if (rc) {
++ EFX_ERR(efx, "failed to enable PCI device\n");
++ goto fail1;
++ }
++
++ pci_set_master(pci_dev);
++
++ /* Set the PCI DMA mask. Try all possibilities from our
++ * genuine mask down to 32 bits, because some architectures
++ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
++ * masks even though they reject 46 bit masks.
++ */
++ efx->dma_mask = efx->type->max_dma_mask;
++ while (efx->dma_mask > 0x7fffffffUL) {
++ if (pci_dma_supported(pci_dev, efx->dma_mask) &&
++ ((rc = pci_set_dma_mask(pci_dev, efx->dma_mask)) == 0))
++ break;
++ efx->dma_mask >>= 1;
++ }
++ if (rc) {
++ EFX_ERR(efx, "could not find a suitable DMA mask\n");
++ goto fail2;
++ }
++ EFX_LOG(efx, "using DMA mask %llx\n",
++ (unsigned long long)efx->dma_mask);
++ rc = pci_set_consistent_dma_mask(pci_dev, efx->dma_mask);
++ if (rc) {
++ /* pci_set_consistent_dma_mask() is not *allowed* to
++ * fail with a mask that pci_set_dma_mask() accepted,
++ * but just in case...
++ */
++ EFX_ERR(efx, "failed to set consistent DMA mask\n");
++ goto fail2;
++ }
++
++ /* Get memory base address */
++ efx->membase_phys = pci_resource_start(efx->pci_dev,
++ efx->type->mem_bar);
++#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
++ rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
++#else
++ if (!request_mem_region(efx->membase_phys, efx->type->mem_map_size,
++ "sfc"))
++ rc = -EIO;
++#endif
++ if (rc) {
++ EFX_ERR(efx, "request for memory BAR failed\n");
++ rc = -EIO;
++ goto fail3;
++ }
++ efx->membase = ioremap_nocache(efx->membase_phys,
++ efx->type->mem_map_size);
++ if (!efx->membase) {
++ EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
++ efx->type->mem_bar, efx->membase_phys,
++ efx->type->mem_map_size);
++ rc = -ENOMEM;
++ goto fail4;
++ }
++ EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
++ efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
++ efx->membase);
++
++ return 0;
++
++ fail4:
++ release_mem_region(efx->membase_phys, efx->type->mem_map_size);
++ fail3:
++ efx->membase_phys = 0UL;
++ /* fall-thru */
++ fail2:
++ pci_disable_device(efx->pci_dev);
++ fail1:
++ return rc;
++}
++
++static void efx_fini_io(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "shutting down I/O\n");
++
++ if (efx->membase) {
++ iounmap(efx->membase);
++ efx->membase = NULL;
++ }
++
++ if (efx->membase_phys) {
++#if !defined(EFX_HAVE_MSIX_TABLE_RESERVED)
++ pci_release_region(efx->pci_dev, efx->type->mem_bar);
++#else
++ release_mem_region(efx->membase_phys, efx->type->mem_map_size);
++#endif
++ efx->membase_phys = 0UL;
++ }
++
++ pci_disable_device(efx->pci_dev);
++}
++
++/* Probe the number and type of interrupts we are able to obtain. */
++static int efx_probe_interrupts(struct efx_nic *efx)
++{
++ struct msix_entry xentries[EFX_MAX_CHANNELS];
++ int rc, i;
++
++ /* Select number of used RSS queues */
++ /* TODO: Can we react to CPU hotplug? */
++ if (rss_cpus == 0)
++ rss_cpus = num_online_cpus();
++
++ efx->rss_queues = 1;
++ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
++ unsigned int max_channel = efx->type->phys_addr_channels - 1;
++
++ BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
++ efx->rss_queues = min(max_channel + 1, rss_cpus);
++ efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
++ }
++
++ /* Determine how many RSS queues we can use, and mark channels
++ * with the appropriate interrupt state */
++ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
++ /* Build MSI request structure */
++ for (i = 0; i < efx->rss_queues; i++)
++ xentries[i].entry = i;
++
++ /* Request maximum number of MSI interrupts */
++ rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
++ if (rc > 0) {
++ EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
++ efx->rss_queues = rc;
++ rc = pci_enable_msix(efx->pci_dev, xentries,
++ efx->rss_queues);
++ }
++ if (rc == 0) {
++ for (i = 0; i < efx->rss_queues; i++) {
++ efx->channel[i].has_interrupt = 1;
++ efx->channel[i].irq = xentries[i].vector;
++ }
++ } else {
++ /* Fall back to single channel MSI */
++ efx->interrupt_mode = EFX_INT_MODE_MSI;
++ EFX_ERR(efx, "could not enable MSI-X\n");
++ }
++ }
++
++ /* Try single interrupt MSI */
++ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
++ efx->rss_queues = 1;
++ rc = pci_enable_msi(efx->pci_dev);
++ if (rc == 0) {
++ efx->channel[0].irq = efx->pci_dev->irq;
++ efx->channel[0].has_interrupt = 1;
++ } else {
++ EFX_ERR(efx, "could not enable MSI\n");
++ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
++ }
++ }
++
++ /* Assume legacy interrupts */
++ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
++ /* Every channel is interruptible */
++ for (i = 0; i < EFX_MAX_CHANNELS; i++)
++ efx->channel[i].has_interrupt = 1;
++ efx->legacy_irq = efx->pci_dev->irq;
++ }
++
++ return 0;
++}
++
++static void efx_remove_interrupts(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++
++ /* Remove MSI/MSI-X interrupts */
++ efx_for_each_channel_with_interrupt(channel, efx)
++ channel->irq = 0;
++ pci_disable_msi(efx->pci_dev);
++ pci_disable_msix(efx->pci_dev);
++
++ /* Remove legacy interrupt */
++ efx->legacy_irq = 0;
++}
++
++/* Select number of used resources
++ * Should be called after probe_interrupts()
++ */
++static int efx_select_used(struct efx_nic *efx)
++{
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++ int i;
++
++ /* TX queues. One per port per channel with TX capability
++ * (more than one per port won't work on Linux, due to out
++ * of order issues... but will be fine on Solaris)
++ */
++ tx_queue = &efx->tx_queue[0];
++
++ /* Perform this for each channel with TX capabilities.
++ * At the moment, we only support a single TX queue
++ */
++ tx_queue->used = 1;
++ if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
++ tx_queue->channel = &efx->channel[1];
++ else
++ tx_queue->channel = &efx->channel[0];
++ tx_queue->channel->used_flags |= EFX_USED_BY_TX;
++ tx_queue++;
++
++ /* RX queues. Each has a dedicated channel. */
++ for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
++ rx_queue = &efx->rx_queue[i];
++
++ if (i < efx->rss_queues) {
++ rx_queue->used = 1;
++ /* If we allow multiple RX queues per channel
++ * we need to decide that here
++ */
++ rx_queue->channel = &efx->channel[rx_queue->queue];
++ rx_queue->channel->used_flags |= EFX_USED_BY_RX;
++ rx_queue++;
++ }
++ }
++ return 0;
++}
++
++static int efx_probe_nic(struct efx_nic *efx)
++{
++ int rc;
++
++ EFX_LOG(efx, "creating NIC\n");
++
++ /* Carry out hardware-type specific initialisation */
++ rc = falcon_probe_nic(efx);
++ if (rc)
++ goto fail1;
++
++ /* Determine the number of channels and RX queues by trying to hook
++ * in MSI-X interrupts. */
++ rc = efx_probe_interrupts(efx);
++ if (rc)
++ goto fail2;
++
++ /* Determine number of RX queues and TX queues */
++ rc = efx_select_used(efx);
++ if (rc)
++ goto fail3;
++
++ /* Register debugfs entries */
++ rc = efx_init_debugfs_nic(efx);
++ if (rc)
++ goto fail4;
++ /* Initialise the interrupt moderation settings */
++ efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
++
++ return 0;
++
++ fail4:
++ /* fall-thru */
++ fail3:
++ efx_remove_interrupts(efx);
++ fail2:
++ falcon_remove_nic(efx);
++ fail1:
++ return rc;
++}
++
++static void efx_remove_nic(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "destroying NIC\n");
++
++ efx_remove_interrupts(efx);
++ falcon_remove_nic(efx);
++
++ efx_fini_debugfs_nic(efx);
++}
++
++/**************************************************************************
++ *
++ * NIC startup/shutdown
++ *
++ *************************************************************************/
++
++static int efx_probe_all(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ int rc;
++
++ /* Create NIC */
++ rc = efx_probe_nic(efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to create NIC\n");
++ goto fail1;
++ }
++
++ /* Create port */
++ rc = efx_probe_port(efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to create port\n");
++ goto fail2;
++ }
++
++ /* Create channels */
++ efx_for_each_channel(channel, efx) {
++ rc = efx_probe_channel(channel);
++ if (rc) {
++ EFX_ERR(efx, "failed to create channel %d\n",
++ channel->channel);
++ goto fail3;
++ }
++ }
++
++ return 0;
++
++ fail3:
++ efx_for_each_channel(channel, efx)
++ efx_remove_channel(channel);
++ fail2:
++ efx_remove_port(efx);
++ fail1:
++ return rc;
++}
++
++/* Called after previous invocation(s) of efx_stop_all, restarts the
++ * port, kernel transmit queue, NAPI processing and hardware interrupts.
++ * This function is safe to call multiple times when the NIC is in any
++ * state. */
++static void efx_start_all(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++
++ ASSERT_RTNL();
++
++ /* Check that it is appropriate to restart the interface. All
++ * of these flags are safe to read under just the rtnl lock */
++ if (efx->port_enabled)
++ return;
++ if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
++ return;
++ if (efx->net_dev_registered && !netif_running(efx->net_dev))
++ return;
++
++ /* Mark the port as enabled so port reconfigurations can start, then
++ * restart the transmit interface early so the watchdog timer stops */
++ efx_start_port(efx);
++ efx_wake_queue(efx);
++
++ efx_for_each_channel(channel, efx)
++ efx_start_channel(channel);
++
++ falcon_enable_interrupts(efx);
++
++ /* Start hardware monitor if we're in RUNNING */
++ if (efx->state == STATE_RUNNING)
++ queue_delayed_work(efx->workqueue, &efx->monitor_work,
++ efx_monitor_interval);
++}
++
++/* Flush all delayed work. Should only be called when no more delayed work
++ * will be scheduled. This doesn't flush pending online resets (efx_reset),
++ * since we're holding the rtnl_lock at this point. */
++static void efx_flush_all(struct efx_nic *efx)
++{
++#if defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ struct efx_rx_queue *rx_queue;
++
++ /* Make sure the hardware monitor is stopped */
++ cancel_delayed_work_sync(&efx->monitor_work);
++
++ /* Ensure that all RX slow refills are complete. */
++ efx_for_each_rx_queue(rx_queue, efx) {
++ cancel_delayed_work_sync(&rx_queue->work);
++ }
++#endif
++
++#if defined(EFX_USE_CANCEL_WORK_SYNC)
++ /* Stop scheduled port reconfigurations */
++ cancel_work_sync(&efx->reconfigure_work);
++#endif
++
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ /* Ensure that the hardware monitor and asynchronous port
++ * reconfigurations are complete, which are the only two consumers
++ * of efx->workqueue. Since the hardware monitor runs on a long period,
++ * we put in some effort to cancel the delayed work safely rather
++ * than just flushing the queue twice (which is guaranteed to flush
++ * all the work since both efx_monitor and efx_reconfigure_work disarm
++	 * if !efx->port_enabled). */
++ if (timer_pending(&efx->monitor_work.timer))
++ cancel_delayed_work(&efx->monitor_work);
++ flush_workqueue(efx->workqueue);
++ if (timer_pending(&efx->monitor_work.timer))
++ cancel_delayed_work(&efx->monitor_work);
++ flush_workqueue(efx->workqueue);
++
++ /* efx_rx_work will disarm if !channel->enabled, so we can just
++ * flush the refill workqueue twice as well. */
++ flush_workqueue(efx->refill_workqueue);
++ flush_workqueue(efx->refill_workqueue);
++#endif
++}
++
++/* Quiesce hardware and software without bringing the link down.
++ * Safe to call multiple times, when the NIC and interface are in any
++ * state. The caller is guaranteed to subsequently be in a position
++ * to modify any hardware and software state they see fit without
++ * taking locks. */
++static void efx_stop_all(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++
++ ASSERT_RTNL();
++
++ /* port_enabled can be read safely under the rtnl lock */
++ if (!efx->port_enabled)
++ return;
++
++ /* Disable interrupts and wait for ISR to complete */
++ falcon_disable_interrupts(efx);
++ if (efx->legacy_irq)
++ synchronize_irq(efx->legacy_irq);
++ efx_for_each_channel_with_interrupt(channel, efx)
++ if (channel->irq)
++ synchronize_irq(channel->irq);
++
++ /* Stop all synchronous port reconfigurations. */
++ efx_stop_port(efx);
++
++ /* Stop all NAPI processing and synchronous rx refills */
++ efx_for_each_channel(channel, efx)
++ efx_stop_channel(channel);
++
++ /* Flush reconfigure_work, refill_workqueue, monitor_work */
++ efx_flush_all(efx);
++
++ /* Stop the kernel transmit interface late, so the watchdog
++ * timer isn't ticking over the flush */
++ efx_stop_queue(efx);
++ if (efx->net_dev_registered) {
++ netif_tx_lock_bh(efx->net_dev);
++ netif_tx_unlock_bh(efx->net_dev);
++ }
++}
++
++static void efx_remove_all(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++
++ efx_for_each_channel(channel, efx)
++ efx_remove_channel(channel);
++ efx_remove_port(efx);
++ efx_remove_nic(efx);
++}
++
++static int efx_run_selftests(struct efx_nic *efx)
++{
++ struct efx_self_tests tests;
++ unsigned modes = efx->startup_loopbacks & efx->loopback_modes;
++ int rc;
++
++ rc = efx_online_test(efx, &tests);
++ if (rc) {
++ EFX_ERR(efx, "failed self-tests with interrupt_mode of %s\n",
++ INT_MODE(efx));
++ goto fail;
++ }
++
++ if (onload_offline_selftest && modes) {
++ /* Run offline self test */
++ EFX_LOG(efx, "performing on-load offline self-tests\n");
++ rc = efx_offline_test(efx, &tests, modes);
++ EFX_LOG(efx, "%s on-load offline self-tests\n",
++ rc ? "FAILED" : "PASSED");
++ if (rc)
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++	EFX_ERR(efx, "self-tests failed. Giving up!\n");
++ if (allow_load_on_failure)
++ rc = 0;
++
++ return rc;
++}
++
++int efx_flush_queues(struct efx_nic *efx)
++{
++ int rc;
++
++ ASSERT_RTNL();
++
++ efx_stop_all(efx);
++
++ /* We can't just flush the tx queues because the event queues
++ * may contain tx completions from that queue. Just flush everything */
++ efx_fini_channels(efx);
++ rc = efx_init_channels(efx);
++ if (rc) {
++ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
++ return rc;
++ }
++
++ efx_start_all(efx);
++
++ return 0;
++}
++
++/**************************************************************************
++ *
++ * Interrupt moderation
++ *
++ **************************************************************************/
++
++/* Set interrupt moderation parameters */
++void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
++{
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++
++ ASSERT_RTNL();
++
++ efx_for_each_tx_queue(tx_queue, efx)
++ tx_queue->channel->irq_moderation = tx_usecs;
++
++ efx_for_each_rx_queue(rx_queue, efx)
++ rx_queue->channel->irq_moderation = rx_usecs;
++}
++
++/**************************************************************************
++ *
++ * Hardware monitor
++ *
++ **************************************************************************/
++
++/* Run periodically off the general workqueue. Serialised against
++ * efx_reconfigure_port via the mac_lock */
++static void efx_monitor(struct work_struct *data)
++{
++#if !defined(EFX_NEED_WORK_API_WRAPPERS)
++ struct efx_nic *efx = container_of(data, struct efx_nic,
++ monitor_work.work);
++#else
++ struct efx_nic *efx = container_of(data, struct efx_nic,
++ monitor_work);
++#endif
++ int rc = 0;
++
++ EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
++ raw_smp_processor_id());
++
++
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ /* Without cancel_delayed_work_sync(), we have to make sure that
++ * we don't rearm when port_enabled == 0 */
++ mutex_lock(&efx->mac_lock);
++ if (!efx->port_enabled) {
++ mutex_unlock(&efx->mac_lock);
++ return;
++ }
++
++ rc = efx->mac_op->check_hw(efx);
++#else
++ /* If the mac_lock is already held then it is likely a port
++ * reconfiguration is already in place, which will likely do
++ * most of the work of check_hw() anyway. */
++ if (!mutex_trylock(&efx->mac_lock)) {
++ queue_delayed_work(efx->workqueue, &efx->monitor_work,
++ efx_monitor_interval);
++ return;
++ }
++
++ if (efx->port_enabled)
++ rc = efx->mac_op->check_hw(efx);
++#endif
++ mutex_unlock(&efx->mac_lock);
++
++ if (rc) {
++ if (monitor_reset) {
++ EFX_ERR(efx, "hardware monitor detected a fault: "
++ "triggering reset\n");
++ efx_schedule_reset(efx, RESET_TYPE_MONITOR);
++ } else {
++ EFX_ERR(efx, "hardware monitor detected a fault, "
++ "skipping reset\n");
++ }
++ }
++
++ queue_delayed_work(efx->workqueue, &efx->monitor_work,
++ efx_monitor_interval);
++}
++
++/**************************************************************************
++ *
++ * ioctls
++ *
++ *************************************************************************/
++
++/* Net device ioctl
++ * Context: process, rtnl_lock() held.
++ */
++static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
++{
++ struct efx_nic *efx = net_dev->priv;
++ int rc;
++
++ ASSERT_RTNL();
++
++ switch (cmd) {
++ case SIOCGMIIPHY:
++ case SIOCGMIIREG:
++ rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
++ break;
++ case SIOCSMIIREG:
++ rc = generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
++ efx_reconfigure_port(efx, 0);
++ break;
++ default:
++ rc = -EOPNOTSUPP;
++ }
++
++ return rc;
++}
++
++/**************************************************************************
++ *
++ * NAPI interface
++ *
++ **************************************************************************/
++
++/* Allocate the NAPI dev's.
++ * Called after we know how many channels there are.
++ */
++static int efx_init_napi(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ int rc;
++
++ ASSERT_RTNL();
++
++ /* Allocate the NAPI dev for the port */
++ efx->net_dev = alloc_etherdev(0);
++ if (!efx->net_dev) {
++ rc = -ENOMEM;
++ goto err;
++ }
++ efx->net_dev->priv = efx;
++ efx->mii.dev = efx->net_dev;
++
++ /* Set features based on module parameters and DMA mask.
++ * Enable DMA to ZONE_HIGHMEM if the NIC can access all memory
++ * directly. This only has an effect on 32-bit systems and
++ * PAE on x86 limits memory to 64GB so 40 bits is plenty to
++ * address everything. If the device can't address 40 bits
++ * then it's safest to turn NETIF_F_HIGHDMA off because this
++ * might be a PAE system with more than 4G of RAM and a 32-bit
++ * NIC. The use of EFX_DMA_MASK is to eliminate compiler
++ * warnings on platforms where dma_addr_t is 32-bit. We
++ * assume that in those cases we can access all memory
++ * directly if our DMA mask is all ones. */
++ efx->net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
++ if (efx->dma_mask >= EFX_DMA_MASK(DMA_40BIT_MASK))
++ efx->net_dev->features |= NETIF_F_HIGHDMA;
++
++ /* Copy MAC address */
++ memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
++
++ /* Allocate the per channel devs */
++ efx_for_each_channel(channel, efx) {
++#if !defined(EFX_HAVE_OLD_NAPI)
++ channel->napi_dev = efx->net_dev;
++#else
++ channel->napi_dev = alloc_etherdev(0);
++ if (!channel->napi_dev) {
++ rc = -ENOMEM;
++ goto err;
++ }
++ channel->napi_dev->priv = channel;
++ atomic_set(&channel->napi_dev->refcnt, 1);
++#endif
++ }
++
++ return 0;
++ err:
++ efx_fini_napi(efx);
++ return rc;
++}
++
++/* Free the NAPI state for the port and channels */
++static void efx_fini_napi(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++
++ ASSERT_RTNL();
++
++ efx_for_each_channel(channel, efx) {
++ /* Finish per channel NAPI */
++#if defined(EFX_HAVE_OLD_NAPI)
++ if (channel->napi_dev) {
++ channel->napi_dev->priv = NULL;
++ free_netdev(channel->napi_dev);
++ }
++#endif
++ channel->napi_dev = NULL;
++ }
++
++ /* Finish port NAPI */
++ if (efx->net_dev) {
++ efx->net_dev->priv = NULL;
++ free_netdev(efx->net_dev);
++ efx->net_dev = NULL;
++ }
++}
++
++/**************************************************************************
++ *
++ * Kernel netpoll interface
++ *
++ *************************************************************************/
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++
++/* Although in the common case interrupts will be disabled, this is not
++ * guaranteed. However, all our work happens inside the NAPI callback,
++ * so no locking is required.
++ */
++static void efx_netpoll(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_channel *channel;
++
++ efx_for_each_channel_with_interrupt(channel, efx)
++ efx_schedule_channel(channel);
++}
++
++#endif
++
++/**************************************************************************
++ *
++ * Kernel net device interface
++ *
++ *************************************************************************/
++
++/* Context: process, rtnl_lock() held. */
++static int efx_net_open(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ ASSERT_RTNL();
++
++ EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
++ raw_smp_processor_id());
++ efx_start_all(efx);
++ return 0;
++}
++
++/* Context: process, rtnl_lock() held.
++ * Note that the kernel will ignore our return code; this method
++ * should really be a void.
++ */
++static int efx_net_stop(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ int rc;
++
++ EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
++ raw_smp_processor_id());
++
++ /* Stop device and flush all the channels */
++ efx_stop_all(efx);
++ efx_fini_channels(efx);
++ rc = efx_init_channels(efx);
++ if (rc)
++ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
++
++ return 0;
++}
++
++/* Context: process, dev_base_lock held, non-blocking.
++ * Statistics are taken directly from the MAC.
++ */
++static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_mac_stats *mac_stats = &efx->mac_stats;
++ struct net_device_stats *stats = &efx->stats;
++
++ if (!spin_trylock(&efx->stats_lock))
++ return stats;
++ if (efx->state == STATE_RUNNING)
++ efx->mac_op->update_stats(efx);
++ spin_unlock(&efx->stats_lock);
++
++ stats->rx_packets = mac_stats->rx_packets;
++ stats->tx_packets = mac_stats->tx_packets;
++ stats->rx_bytes = mac_stats->rx_bytes;
++ stats->tx_bytes = mac_stats->tx_bytes;
++ stats->tx_errors = mac_stats->tx_bad;
++ stats->multicast = mac_stats->rx_multicast;
++ stats->collisions = mac_stats->tx_collision;
++ stats->rx_length_errors = mac_stats->rx_gtjumbo;
++ stats->rx_over_errors = mac_stats->rx_overflow;
++ stats->rx_crc_errors = mac_stats->rx_bad;
++ stats->rx_frame_errors = mac_stats->rx_align_error;
++ stats->rx_fifo_errors = 0;
++ stats->rx_missed_errors = mac_stats->rx_missed;
++ stats->rx_errors = (stats->rx_length_errors +
++ stats->rx_over_errors +
++ stats->rx_crc_errors +
++ stats->rx_frame_errors +
++ stats->rx_fifo_errors +
++ stats->rx_missed_errors +
++ mac_stats->rx_symbol_error);
++ stats->tx_aborted_errors = 0;
++ stats->tx_carrier_errors = 0;
++ stats->tx_fifo_errors = 0;
++ stats->tx_heartbeat_errors = 0;
++ stats->tx_window_errors = 0;
++
++ return stats;
++}
++
++/* Context: netif_tx_lock held, BHs disabled. */
++static void efx_watchdog(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
++ atomic_read(&efx->netif_stop_count), efx->port_enabled,
++ monitor_reset ? "resetting channels" : "skipping reset");
++
++ if (monitor_reset)
++ efx_schedule_reset(efx, RESET_TYPE_MONITOR);
++}
++
++/* Context: process, rtnl_lock() held. */
++static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
++{
++ struct efx_nic *efx = net_dev->priv;
++ int rc = 0;
++
++ ASSERT_RTNL();
++
++ if (new_mtu > EFX_MAX_MTU)
++ return -EINVAL;
++
++ efx_stop_all(efx);
++
++ /* Ask driverlink client if we can change MTU */
++ rc = EFX_DL_CALLBACK(efx, request_mtu, new_mtu);
++ if (rc) {
++ EFX_ERR(efx, "MTU change vetoed by driverlink %s driver\n",
++ efx->dl_cb_dev.request_mtu->driver->name);
++ goto out;
++ }
++
++ EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
++
++ efx_fini_channels(efx);
++ net_dev->mtu = new_mtu;
++ rc = efx_init_channels(efx);
++ if (rc)
++ goto fail;
++
++ /* Reconfigure the MAC */
++ efx_reconfigure_port(efx, 1);
++
++ /* Notify driverlink client of new MTU */
++ EFX_DL_CALLBACK(efx, mtu_changed, new_mtu);
++
++ efx_start_all(efx);
++
++ out:
++ return rc;
++
++ fail:
++ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
++ return rc;
++}
++
++static int efx_set_mac_address(struct net_device *net_dev, void *data)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct sockaddr *addr = data;
++ char *new_addr = addr->sa_data;
++
++ ASSERT_RTNL();
++
++ if (!is_valid_ether_addr(new_addr)) {
++ DECLARE_MAC_BUF(mac);
++ EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
++ print_mac(mac, new_addr));
++ return -EINVAL;
++ }
++
++ memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
++
++ /* Reconfigure the MAC */
++ efx_reconfigure_port(efx, 1);
++
++ return 0;
++}
++
++/* Context: netif_tx_lock held, BHs disabled. */
++static void efx_set_multicast_list(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct dev_mc_list *mc_list = net_dev->mc_list;
++ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
++ unsigned long flags __attribute__ ((unused));
++ int promiscuous;
++ u32 crc;
++ int bit;
++ int i;
++
++ /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
++ promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
++ if (efx->promiscuous != promiscuous) {
++ if (efx->port_enabled) {
++ efx->promiscuous = promiscuous;
++ queue_work(efx->workqueue, &efx->reconfigure_work);
++ }
++ }
++
++ /* Build multicast hash table */
++ if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
++ memset(mc_hash, 0xff, sizeof(*mc_hash));
++ } else {
++ memset(mc_hash, 0x00, sizeof(*mc_hash));
++ for (i = 0; i < net_dev->mc_count; i++) {
++ crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
++ bit = (crc & ((1 << EFX_MCAST_HASH_BITS) - 1));
++ set_bit_le(bit, (void *)mc_hash);
++ mc_list = mc_list->next;
++ }
++ }
++
++ /* Create and activate new global multicast hash table */
++ falcon_set_multicast_hash(efx);
++}
++
++/* Handle net device notifier events */
++static int efx_netdev_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ struct net_device *net_dev = (struct net_device *)ptr;
++
++ if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
++ struct efx_nic *efx = net_dev->priv;
++
++ strcpy(efx->name, net_dev->name);
++ efx_fini_debugfs_netdev(net_dev);
++ efx_init_debugfs_netdev(net_dev);
++ }
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block efx_netdev_notifier = {
++ .notifier_call = efx_netdev_event,
++};
++
++static int efx_register_netdev(struct efx_nic *efx)
++{
++ struct net_device *net_dev = efx->net_dev;
++ int rc;
++
++ net_dev->watchdog_timeo = 5 * HZ;
++ net_dev->irq = efx->pci_dev->irq;
++ net_dev->open = efx_net_open;
++ net_dev->stop = efx_net_stop;
++ net_dev->get_stats = efx_net_stats;
++ net_dev->tx_timeout = &efx_watchdog;
++ net_dev->hard_start_xmit = efx_hard_start_xmit;
++ net_dev->do_ioctl = efx_ioctl;
++ net_dev->change_mtu = efx_change_mtu;
++ net_dev->set_mac_address = efx_set_mac_address;
++ net_dev->set_multicast_list = efx_set_multicast_list;
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ net_dev->poll_controller = efx_netpoll;
++#endif
++ SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
++ SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
++
++ /* Always start with carrier off; PHY events will detect the link */
++ netif_carrier_off(efx->net_dev);
++
++ BUG_ON(efx->net_dev_registered);
++
++ /* Clear MAC statistics */
++ efx->mac_op->update_stats(efx);
++ memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
++
++ rc = register_netdev(net_dev);
++ if (rc) {
++ EFX_ERR(efx, "could not register net dev\n");
++ return rc;
++ }
++ strcpy(efx->name, net_dev->name);
++
++ /* Create debugfs symlinks */
++ rc = efx_init_debugfs_netdev(net_dev);
++ if (rc) {
++ EFX_ERR(efx, "failed to init net dev debugfs\n");
++ unregister_netdev(efx->net_dev);
++ return rc;
++ }
++
++ /* Allow link change notifications to be sent to the operating
++ * system. This must happen after register_netdev so that
++ * there are no outstanding link changes if that call fails.
++ * It must happen before efx_reconfigure_port so that the
++ * initial state of the link is reported. */
++ mutex_lock(&efx->mac_lock);
++ efx->net_dev_registered = 1;
++ mutex_unlock(&efx->mac_lock);
++
++ /* Safety net: in case we don't get a PHY event */
++ rtnl_lock();
++ efx_reconfigure_port(efx, 1);
++ rtnl_unlock();
++
++ EFX_LOG(efx, "registered\n");
++
++ return 0;
++}
++
++static void efx_unregister_netdev(struct efx_nic *efx)
++{
++ int was_registered = efx->net_dev_registered;
++ struct efx_tx_queue *tx_queue;
++
++ if (!efx->net_dev)
++ return;
++
++ BUG_ON(efx->net_dev->priv != efx);
++
++ /* SFC Bug 5356: Ensure that no more link status notifications get
++ * sent to the stack. Bad things happen if there's an
++ * outstanding notification after the net device is freed, but
++ * they only get flushed out by unregister_netdev, not by
++ * free_netdev. */
++ mutex_lock(&efx->mac_lock);
++ efx->net_dev_registered = 0;
++ mutex_unlock(&efx->mac_lock);
++
++ /* Free up any skbs still remaining. This has to happen before
++ * we try to unregister the netdev as running their destructors
++ * may be needed to get the device ref. count to 0. */
++ efx_for_each_tx_queue(tx_queue, efx)
++ efx_release_tx_buffers(tx_queue);
++
++ if (was_registered) {
++ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
++ efx_fini_debugfs_netdev(efx->net_dev);
++ unregister_netdev(efx->net_dev);
++ }
++}
++
++/**************************************************************************
++ *
++ * Device reset and suspend
++ *
++ **************************************************************************/
++
++/* This suspends the device (and acquires the suspend lock) without
++ * flushing the descriptor queues. It is included for the convenience
++ * of the driverlink layer.
++ */
++void efx_suspend(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "suspending operations\n");
++
++ down(&efx->suspend_lock);
++
++ rtnl_lock();
++ efx_stop_all(efx);
++}
++
++void efx_resume(struct efx_nic *efx)
++{
++ EFX_LOG(efx, "resuming operations\n");
++
++ efx_start_all(efx);
++ rtnl_unlock();
++
++ up(&efx->suspend_lock);
++}
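++
++/* Illustrative sketch (hypothetical caller, not part of this patch):
++ * efx_suspend() and efx_resume() are meant to be used as a strictly
++ * nested pair, e.g. by a driverlink client that needs the port
++ * quiesced while it touches shared state. */
++#if 0
++static void example_quiesced_operation(struct efx_nic *efx)
++{
++ efx_suspend(efx); /* takes suspend_lock, then rtnl_lock */
++ /* ... operate on the quiesced NIC here ... */
++ efx_resume(efx); /* restarts the port and releases both locks */
++}
++#endif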
++
++/* The final hardware and software finalisation before reset.
++ * This function does not handle serialisation with the kernel, it
++ * assumes the caller has done this */
++static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
++{
++ int rc;
++
++ ASSERT_RTNL();
++
++ rc = efx->mac_op->get_settings(efx, ecmd);
++ if (rc) {
++ EFX_ERR(efx, "could not back up PHY settings\n");
++ goto fail;
++ }
++
++ efx_fini_channels(efx);
++ return 0;
++
++ fail:
++ return rc;
++}
++
++/* The first part of software initialisation after a hardware reset
++ * This function does not handle serialisation with the kernel, it
++ * assumes the caller has done this */
++static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
++{
++ int rc;
++
++ rc = efx_init_channels(efx);
++ if (rc)
++ goto fail1;
++
++ /* In an INVISIBLE_RESET there might not be a link state transition,
++ * so we push the multicast list here. */
++ falcon_set_multicast_hash(efx);
++
++ /* Restore MAC and PHY settings. */
++ rc = efx->mac_op->set_settings(efx, ecmd);
++ if (rc) {
++ EFX_ERR(efx, "could not restore PHY settings\n");
++ goto fail2;
++ }
++
++ return 0;
++
++ fail2:
++ efx_fini_channels(efx);
++ fail1:
++ return rc;
++}
++
++/* Reset the NIC as transparently as possible. Do not reset the PHY.
++ * Note that the reset may fail, in which case the card will be left
++ * in a most-probably-unusable state.
++ *
++ * This function will sleep. You cannot reset from within an atomic
++ * state; use efx_schedule_reset() instead.
++ */
++static int efx_reset(struct efx_nic *efx)
++{
++ struct ethtool_cmd ecmd;
++ unsigned long flags __attribute__ ((unused));
++ enum reset_type method = efx->reset_pending;
++ int rc;
++
++ efx_dl_reset_lock();
++
++ rc = down_interruptible(&efx->suspend_lock);
++ if (rc) {
++ EFX_ERR(efx, "reset aborted by signal\n");
++ goto unlock_dl_lock;
++ }
++
++ /* We've got suspend_lock, which means we can only be in
++ * STATE_RUNNING or STATE_FINI. Don't clear
++ * efx->reset_pending, since this flag indicates that we
++ * should retry device initialisation.
++ */
++ if (efx->state != STATE_RUNNING) {
++ EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
++ goto unlock_suspend_lock;
++ }
++
++ /* Notify driverlink clients of imminent reset. */
++ efx_dl_reset_suspend(efx);
++ rtnl_lock();
++
++ efx->state = STATE_RESETTING;
++ EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
++
++ /* The net_dev->get_stats handler is quite slow, and will fail
++ * if a fetch is pending over reset. Serialise against it. */
++ spin_lock(&efx->stats_lock);
++ spin_unlock(&efx->stats_lock);
++
++ efx_stop_all(efx);
++ mutex_lock(&efx->mac_lock);
++
++ rc = efx_reset_down(efx, &ecmd);
++ if (rc)
++ goto fail1;
++ falcon_fini_nic(efx);
++
++ rc = falcon_reset_hw(efx, method);
++ if (rc) {
++ EFX_ERR(efx, "failed to reset hardware\n");
++ goto fail2;
++ }
++
++ /* Allow resets to be rescheduled. */
++ efx->reset_pending = RESET_TYPE_NONE;
++
++ /* Reinitialise bus-mastering, which may have been turned off before
++ * the reset was scheduled. This is still appropriate, even in the
++ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
++ * can respond to requests. */
++ pci_set_master(efx->pci_dev);
++
++ /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
++ * case so the driver can talk to external SRAM */
++ rc = falcon_init_nic(efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to initialise NIC\n");
++ goto fail3;
++ }
++
++ /* Leave device stopped if necessary */
++ if (method == RESET_TYPE_DISABLE) {
++ /* Reinitialise the device anyway so the driver unload sequence
++ * can talk to the external SRAM */
++ (void) falcon_init_nic(efx);
++ rc = -EIO;
++ goto fail4;
++ }
++
++ rc = efx_reset_up(efx, &ecmd);
++ if (rc)
++ goto fail5;
++
++ mutex_unlock(&efx->mac_lock);
++ efx_reconfigure_port(efx, 1);
++ EFX_LOG(efx, "reset complete\n");
++
++ efx->state = STATE_RUNNING;
++ efx_start_all(efx);
++
++ rtnl_unlock();
++
++ goto notify;
++
++ fail5:
++ fail4:
++ fail3:
++ fail2:
++ fail1:
++ EFX_ERR(efx, "has been disabled\n");
++ efx->state = STATE_DISABLED;
++
++ /* Remove the net_dev */
++ mutex_unlock(&efx->mac_lock);
++ rtnl_unlock();
++ efx_unregister_netdev(efx);
++ efx_fini_port(efx);
++
++ notify:
++ /* Notify driverlink clients of completed reset */
++ efx_dl_reset_resume(efx, (rc == 0));
++
++ unlock_suspend_lock:
++ up(&efx->suspend_lock);
++
++ unlock_dl_lock:
++ efx_dl_reset_unlock();
++
++ return rc;
++}
++
++/* The worker thread exists so that code that cannot sleep can
++ * schedule a reset for later.
++ */
++static void efx_reset_work(struct work_struct *data)
++{
++ struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
++
++ efx_reset(nic);
++}
++
++void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
++{
++ enum reset_type method;
++
++ if (efx->reset_pending != RESET_TYPE_NONE) {
++ EFX_INFO(efx, "quenching already scheduled reset\n");
++ return;
++ }
++
++ switch (type) {
++ case RESET_TYPE_INVISIBLE:
++ case RESET_TYPE_ALL:
++ case RESET_TYPE_WORLD:
++ case RESET_TYPE_DISABLE:
++ method = type;
++ break;
++ case RESET_TYPE_RX_RECOVERY:
++ case RESET_TYPE_RX_DESC_FETCH:
++ case RESET_TYPE_TX_DESC_FETCH:
++ method = RESET_TYPE_INVISIBLE;
++ break;
++ default:
++ method = RESET_TYPE_ALL;
++ break;
++ }
++
++ if (method != type)
++ EFX_LOG(efx, "scheduling %s reset for %s\n",
++ RESET_TYPE(method), RESET_TYPE(type));
++ else
++ EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
++
++ efx->reset_pending = method;
++
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ queue_work(efx->reset_workqueue, &efx->reset_work);
++#else
++ queue_work(efx->workqueue, &efx->reset_work);
++#endif
++}
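++
++/* Illustrative sketch (hypothetical caller, not part of this patch):
++ * efx_reset() sleeps, so atomic contexts such as interrupt handlers
++ * must request a reset via efx_schedule_reset(), which only records
++ * reset_pending and queues reset_work. */
++#if 0
++static void example_fatal_error_handler(struct efx_nic *efx)
++{
++ /* Safe in atomic context; the reset itself runs later from the
++ * workqueue via efx_reset_work() */
++ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
++}
++#endif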
++
++/**************************************************************************
++ *
++ * List of NICs we support
++ *
++ **************************************************************************/
++
++enum efx_type_index {
++ EFX_TYPE_FALCON_A = 0,
++ EFX_TYPE_FALCON_B = 1,
++};
++
++static struct efx_nic_type *efx_nic_types[] = {
++ [EFX_TYPE_FALCON_A] = &falcon_a_nic_type,
++ [EFX_TYPE_FALCON_B] = &falcon_b_nic_type,
++};
++
++/* PCI device ID table */
++static struct pci_device_id efx_pci_table[] __devinitdata = {
++ {EFX_VENDID_SFC, FALCON_A_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0, EFX_TYPE_FALCON_A},
++ {EFX_VENDID_SFC, FALCON_B_P_DEVID, PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0, EFX_TYPE_FALCON_B},
++ {0} /* end of list */
++};
++
++/**************************************************************************
++ *
++ * Dummy PHY/MAC/Board operations
++ *
++ * Can be used where the MAC does not implement this operation
++ * Needed so all function pointers are valid and do not have to be tested
++ * before use
++ *
++ **************************************************************************/
++int efx_port_dummy_op_int(struct efx_nic *efx)
++{
++ return 0;
++}
++void efx_port_dummy_op_void(struct efx_nic *efx) {}
++void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
++
++static struct efx_mac_operations efx_dummy_mac_operations = {
++ .init = efx_port_dummy_op_int,
++ .reconfigure = efx_port_dummy_op_void,
++ .fini = efx_port_dummy_op_void,
++};
++
++static struct efx_phy_operations efx_dummy_phy_operations = {
++ .init = efx_port_dummy_op_int,
++ .reconfigure = efx_port_dummy_op_void,
++ .check_hw = efx_port_dummy_op_int,
++ .fini = efx_port_dummy_op_void,
++ .clear_interrupt = efx_port_dummy_op_void,
++ .reset_xaui = efx_port_dummy_op_void,
++};
++
++/* Dummy board operations */
++static int efx_nic_dummy_op_int(struct efx_nic *nic)
++{
++ return 0;
++}
++
++static void efx_nic_dummy_op_void(struct efx_nic *nic) {}
++
++static struct efx_board efx_dummy_board_info = {
++ .init = efx_nic_dummy_op_int,
++ .init_leds = efx_port_dummy_op_int,
++ .set_fault_led = efx_port_dummy_op_blink,
++ .monitor = efx_nic_dummy_op_int,
++ .blink = efx_port_dummy_op_blink,
++ .fini = efx_nic_dummy_op_void,
++};
++
++/**************************************************************************
++ *
++ * Data housekeeping
++ *
++ **************************************************************************/
++
++/* This zeroes out and then fills in the invariants in a struct
++ * efx_nic (including all sub-structures).
++ */
++static int efx_init_struct(struct efx_nic *efx, enum efx_type_index type,
++ struct pci_dev *pci_dev)
++{
++ struct efx_channel *channel;
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++ int i, rc;
++
++ /* Initialise common structures */
++ memset(efx, 0, sizeof(*efx));
++ spin_lock_init(&efx->biu_lock);
++ spin_lock_init(&efx->phy_lock);
++ mutex_init(&efx->spi_lock);
++ sema_init(&efx->suspend_lock, 1);
++ INIT_WORK(&efx->reset_work, efx_reset_work);
++ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
++ efx->pci_dev = pci_dev;
++ efx->state = STATE_INIT;
++ efx->reset_pending = RESET_TYPE_NONE;
++ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
++ efx->board_info = efx_dummy_board_info;
++
++ efx->rx_checksum_enabled = 1;
++ spin_lock_init(&efx->netif_stop_lock);
++ spin_lock_init(&efx->stats_lock);
++ mutex_init(&efx->mac_lock);
++ efx->mac_op = &efx_dummy_mac_operations;
++ efx->phy_op = &efx_dummy_phy_operations;
++ INIT_LIST_HEAD(&efx->dl_node);
++ INIT_LIST_HEAD(&efx->dl_device_list);
++ efx->dl_cb = efx_default_callbacks;
++ INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
++ atomic_set(&efx->netif_stop_count, 1);
++
++ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
++ channel = &efx->channel[i];
++ channel->efx = efx;
++ channel->channel = i;
++ channel->evqnum = i;
++ channel->work_pending = 0;
++ }
++ for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
++ tx_queue = &efx->tx_queue[i];
++ tx_queue->efx = efx;
++ tx_queue->queue = i;
++ tx_queue->buffer = NULL;
++ tx_queue->channel = &efx->channel[0]; /* for safety */
++ }
++ for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
++ rx_queue = &efx->rx_queue[i];
++ rx_queue->efx = efx;
++ rx_queue->queue = i;
++ rx_queue->channel = &efx->channel[0]; /* for safety */
++ rx_queue->buffer = NULL;
++ spin_lock_init(&rx_queue->add_lock);
++ INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
++ }
++
++ efx->type = efx_nic_types[type];
++
++ /* Sanity-check NIC type */
++ EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
++ (efx->type->txd_ring_mask + 1));
++ EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
++ (efx->type->rxd_ring_mask + 1));
++ EFX_BUG_ON_PARANOID(efx->type->evq_size &
++ (efx->type->evq_size - 1));
++ /* As close as we can get to guaranteeing that we don't overflow */
++ EFX_BUG_ON_PARANOID(efx->type->evq_size <
++ (efx->type->txd_ring_mask + 1 +
++ efx->type->rxd_ring_mask + 1));
++
++ EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
++
++ /* Higher numbered interrupt modes are less capable! */
++ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
++ interrupt_mode);
++#if defined(EFX_NEED_DUMMY_MSIX)
++ if (efx->interrupt_mode == EFX_INT_MODE_MSIX)
++ efx->interrupt_mode = EFX_INT_MODE_MSI;
++#endif
++
++ /* Tasks that can fail are last */
++ efx->refill_workqueue = create_workqueue("sfc_refill");
++ if (!efx->refill_workqueue) {
++ rc = -ENOMEM;
++ goto fail1;
++ }
++
++ efx->workqueue = create_singlethread_workqueue("sfc_work");
++ if (!efx->workqueue) {
++ rc = -ENOMEM;
++ goto fail2;
++ }
++
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
++ if (!efx->reset_workqueue) {
++ rc = -ENOMEM;
++ goto fail3;
++ }
++#endif
++
++ return 0;
++
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ fail3:
++ destroy_workqueue(efx->workqueue);
++ efx->workqueue = NULL;
++#endif
++
++ fail2:
++ destroy_workqueue(efx->refill_workqueue);
++ efx->refill_workqueue = NULL;
++ fail1:
++ return rc;
++}
++
++static void efx_fini_struct(struct efx_nic *efx)
++{
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ if (efx->reset_workqueue) {
++ destroy_workqueue(efx->reset_workqueue);
++ efx->reset_workqueue = NULL;
++ }
++#endif
++ if (efx->workqueue) {
++ destroy_workqueue(efx->workqueue);
++ efx->workqueue = NULL;
++ }
++ if (efx->refill_workqueue) {
++ destroy_workqueue(efx->refill_workqueue);
++ efx->refill_workqueue = NULL;
++ }
++}
++
++/**************************************************************************
++ *
++ * PCI interface
++ *
++ **************************************************************************/
++
++/* Main body of final NIC shutdown code
++ * This is called only at module unload (or hotplug removal).
++ */
++static void efx_pci_remove_main(struct efx_nic *efx)
++{
++ ASSERT_RTNL();
++
++ /* Skip everything if we never obtained a valid membase */
++ if (!efx->membase)
++ return;
++
++ efx_fini_channels(efx);
++ efx_fini_port(efx);
++
++ /* Shut down the board, then the NIC and its interrupts */
++ efx->board_info.fini(efx);
++ falcon_fini_nic(efx);
++ falcon_fini_interrupt(efx);
++
++ /* Tear down NAPI and LRO */
++ efx_fini_napi(efx);
++ efx_remove_all(efx);
++}
++
++/* Final NIC shutdown
++ * This is called only at module unload (or hotplug removal).
++ */
++static void efx_pci_remove(struct pci_dev *pci_dev)
++{
++ struct efx_nic *efx;
++
++ efx = pci_get_drvdata(pci_dev);
++ if (!efx)
++ return;
++
++ /* Unregister driver from driverlink layer */
++ efx_dl_unregister_nic(efx);
++
++ /* Mark the NIC as fini under both suspend_lock and
++ * rtnl_lock */
++ down(&efx->suspend_lock);
++ rtnl_lock();
++ efx->state = STATE_FINI;
++ up(&efx->suspend_lock);
++
++ if (efx->membase) {
++ /* Stop the NIC. Since we're in STATE_FINI, this
++ * won't be reversed. */
++ if (efx->net_dev_registered)
++ dev_close(efx->net_dev);
++
++ /* Release the rtnl lock. Any queued efx_resets()
++ * can now return early [we're in STATE_FINI]. */
++ rtnl_unlock();
++
++ efx_unregister_netdev(efx);
++ efx_fini_debugfs_channels(efx);
++
++ /* Wait for any scheduled resets to complete. No more will be
++ * scheduled from this point because efx_stop_all() has been
++ * called, we are no longer registered with driverlink, and
++ * the net_device's have been removed. */
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ flush_workqueue(efx->reset_workqueue);
++#else
++ flush_workqueue(efx->workqueue);
++#endif
++
++ /* Fini and remove all the software state */
++ rtnl_lock();
++ efx_pci_remove_main(efx);
++ }
++
++ rtnl_unlock();
++
++ efx_fini_io(efx);
++ EFX_LOG(efx, "shutdown successful\n");
++
++ pci_set_drvdata(pci_dev, NULL);
++ efx_fini_struct(efx);
++ kfree(efx);
++}
++
++/* Main body of NIC initialisation
++ * This is called at module load (or hotplug insertion, theoretically).
++ */
++static int efx_pci_probe_main(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Do start-of-day initialisation */
++ rc = efx_probe_all(efx);
++ if (rc)
++ goto fail1;
++
++ /* Initialise port/channel net_dev's */
++ rc = efx_init_napi(efx);
++ if (rc)
++ goto fail2;
++
++ /* Initialise the board */
++ rc = efx->board_info.init(efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to initialise board\n");
++ goto fail3;
++ }
++
++ /* Initialise device */
++ rc = falcon_init_nic(efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to initialise NIC\n");
++ goto fail4;
++ }
++
++ /* Initialise port */
++ rc = efx_init_port(efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to initialise port\n");
++ goto fail5;
++ }
++
++ /* Initialise channels */
++ rc = efx_init_channels(efx);
++ if (rc)
++ goto fail6;
++
++ rc = falcon_init_interrupt(efx);
++ if (rc)
++ goto fail7;
++
++ /* Start up device - interrupts can occur from this point */
++ efx_start_all(efx);
++
++ /* Check basic functionality and set interrupt mode */
++ rc = efx_run_selftests(efx);
++ if (rc)
++ goto fail8;
++
++ /* Stop the NIC */
++ efx_stop_all(efx);
++
++ return 0;
++
++ fail8:
++ efx_stop_all(efx);
++ falcon_fini_interrupt(efx);
++ fail7:
++ efx_fini_channels(efx);
++ fail6:
++ efx_fini_port(efx);
++ fail5:
++ falcon_fini_nic(efx);
++ fail4:
++ efx->board_info.fini(efx);
++ fail3:
++ efx_fini_napi(efx);
++ fail2:
++ efx_remove_all(efx);
++ fail1:
++ return rc;
++}
++
++/* NIC initialisation
++ *
++ * This is called at module load (or hotplug insertion,
++ * theoretically). It sets up PCI mappings, tests and resets the NIC,
++ * sets up and registers the network devices with the kernel and hooks
++ * the interrupt service routine. It does not prepare the device for
++ * transmission; this is left to the first time one of the network
++ * interfaces is brought up (i.e. efx_net_open).
++ */
++static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
++ const struct pci_device_id *entry)
++{
++ struct efx_nic *efx;
++ enum efx_type_index type = entry->driver_data;
++ int i, rc;
++
++ /* Allocate and initialise a struct efx_nic */
++ efx = kmalloc(sizeof(*efx), GFP_KERNEL);
++ if (!efx) {
++ rc = -ENOMEM;
++ goto fail1;
++ }
++ pci_set_drvdata(pci_dev, efx);
++ rc = efx_init_struct(efx, type, pci_dev);
++ if (rc)
++ goto fail2;
++
++ EFX_INFO(efx, "Solarflare Communications NIC detected\n");
++
++ /* Set up basic I/O (BAR mappings etc) */
++ rc = efx_init_io(efx);
++ if (rc)
++ goto fail3;
++
++ /* From this point on we begin to expose the driver to the OS
++ * to varying degrees, so let's grab the suspend_lock and
++ * rtnl_lock to serialise against efx_reset() and
++ * friends. efx->state is not STATE_RUNNING yet, but we don't
++ * want these tasks to fail, just to block until we drop the
++ * lock.
++ */
++ rc = down_interruptible(&efx->suspend_lock);
++ if (rc) {
++ EFX_ERR(efx, "suspend interrupted - aborting\n");
++ goto fail4;
++ }
++
++ rtnl_lock();
++
++ /* Probe, initialise and start everything. Run self-test */
++ for (i = 0; i < 5; i++) {
++ rc = efx_pci_probe_main(efx);
++ if (rc == 0)
++ break;
++
++ /* Retry if a recoverable reset event has been scheduled */
++ if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
++ (efx->reset_pending != RESET_TYPE_ALL))
++ goto fail5;
++
++ /* Serialise against efx_reset(). No more resets will be
++ * scheduled since efx_stop_all() has been called, and we
++ * have not and never have been registered with either
++ * the rtnetlink or driverlink layers. */
++ rtnl_unlock();
++ up(&efx->suspend_lock);
++
++#if defined(EFX_USE_CANCEL_WORK_SYNC)
++ cancel_work_sync(&efx->reset_work);
++#else
++ flush_workqueue(efx->reset_workqueue);
++#endif
++
++ down(&efx->suspend_lock);
++ rtnl_lock();
++
++ efx->reset_pending = RESET_TYPE_NONE;
++ }
++ if (rc) {
++ EFX_ERR(efx, "Could not reset NIC\n");
++ goto fail5;
++ }
++
++ /* Self-tests have all passed */
++ rc = efx_init_debugfs_channels(efx);
++ if (rc)
++ goto fail6;
++
++ /* Switch to the running state before we expose the device to
++ * the OS. This is to ensure that the initial gathering of
++ * MAC stats succeeds. */
++ efx->state = STATE_RUNNING;
++
++ rtnl_unlock();
++
++ rc = efx_register_netdev(efx);
++ if (rc)
++ goto fail7;
++
++ up(&efx->suspend_lock);
++
++ EFX_LOG(efx, "initialisation successful\n");
++
++ /* Register with driverlink layer */
++ rc = efx_dl_register_nic(efx);
++ if (rc)
++ goto fail8;
++
++ return 0;
++
++ fail8:
++ down(&efx->suspend_lock);
++ efx_unregister_netdev(efx);
++ fail7:
++ /* Re-acquire the rtnl lock around pci_remove_main() */
++ rtnl_lock();
++ efx_fini_debugfs_channels(efx);
++ fail6:
++ efx_pci_remove_main(efx);
++ fail5:
++ /* Drop the locks before fini */
++ rtnl_unlock();
++ up(&efx->suspend_lock);
++ fail4:
++ efx_fini_io(efx);
++ fail3:
++ efx_fini_struct(efx);
++ fail2:
++ kfree(efx);
++ fail1:
++ EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
++ return rc;
++}
++
++/* PCI driver definition */
++static struct pci_driver efx_pci_driver = {
++ .name = EFX_DRIVER_NAME,
++ .id_table = efx_pci_table,
++ .probe = efx_pci_probe,
++ .remove = efx_pci_remove,
++};
++
++/**************************************************************************
++ *
++ * Kernel module interface
++ *
++ *************************************************************************/
++
++module_param(interrupt_mode, uint, 0444);
++MODULE_PARM_DESC(interrupt_mode,
++ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
++
++module_param(onload_offline_selftest, uint, 0444);
++MODULE_PARM_DESC(onload_offline_selftest, "Perform offline selftest on load");
++
++static int __init efx_init_module(void)
++{
++ int rc;
++
++ printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
++
++ rc = efx_init_debugfs();
++ if (rc)
++ goto err_debugfs;
++
++ rc = register_netdevice_notifier(&efx_netdev_notifier);
++ if (rc)
++ goto err_notifier;
++
++ rc = pci_register_driver(&efx_pci_driver);
++ if (rc < 0)
++ goto err_pci;
++
++ return 0;
++
++ err_pci:
++ unregister_netdevice_notifier(&efx_netdev_notifier);
++ err_notifier:
++ efx_fini_debugfs();
++ err_debugfs:
++ return rc;
++}
++
++static void __exit efx_exit_module(void)
++{
++ printk(KERN_INFO "Solarflare NET driver unloading\n");
++
++ pci_unregister_driver(&efx_pci_driver);
++ unregister_netdevice_notifier(&efx_netdev_notifier);
++ efx_fini_debugfs();
++
++}
++
++module_init(efx_init_module);
++module_exit(efx_exit_module);
++
++MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
++ "Solarflare Communications");
++MODULE_DESCRIPTION("Solarflare Communications network driver");
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, efx_pci_table);
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/efx.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/efx.h
+--- linux-2.6.18.8/drivers/net/sfc/efx.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/efx.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,103 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_EFX_H
++#define EFX_EFX_H
++
++#include "net_driver.h"
++
++/* PCI IDs */
++#define EFX_VENDID_SFC 0x1924
++#define FALCON_A_P_DEVID 0x0703
++#define FALCON_A_S_DEVID 0x6703
++#define FALCON_B_P_DEVID 0x0710
++
++/* TX */
++extern int efx_xmit(struct efx_nic *efx,
++ struct efx_tx_queue *tx_queue, struct sk_buff *skb);
++extern void efx_stop_queue(struct efx_nic *efx);
++extern void efx_wake_queue(struct efx_nic *efx);
++
++/* RX */
++#if defined(EFX_USE_FASTCALL)
++extern void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue,
++ unsigned int index);
++#else
++extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
++#endif
++#if defined(EFX_USE_FASTCALL)
++extern void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
++ unsigned int index, unsigned int len,
++ int checksummed, int discard);
++#else
++extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
++ unsigned int len, int checksummed, int discard);
++#endif
++extern void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
++ struct efx_rx_buffer *rx_buf);
++
++/* Channels */
++extern void efx_process_channel_now(struct efx_channel *channel);
++extern int efx_flush_queues(struct efx_nic *efx);
++
++/* Ports */
++extern void efx_reconfigure_port(struct efx_nic *efx,
++ int on_disabled);
++
++/* Global */
++extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
++extern void efx_suspend(struct efx_nic *efx);
++extern void efx_resume(struct efx_nic *efx);
++extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
++ int rx_usecs);
++extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
++extern void efx_hex_dump(const u8 *, unsigned int, const char *);
++
++/* Dummy PHY ops for PHY drivers */
++extern int efx_port_dummy_op_int(struct efx_nic *efx);
++extern void efx_port_dummy_op_void(struct efx_nic *efx);
++extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
++
++extern unsigned int efx_monitor_interval;
++
++static inline void efx_schedule_channel(struct efx_channel *channel)
++{
++ EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
++ channel->channel, raw_smp_processor_id());
++ channel->work_pending = 1;
++
++#if defined(EFX_HAVE_OLD_NAPI)
++ if (!test_and_set_bit(__LINK_STATE_RX_SCHED, &channel->napi_dev->state))
++ __netif_rx_schedule(channel->napi_dev);
++#else
++ netif_rx_schedule(channel->napi_dev, &channel->napi_str);
++#endif
++}
++
++#endif /* EFX_EFX_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/enum.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/enum.h
+--- linux-2.6.18.8/drivers/net/sfc/enum.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/enum.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,117 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_ENUM_H
++#define EFX_ENUM_H
++
++/**
++ * enum efx_loopback_mode - loopback modes
++ * @LOOPBACK_NONE: no loopback
++ * @LOOPBACK_NEAR: loopback nearest to bus
++ * @LOOPBACK_MAC: loopback within MAC unspecified level
++ * @LOOPBACK_XGMII: loopback within MAC at XGMII level
++ * @LOOPBACK_XGXS: loopback within MAC at XGXS level
++ * @LOOPBACK_XAUI: loopback within MAC at XAUI level
++ * @LOOPBACK_PHY: loopback within PHY unspecified level
++ * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
++ * @LOOPBACK_PCS: loopback within PHY at PCS level
++ * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
++ * @LOOPBACK_FAR: loopback furthest from bus
++ * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
++ */
++/* Please keep in order and up-to-date w.r.t the following two #defines */
++enum efx_loopback_mode {
++ LOOPBACK_NONE = 0,
++ LOOPBACK_NEAR = 1,
++ LOOPBACK_MAC = 2,
++ LOOPBACK_XGMII = 3,
++ LOOPBACK_XGXS = 4,
++ LOOPBACK_XAUI = 5,
++ LOOPBACK_PHY = 6,
++ LOOPBACK_PHYXS = 7,
++ LOOPBACK_PCS = 8,
++ LOOPBACK_PMAPMD = 9,
++ LOOPBACK_FAR = 10,
++ LOOPBACK_NETWORK = 11,
++ LOOPBACK_MAX
++};
++#define LOOPBACK_TEST_MAX LOOPBACK_FAR
++
++/* These loopbacks occur within the controller */
++#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
++ (1 << LOOPBACK_XGXS) | \
++ (1 << LOOPBACK_XAUI))
++
++#define LOOPBACKS_1G_INTERNAL (1 << LOOPBACK_MAC)
++
++#define LOOPBACK_MASK(_efx) \
++ (1 << (_efx)->loopback_mode)
++
++#define LOOPBACK_INTERNAL(_efx) \
++ (((LOOPBACKS_10G_INTERNAL | LOOPBACKS_1G_INTERNAL) & \
++ LOOPBACK_MASK(_efx)) ? 1 : 0)
++
++#define LOOPBACK_CHANGED(_from, _to, _mask) \
++ ((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & \
++ (_mask) ? 1 : 0)
++
++#define LOOPBACK_OUT_OF(_from, _to, _mask) \
++ (((LOOPBACK_MASK(_from) & (_mask)) && \
++ ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
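++
++/* Worked example (illustrative): with loopback_mode == LOOPBACK_XAUI,
++ * LOOPBACK_MASK(efx) is (1 << 5) == 0x20, which intersects
++ * LOOPBACKS_10G_INTERNAL (0x38), so LOOPBACK_INTERNAL(efx) == 1.
++ * Moving to LOOPBACK_PHYXS (mask 0x80) leaves that set, so
++ * LOOPBACK_OUT_OF(from, to, LOOPBACKS_10G_INTERNAL) == 1. */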
++
++/*****************************************************************************/
++
++/**
++ * enum reset_type - reset types
++ *
++ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
++ * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
++ * other values specify reasons, which efx_schedule_reset() will choose
++ * a method for.
++ *
++ * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
++ * @RESET_TYPE_ALL: reset everything but PCI core blocks
++ * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
++ * @RESET_TYPE_DISABLE: disable NIC
++ * @RESET_TYPE_MONITOR: reset due to hardware monitor
++ * @RESET_TYPE_INT_ERROR: reset due to internal error
++ * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
++ */
++enum reset_type {
++ RESET_TYPE_NONE = -1,
++ RESET_TYPE_INVISIBLE = 0,
++ RESET_TYPE_ALL = 1,
++ RESET_TYPE_WORLD = 2,
++ RESET_TYPE_DISABLE = 3,
++ RESET_TYPE_MAX_METHOD,
++ RESET_TYPE_MONITOR,
++ RESET_TYPE_INT_ERROR,
++ RESET_TYPE_RX_RECOVERY,
++ RESET_TYPE_RX_DESC_FETCH,
++ RESET_TYPE_TX_DESC_FETCH,
++ RESET_TYPE_MAX,
++};
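++
++/* Worked example (illustrative): the values from RESET_TYPE_INVISIBLE
++ * up to RESET_TYPE_MAX_METHOD are methods; those after it are reasons,
++ * which efx_schedule_reset() maps to methods. Scheduling
++ * RESET_TYPE_RX_DESC_FETCH records the RESET_TYPE_INVISIBLE method,
++ * while RESET_TYPE_INT_ERROR falls through to RESET_TYPE_ALL. */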
++
++#endif /* EFX_ENUM_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/ethtool.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/ethtool.c
+--- linux-2.6.18.8/drivers/net/sfc/ethtool.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/ethtool.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,734 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/netdevice.h>
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
++#include <asm/uaccess.h>
++#include "net_driver.h"
++#include "selftest.h"
++#include "efx.h"
++#include "ethtool.h"
++#include "falcon.h"
++#include "gmii.h"
++
++static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
++
++struct ethtool_string {
++ char name[ETH_GSTRING_LEN];
++};
++
++struct efx_ethtool_stat {
++ const char *name;
++ enum {
++ EFX_ETHTOOL_STAT_SOURCE_mac_stats,
++ EFX_ETHTOOL_STAT_SOURCE_nic,
++ EFX_ETHTOOL_STAT_SOURCE_channel
++ } source;
++ unsigned offset;
++ u64(*get_stat) (void *field); /* Reader function */
++};
++
++/* Initialiser for a struct #efx_ethtool_stat with type-checking */
++#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
++ get_stat_function) { \
++ .name = #stat_name, \
++ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
++ .offset = ((((field_type *) 0) == \
++ &((struct efx_##source_name *)0)->field) ? \
++ offsetof(struct efx_##source_name, field) : \
++ offsetof(struct efx_##source_name, field)), \
++ .get_stat = get_stat_function, \
++}
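++
++/* Note on the type-check in EFX_ETHTOOL_STAT (illustrative): the
++ * pointer comparison is never evaluated at run time -- both ?: arms
++ * yield the same offsetof() -- but it makes the compiler compare a
++ * "field_type *" against the real type of the field, so naming the
++ * wrong field_type draws an incompatible-pointer-types diagnostic at
++ * compile time. */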
++
++static u64 efx_get_uint_stat(void *field)
++{
++ return *(unsigned int *)field;
++}
++
++static u64 efx_get_ulong_stat(void *field)
++{
++ return *(unsigned long *)field;
++}
++
++static u64 efx_get_u64_stat(void *field)
++{
++ return *(u64 *) field;
++}
++
++static u64 efx_get_atomic_stat(void *field)
++{
++ return atomic_read((atomic_t *) field);
++}
++
++#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
++ EFX_ETHTOOL_STAT(field, mac_stats, field, \
++ unsigned long, efx_get_ulong_stat)
++
++#define EFX_ETHTOOL_U64_MAC_STAT(field) \
++ EFX_ETHTOOL_STAT(field, mac_stats, field, \
++ u64, efx_get_u64_stat)
++
++#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
++ EFX_ETHTOOL_STAT(name, nic, n_##name, \
++ unsigned int, efx_get_uint_stat)
++
++#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
++ EFX_ETHTOOL_STAT(field, nic, errors.field, \
++ atomic_t, efx_get_atomic_stat)
++
++#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
++ EFX_ETHTOOL_STAT(field, channel, n_##field, \
++ unsigned int, efx_get_uint_stat)
++
++static struct efx_ethtool_stat efx_ethtool_stats[] = {
++ EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
++ EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
++ EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
++ EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
++ EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
++ EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
++ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
++ EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
++ EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
++ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
++ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
++ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
++ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
++ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
++};
++
++/* Number of ethtool statistics */
++#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
++
++/**************************************************************************
++ *
++ * Ethtool operations
++ *
++ **************************************************************************
++ */
++
++/* Identify device by flashing LEDs */
++static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ efx->board_info.blink(efx, 1);
++ schedule_timeout_interruptible(seconds * HZ);
++ efx->board_info.blink(efx, 0);
++ return 0;
++}
++
++/* This must be called with rtnl_lock held. */
++int efx_ethtool_get_settings(struct net_device *net_dev,
++ struct ethtool_cmd *ecmd)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ return efx->mac_op->get_settings(efx, ecmd);
++}
++
++/* This must be called with rtnl_lock held. */
++int efx_ethtool_set_settings(struct net_device *net_dev,
++ struct ethtool_cmd *ecmd)
++{
++ struct efx_nic *efx = net_dev->priv;
++ int rc;
++
++ rc = efx->mac_op->set_settings(efx, ecmd);
++ if (rc)
++ return rc;
++
++ /* Push the settings to the MAC */
++ efx_reconfigure_port(efx, 0);
++
++ return 0;
++}
++
++static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *info)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
++ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
++ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
++}
++
++/**
++ * efx_fill_test - fill in an individual self-test entry
++ * @test_index: Index of the test
++ * @strings: Ethtool strings, or %NULL
++ * @data: Ethtool test results, or %NULL
++ * @test: Pointer to test result (used only if data != %NULL)
++ * @unit_format: Unit name format (e.g. "channel\%d")
++ * @unit_id: Unit id (e.g. 0 for "channel0")
++ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
++ * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
++ *
++ * Fill in an individual self-test entry.
++ */
++static void efx_fill_test(unsigned int test_index,
++ struct ethtool_string *strings, u64 *data,
++ int *test, const char *unit_format, int unit_id,
++ const char *test_format, const char *test_id)
++{
++ struct ethtool_string unit_str, test_str;
++
++ /* Fill data value, if applicable */
++ if (data)
++ data[test_index] = *test;
++
++ /* Fill string, if applicable */
++ if (strings) {
++ snprintf(unit_str.name, sizeof(unit_str.name),
++ unit_format, unit_id);
++ snprintf(test_str.name, sizeof(test_str.name),
++ test_format, test_id);
++ snprintf(strings[test_index].name,
++ sizeof(strings[test_index].name),
++ "%-9s%-17s", unit_str.name, test_str.name);
++ }
++}
++
++#define EFX_PORT_NAME "port%d", 0
++
++/**
++ * efx_fill_loopback_test - fill in a block of loopback self-test entries
++ * @efx: Efx NIC
++ * @lb_tests: Efx loopback self-test results structure
++ * @mode: Loopback test mode
++ * @test_index: Starting index of the test
++ * @strings: Ethtool strings, or %NULL
++ * @data: Ethtool test results, or %NULL
++ *
++ * Fill in a block of loopback self-test entries. Return new test
++ * index.
++ */
++static int efx_fill_loopback_test(struct efx_nic *efx,
++ struct efx_loopback_self_tests *lb_tests,
++ enum efx_loopback_mode mode,
++ unsigned int test_index,
++ struct ethtool_string *strings, u64 *data)
++{
++ struct efx_tx_queue *tx_queue;
++
++ efx_for_each_tx_queue(tx_queue, efx) {
++ efx_fill_test(test_index++, strings, data,
++ &lb_tests->tx_sent[tx_queue->queue],
++ EFX_TX_QUEUE_NAME(tx_queue),
++ "loopback.%s.tx_sent",
++ efx_loopback_mode_names[mode]);
++ efx_fill_test(test_index++, strings, data,
++ &lb_tests->tx_done[tx_queue->queue],
++ EFX_TX_QUEUE_NAME(tx_queue),
++ "loopback.%s.tx_done",
++ efx_loopback_mode_names[mode]);
++ }
++ efx_fill_test(test_index++, strings, data,
++ &lb_tests->rx_good,
++ EFX_PORT_NAME,
++ "loopback.%s.rx_good",
++ efx_loopback_mode_names[mode]);
++ efx_fill_test(test_index++, strings, data,
++ &lb_tests->rx_bad,
++ EFX_PORT_NAME,
++ "loopback.%s.rx_bad",
++ efx_loopback_mode_names[mode]);
++
++ return test_index;
++}
++
++/**
++ * efx_ethtool_fill_self_tests - get self-test details
++ * @efx: Efx NIC
++ * @tests: Efx self-test results structure, or %NULL
++ * @strings: Ethtool strings, or %NULL
++ * @data: Ethtool test results, or %NULL
++ *
++ * Get self-test number of strings, strings, and/or test results.
++ * Return number of strings (== number of test results).
++ *
++ * The reason for merging these three functions is to make sure that
++ * they can never be inconsistent.
++ */
++static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
++ struct efx_self_tests *tests,
++ struct ethtool_string *strings,
++ u64 *data)
++{
++ struct efx_channel *channel;
++ unsigned int n = 0;
++ enum efx_loopback_mode mode;
++
++ /* Interrupt */
++ efx_fill_test(n++, strings, data, &tests->interrupt,
++ "core", 0, "interrupt", NULL);
++
++ /* Event queues */
++ efx_for_each_channel(channel, efx) {
++ efx_fill_test(n++, strings, data,
++ &tests->eventq_dma[channel->channel],
++ EFX_CHANNEL_NAME(channel),
++ "eventq.dma", NULL);
++ efx_fill_test(n++, strings, data,
++ &tests->eventq_int[channel->channel],
++ EFX_CHANNEL_NAME(channel),
++ "eventq.int", NULL);
++ efx_fill_test(n++, strings, data,
++ &tests->eventq_poll[channel->channel],
++ EFX_CHANNEL_NAME(channel),
++ "eventq.poll", NULL);
++ }
++
++ /* PHY presence */
++ efx_fill_test(n++, strings, data, &tests->phy_ok,
++ EFX_PORT_NAME, "phy_ok", NULL);
++
++ /* Loopback tests */
++ efx_fill_test(n++, strings, data, &tests->loopback_speed,
++ EFX_PORT_NAME, "loopback.speed", NULL);
++ efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
++ EFX_PORT_NAME, "loopback.full_duplex", NULL);
++ for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
++ if (!(efx->loopback_modes & (1 << mode)))
++ continue;
++ n = efx_fill_loopback_test(efx,
++ &tests->loopback[mode], mode, n,
++ strings, data);
++ }
++
++ return n;
++}
++
++static int efx_ethtool_get_stats_count(struct net_device *net_dev)
++{
++ return EFX_ETHTOOL_NUM_STATS;
++}
++
++static int efx_ethtool_self_test_count(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
++}
++
++static void efx_ethtool_get_strings(struct net_device *net_dev,
++ u32 string_set, u8 *strings)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct ethtool_string *ethtool_strings =
++ (struct ethtool_string *)strings;
++ int i;
++
++ switch (string_set) {
++ case ETH_SS_STATS:
++ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
++ strncpy(ethtool_strings[i].name,
++ efx_ethtool_stats[i].name,
++ sizeof(ethtool_strings[i].name));
++ break;
++ case ETH_SS_TEST:
++ efx_ethtool_fill_self_tests(efx, NULL,
++ ethtool_strings, NULL);
++ break;
++ default:
++ /* No other string sets */
++ break;
++ }
++}
++
++static void efx_ethtool_get_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats
++ __attribute__ ((unused)), u64 *data)
++{
++ unsigned long flags __attribute__ ((unused));
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_mac_stats *mac_stats = &efx->mac_stats;
++ struct efx_ethtool_stat *stat;
++ struct efx_channel *channel;
++ int i;
++
++ EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
++
++ /* Update MAC and NIC statistics */
++ net_dev->get_stats(net_dev);
++ falcon_update_nic_stats(efx);
++
++ /* Fill detailed statistics buffer */
++ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
++ stat = &efx_ethtool_stats[i];
++ switch (stat->source) {
++ case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
++ data[i] = stat->get_stat((void *)mac_stats +
++ stat->offset);
++ break;
++ case EFX_ETHTOOL_STAT_SOURCE_nic:
++ data[i] = stat->get_stat((void *)efx + stat->offset);
++ break;
++ case EFX_ETHTOOL_STAT_SOURCE_channel:
++ data[i] = 0;
++ efx_for_each_channel(channel, efx)
++ data[i] += stat->get_stat((void *)channel +
++ stat->offset);
++ break;
++ }
++ }
++}
++
++static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
++{
++ struct efx_nic *efx = net_dev->priv;
++ int rc;
++
++ rc = ethtool_op_set_tx_csum(net_dev, enable);
++ if (rc)
++ return rc;
++
++ efx_flush_queues(efx);
++
++ return 0;
++}
++
++static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ /* No way to stop the hardware doing the checks; we just
++ * ignore the result.
++ */
++ efx->rx_checksum_enabled = (enable ? 1 : 0);
++
++ return 0;
++}
++
++static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ return efx->rx_checksum_enabled;
++}
++
++static void efx_ethtool_self_test(struct net_device *net_dev,
++ struct ethtool_test *test, u64 *data)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_self_tests efx_tests;
++ int offline, already_up;
++ int rc;
++
++ /* Make sure we've got rtnl lock since we're playing with
++ * interrupts, and calling efx_process_channel_now and others
++ */
++ ASSERT_RTNL();
++
++ /* If the NIC isn't in the RUNNING state then exit */
++ if (efx->state != STATE_RUNNING) {
++ rc = -EIO;
++ goto fail1;
++ }
++
++ /* Make sure the interface is up. We need interrupts, NAPI
++ * and some RX buffers so this is helpful. NB. The caller has
++ * rtnl_lock so nobody else can call dev_open. */
++ already_up = (efx->net_dev->flags & IFF_UP);
++ if (!already_up) {
++ rc = dev_open(efx->net_dev);
++ if (rc) {
++ EFX_ERR(efx, "failed opening device.\n");
++ goto fail2;
++ }
++ }
++
++ memset(&efx_tests, 0, sizeof(efx_tests));
++ offline = (test->flags & ETH_TEST_FL_OFFLINE);
++
++ /* Perform online self tests first */
++ rc = efx_online_test(efx, &efx_tests);
++ if (rc)
++ goto out;
++
++ /* Perform offline tests only if online tests passed */
++ if (offline) {
++ /* Stop the kernel from sending packets during the test. The
++ * selftest will be consistently bringing the port up and down
++ * as it moves between loopback modes, so the watchdog timer
++ * probably won't run anyway */
++ efx_stop_queue(efx);
++
++ rc = efx_flush_queues(efx);
++ if (rc != 0)
++ goto out_offline;
++
++ rc = efx_offline_test(efx, &efx_tests,
++ efx->loopback_modes);
++ out_offline:
++ efx_wake_queue(efx);
++ }
++
++ /* fall-thru */
++ out:
++ if (!already_up)
++ dev_close(efx->net_dev);
++
++ EFX_LOG(efx, "%s all %sline self-tests\n",
++ rc == 0 ? "passed" : "failed", offline ? "off" : "on");
++
++ fail2:
++ fail1:
++ /* Fill ethtool results structures */
++ efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
++ if (rc)
++ test->flags |= ETH_TEST_FL_FAILED;
++}
++
++/* Restart autonegotiation */
++static int efx_ethtool_nway_reset(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ return mii_nway_restart(&efx->mii);
++}
++
++static u32 efx_ethtool_get_link(struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ return efx->link_up;
++}
++
++static int efx_ethtool_get_coalesce(struct net_device *net_dev,
++ struct ethtool_coalesce *coalesce)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_tx_queue *tx_queue;
++ struct efx_rx_queue *rx_queue;
++ struct efx_channel *channel;
++
++ memset(coalesce, 0, sizeof(*coalesce));
++
++ /* Find lowest IRQ moderation across all used TX queues */
++ coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
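++ /* (~(u32)0 == 0xffffffff, so any real moderation value is lower) */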
++ efx_for_each_tx_queue(tx_queue, efx) {
++ channel = tx_queue->channel;
++ if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
++ if (channel->used_flags != EFX_USED_BY_RX_TX)
++ coalesce->tx_coalesce_usecs_irq =
++ channel->irq_moderation;
++ else
++ coalesce->tx_coalesce_usecs_irq = 0;
++ }
++ }
++
++ /* Find lowest IRQ moderation across all used RX queues */
++ coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
++ efx_for_each_rx_queue(rx_queue, efx) {
++ channel = rx_queue->channel;
++ if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
++ coalesce->rx_coalesce_usecs_irq =
++ channel->irq_moderation;
++ }
++
++ return 0;
++}
++
++/* Set coalescing parameters
++ * Channels shared between RX and TX are the difficult case: only
++ * their RX parameters may be changed.
++ */
++static int efx_ethtool_set_coalesce(struct net_device *net_dev,
++ struct ethtool_coalesce *coalesce)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_channel *channel;
++ struct efx_tx_queue *tx_queue;
++ unsigned tx_usecs, rx_usecs;
++
++ if (coalesce->use_adaptive_rx_coalesce ||
++ coalesce->use_adaptive_tx_coalesce)
++ return -EOPNOTSUPP;
++
++ if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
++ EFX_ERR(efx, "invalid coalescing setting. "
++ "Only rx/tx_coalesce_usecs_irq are supported\n");
++ return -EOPNOTSUPP;
++ }
++
++ rx_usecs = coalesce->rx_coalesce_usecs_irq;
++ tx_usecs = coalesce->tx_coalesce_usecs_irq;
++
++ /* If the channel is shared, only allow the RX parameters to be set */
++ efx_for_each_tx_queue(tx_queue, efx) {
++ if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
++ tx_usecs) {
++ EFX_ERR(efx, "Channel is shared. "
++ "Only RX coalescing may be set\n");
++ return -EOPNOTSUPP;
++ }
++ }
++
++ efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
++
++ /* Reset channel to pick up new moderation value. Note that
++ * this may change the value of the irq_moderation field
++ * (e.g. to allow for hardware timer granularity).
++ */
++ efx_for_each_channel(channel, efx)
++ falcon_set_int_moderation(channel);
++
++ return 0;
++}
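++
++/* From user space these correspond to, illustratively (device name and
++ * values are examples only):
++ *
++ * ethtool -C eth0 rx-usecs-irq 60 tx-usecs-irq 60
++ *
++ * Adaptive coalescing and the plain rx/tx-usecs parameters are rejected
++ * above with -EOPNOTSUPP.
++ */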
++
++static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
++{
++ struct efx_nic *efx = net_dev->priv;
++ enum efx_fc_type flow_control = efx->flow_control;
++ int rc;
++
++ flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO);
++ flow_control |= pause->rx_pause ? EFX_FC_RX : 0;
++ flow_control |= pause->tx_pause ? EFX_FC_TX : 0;
++ flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
++
++ /* Try to push the pause parameters */
++ rc = efx->mac_op->set_pause(efx, flow_control);
++ if (rc)
++ return rc;
++
++ /* Push the settings to the MAC */
++ efx_reconfigure_port(efx, 0);
++
++ return 0;
++}
++
++static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
++ pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
++ pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
++}
++
++
++#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
++static int efx_ethtool_op_get_perm_addr(struct net_device *net_dev,
++ struct ethtool_perm_addr *addr,
++ u8 *data)
++{
++ struct efx_nic *efx = net_dev->priv;
++
++ memcpy(data, efx->mac_address, ETH_ALEN);
++
++ return 0;
++}
++#endif
++
++struct ethtool_ops efx_ethtool_ops = {
++ .get_settings = efx_ethtool_get_settings,
++ .set_settings = efx_ethtool_set_settings,
++ .get_drvinfo = efx_ethtool_get_drvinfo,
++ .nway_reset = efx_ethtool_nway_reset,
++ .get_link = efx_ethtool_get_link,
++ .get_coalesce = efx_ethtool_get_coalesce,
++ .set_coalesce = efx_ethtool_set_coalesce,
++ .get_pauseparam = efx_ethtool_get_pauseparam,
++ .set_pauseparam = efx_ethtool_set_pauseparam,
++ .get_rx_csum = efx_ethtool_get_rx_csum,
++ .set_rx_csum = efx_ethtool_set_rx_csum,
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = efx_ethtool_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++#if defined(EFX_USE_ETHTOOL_FLAGS)
++ .get_flags = ethtool_op_get_flags,
++ .set_flags = ethtool_op_set_flags,
++#endif
++ .self_test_count = efx_ethtool_self_test_count,
++ .self_test = efx_ethtool_self_test,
++ .get_strings = efx_ethtool_get_strings,
++ .phys_id = efx_ethtool_phys_id,
++ .get_stats_count = efx_ethtool_get_stats_count,
++ .get_ethtool_stats = efx_ethtool_get_stats,
++#if defined(EFX_USE_ETHTOOL_GET_PERM_ADDR)
++ .get_perm_addr = efx_ethtool_op_get_perm_addr,
++#endif
++};
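++
++/* Note (illustrative; the hook-up itself is outside this file): the
++ * driver's probe path is expected to attach this table to the net
++ * device, e.g.
++ *
++ * SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
++ *
++ * so that ethtool requests (-S, -t, -a, -c, ...) reach the handlers
++ * above.
++ */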
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/ethtool.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/ethtool.h
+--- linux-2.6.18.8/drivers/net/sfc/ethtool.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/ethtool.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,44 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_ETHTOOL_H
++#define EFX_ETHTOOL_H
++
++#include "net_driver.h"
++
++/*
++ * Ethtool support
++ */
++
++extern int efx_ethtool_get_settings(struct net_device *net_dev,
++ struct ethtool_cmd *ecmd);
++extern int efx_ethtool_set_settings(struct net_device *net_dev,
++ struct ethtool_cmd *ecmd);
++
++extern struct ethtool_ops efx_ethtool_ops;
++
++#endif /* EFX_ETHTOOL_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/extraversion.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/extraversion.h
+--- linux-2.6.18.8/drivers/net/sfc/extraversion.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/extraversion.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,4 @@
++/*
++ * If compiling on kernels with backported features you may need to
++ * define EFX_DIST_KVER_ symbols here
++ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/falcon.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon.c
+--- linux-2.6.18.8/drivers/net/sfc/falcon.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3708 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <asm/io.h>
++#include <asm/bitops.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include "net_driver.h"
++#include "bitfield.h"
++#include "efx.h"
++#include "mac.h"
++#include "gmii.h"
++#include "spi.h"
++#include "falcon.h"
++#include "falcon_hwdefs.h"
++#include "falcon_io.h"
++#include "mdio_10g.h"
++#include "phy.h"
++#include "boards.h"
++#include "driverlink.h"
++#include "workarounds.h"
++
++/* Falcon hardware control.
++ * Falcon is the internal codename for the SFC4000 controller that is
++ * present in SFE400X evaluation boards
++ */
++
++struct falcon_nic_data {
++ /* Number of entries in each TX queue descriptor cache. */
++ unsigned tx_dc_entries;
++ /* Number of entries in each RX queue descriptor cache. */
++ unsigned rx_dc_entries;
++ /* Base address in SRAM of TX queue descriptor caches. */
++ unsigned tx_dc_base;
++ /* Base address in SRAM of RX queue descriptor caches. */
++ unsigned rx_dc_base;
++
++ /* Previous loopback mode used in deconfigure_mac_wrapper */
++ enum efx_loopback_mode old_loopback_mode;
++
++ /* Driverlink parameters */
++ struct efx_dl_falcon_resources resources;
++};
++
++/**************************************************************************
++ *
++ * Configurable values
++ *
++ **************************************************************************
++ */
++
++static int disable_dma_stats;
++
++/* Specify the size of the RX descriptor cache */
++static int descriptor_cache_size = 64;
++
++/*
++ * Override the EEPROM/flash type that would otherwise be determined
++ * from non-volatile configuration or GPIO; may need to be specified
++ * when bootstrapping from blank flash.
++ */
++static unsigned int eeprom_type = -1;
++static unsigned int flash_type = -1;
++
++/* RX FIFO XOFF watermark
++ *
++ * When the amount of the RX FIFO used increases past this watermark,
++ * send XOFF. Only used if RX flow control is enabled (ethtool -A).
++ * This also has an effect on RX/TX arbitration.
++ */
++static int rx_xoff_thresh_bytes = -1;
++module_param(rx_xoff_thresh_bytes, int, 0644);
++MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
++
++/* RX FIFO XON watermark
++ *
++ * When the amount of the RX FIFO used decreases below this watermark,
++ * send XON. Only used if TX flow control is enabled (ethtool -A).
++ * This also has an effect on RX/TX arbitration.
++ */
++static int rx_xon_thresh_bytes = -1;
++module_param(rx_xon_thresh_bytes, int, 0644);
++MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
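++
++/* Both watermarks can be overridden at load time, e.g. (assuming the
++ * module is built as "sfc", and with purely illustrative values):
++ *
++ * modprobe sfc rx_xoff_thresh_bytes=196608 rx_xon_thresh_bytes=131072
++ *
++ * The -1 defaults are taken to mean "let the driver choose".
++ */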
++
++/* TX descriptor ring size - min 512 max 4k */
++#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
++#define FALCON_TXD_RING_SIZE 1024
++#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
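++
++/* The ring sizes are powers of two so that masking with (SIZE - 1)
++ * implements index wrap-around, e.g. (1023 + 1) & FALCON_TXD_RING_MASK == 0.
++ */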
++
++/* RX descriptor ring size - min 512 max 4k */
++#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
++#define FALCON_RXD_RING_SIZE 1024
++#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
++
++/* Event queue size - max 32k */
++#define FALCON_EVQ_ORDER EVQ_SIZE_4K
++#define FALCON_EVQ_SIZE 4096
++#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
++
++/* Max number of internal errors. After this resets will not be performed */
++#define FALCON_MAX_INT_ERRORS 4
++
++/* Maximum period that we wait for flush events. If the flush event
++ * doesn't arrive in this period of time then we check if the queue
++ * was disabled anyway. */
++#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
++
++/**************************************************************************
++ *
++ * Falcon constants
++ *
++ **************************************************************************
++ */
++
++/* DMA address mask (up to 46-bit, avoiding compiler warnings)
++ *
++ * Note that it is possible to have a platform with 64-bit longs and
++ * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
++ * platform DMA mask.
++ */
++#if BITS_PER_LONG == 64
++#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
++#else
++#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
++#endif
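++
++/* 0x00003fffffffffff == (1ULL << 46) - 1, i.e. a 64TiB addressable range */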
++
++/* TX DMA length mask (13-bit) */
++#define FALCON_TX_DMA_MASK (8192 - 1)
++
++/* Alignment of special buffers (4KB) */
++#define FALCON_BUF_ALIGN 4096
++
++/* Dummy SRAM size code */
++#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
++
++/* It would be nice if these (or equivalents) were in linux/pci_regs.h,
++ * but they're not. */
++#define PCI_EXP_DEVCAP_PWR_VAL_LBN (18)
++/* This field takes up bits 26 and 27. */
++#define PCI_EXP_DEVCAP_PWR_SCL_LBN (26)
++#define PCI_EXP_LNKSTA_LNK_WID (0x3f0)
++#define PCI_EXP_LNKSTA_LNK_WID_LBN (4)
++
++
++/**************************************************************************
++ *
++ * Falcon hardware access
++ *
++ **************************************************************************/
++
++/* Read the current event from the event queue */
++static inline efx_qword_t *falcon_event(struct efx_channel *channel,
++ unsigned int index)
++{
++ return (((efx_qword_t *) (channel->eventq.addr)) + index);
++}
++
++/* See if an event is present
++ *
++ * We check both the high and low dword of the event for all ones. We
++ * wrote all ones when we cleared the event, and no valid event can
++ * have all ones in either its high or low dwords. This approach is
++ * robust against reordering.
++ *
++ * Note that using a single 64-bit comparison is incorrect; even
++ * though the CPU read will be atomic, the DMA write may not be.
++ */
++static inline int falcon_event_present(efx_qword_t *event)
++{
++ return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
++ EFX_DWORD_IS_ALL_ONES(event->dword[1])));
++}
++
++/* Read dword from a Falcon PCIE core register */
++static void falcon_pcie_core_read_reg(struct efx_nic *efx, int address,
++ efx_dword_t *result)
++{
++ efx_oword_t temp;
++
++ BUG_ON(FALCON_REV(efx) < FALCON_REV_B0);
++ BUG_ON(address & 3 || address < 0);
++
++ EFX_POPULATE_OWORD_1(temp, PCIE_CORE_ADDR, address);
++
++ falcon_write(efx, &temp, PCIE_CORE_INDIRECT_REG);
++ falcon_read(efx, &temp, PCIE_CORE_INDIRECT_REG);
++ /* Extract PCIE_CORE_VALUE without byte-swapping */
++ BUILD_BUG_ON(PCIE_CORE_VALUE_LBN != 32 ||
++ PCIE_CORE_VALUE_WIDTH != 32);
++ result->u32[0] = temp.u32[1];
++}
++
++/* Write dword to a Falcon PCIE core register */
++static void falcon_pcie_core_write_reg(struct efx_nic *efx, int address,
++ efx_dword_t value)
++{
++ efx_oword_t temp;
++
++ BUG_ON(FALCON_REV(efx) < FALCON_REV_B0);
++ BUG_ON(address & 0x3 || address < 0);
++
++ EFX_POPULATE_OWORD_2(temp,
++ PCIE_CORE_ADDR, address,
++ PCIE_CORE_RW, 1);
++ /* Fill PCIE_CORE_VALUE without byte-swapping */
++ BUILD_BUG_ON(PCIE_CORE_VALUE_LBN != 32 ||
++ PCIE_CORE_VALUE_WIDTH != 32);
++ temp.u32[1] = value.u32[0];
++ falcon_write(efx, &temp, PCIE_CORE_INDIRECT_REG);
++}
++
++/**************************************************************************
++ *
++ * I2C bus - this is a bit-bashing interface using GPIO pins
++ * Note that it uses the output enables to tristate the outputs
++ * SDA is the data pin and SCL is the clock
++ *
++ **************************************************************************
++ */
++static void falcon_setsdascl(struct efx_i2c_interface *i2c)
++{
++ efx_oword_t reg;
++
++ falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
++ EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1));
++ EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1));
++ falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
++}
++
++static int falcon_getsda(struct efx_i2c_interface *i2c)
++{
++ efx_oword_t reg;
++
++ falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
++ return EFX_OWORD_FIELD(reg, GPIO3_IN);
++}
++
++static int falcon_getscl(struct efx_i2c_interface *i2c)
++{
++ efx_oword_t reg;
++
++ falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
++ return EFX_DWORD_FIELD(reg, GPIO0_IN);
++}
++
++static struct efx_i2c_bit_operations falcon_i2c_bit_operations = {
++ .setsda = falcon_setsdascl,
++ .setscl = falcon_setsdascl,
++ .getsda = falcon_getsda,
++ .getscl = falcon_getscl,
++ .udelay = 100,
++ .mdelay = 10,
++};
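++
++/* Note that .setsda and .setscl deliberately share falcon_setsdascl():
++ * both output enables live in the same GPIO_CTL register, so each call
++ * rewrites both bits from the cached i2c->sda/i2c->scl state.
++ */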
++
++/**************************************************************************
++ *
++ * Falcon special buffer handling
++ * Special buffers are used for event queues and the TX and RX
++ * descriptor rings.
++ *
++ *************************************************************************/
++
++/* Adds the relevant entries to the full-mode buffer table. */
++static int
++falcon_pin_special_buffer_full(struct efx_nic *efx,
++ struct efx_special_buffer *buffer)
++{
++ efx_qword_t buf_desc;
++ int index;
++ dma_addr_t dma_addr;
++ int i;
++
++ /* Write buffer descriptors to NIC */
++ for (i = 0; i < buffer->entries; i++) {
++ index = buffer->index + i;
++ dma_addr = buffer->dma_addr + (i * 4096);
++ EFX_LOG(efx, "mapping special buffer %d at %llx\n",
++ index, (unsigned long long)dma_addr);
++ EFX_POPULATE_QWORD_4(buf_desc,
++ IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
++ BUF_ADR_REGION, 0,
++ BUF_ADR_FBUF, (dma_addr >> 12),
++ BUF_OWNER_ID_FBUF, 0);
++ falcon_write_sram(efx, &buf_desc, index);
++ }
++
++ return 0;
++}
++
++/* Clears the relevant entries from the buffer table */
++static void
++falcon_clear_special_buffer_full(struct efx_nic *efx,
++ struct efx_special_buffer *buffer)
++{
++ efx_oword_t buf_tbl_upd;
++ unsigned int start = buffer->index;
++ unsigned int end = (buffer->index + buffer->entries - 1);
++
++ EFX_LOG(efx, "unmapping special buffers %d-%d\n",
++ buffer->index, buffer->index + buffer->entries - 1);
++
++ EFX_POPULATE_OWORD_4(buf_tbl_upd,
++ BUF_UPD_CMD, 0,
++ BUF_CLR_CMD, 1,
++ BUF_CLR_END_ID, end,
++ BUF_CLR_START_ID, start);
++ falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
++}
++
++/*
++ * Allocate a new Falcon special buffer
++ *
++ * This allocates memory for a new buffer, clears it and allocates a
++ * new buffer ID range. It does not write into Falcon's buffer table.
++ *
++ * This call will allocate 4kB buffers, since Falcon can't use 8kB
++ * buffers for event queues and descriptor rings. It will always
++ * allocate an even number of 4kB buffers, since when we're in
++ * half-entry mode for the buffer table we can only deal with pairs of
++ * buffers.
++ */
++static int falcon_alloc_special_buffer(struct efx_nic *efx,
++ struct efx_special_buffer *buffer,
++ unsigned int len)
++{
++ struct falcon_nic_data *nic_data = efx->nic_data;
++
++ /* Round size up to an 8kB boundary (i.e. pairs of 4kB buffers) */
++ len = (len + 8192 - 1) & ~(8192 - 1);
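++ /* e.g. len = 4096 or 8192 rounds to 8192; len = 8193 rounds to 16384 */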
++
++ /* Allocate buffer as consistent PCI DMA space */
++ buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
++ &buffer->dma_addr);
++ if (!buffer->addr)
++ return -ENOMEM;
++ buffer->len = len;
++ buffer->entries = len / 4096;
++ BUG_ON(buffer->dma_addr & (FALCON_BUF_ALIGN - 1));
++
++ /* All zeros is a potentially valid event so memset to 0xff */
++ memset(buffer->addr, 0xff, len);
++
++ /* Select new buffer ID */
++ buffer->index = nic_data->resources.buffer_table_min;
++ nic_data->resources.buffer_table_min += buffer->entries;
++
++ EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
++ "(virt %p phys %lx)\n", buffer->index,
++ buffer->index + buffer->entries - 1,
++ (unsigned long long)buffer->dma_addr, len,
++ buffer->addr, virt_to_phys(buffer->addr));
++
++ return 0;
++}
++
++/*
++ * Initialise a Falcon special buffer
++ *
++ * This will define a buffer (previously allocated via
++ * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
++ * it to be used for event queues, descriptor rings etc.
++ */
++static int falcon_init_special_buffer(struct efx_nic *efx,
++ struct efx_special_buffer *buffer)
++{
++ EFX_BUG_ON_PARANOID(!buffer->addr);
++
++ /* Write buffer descriptors to NIC */
++ return falcon_pin_special_buffer_full(efx, buffer);
++}
++
++/* Unmaps a buffer from Falcon and clears the buffer table
++ * entries */
++static void falcon_fini_special_buffer(struct efx_nic *efx,
++ struct efx_special_buffer *buffer)
++{
++ if (!buffer->entries)
++ return;
++
++ falcon_clear_special_buffer_full(efx, buffer);
++}
++
++/* Release the buffer memory. */
++static void falcon_free_special_buffer(struct efx_nic *efx,
++ struct efx_special_buffer *buffer)
++{
++ if (!buffer->addr)
++ return;
++
++ EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
++ "(virt %p phys %lx)\n", buffer->index,
++ buffer->index + buffer->entries - 1,
++ (unsigned long long)buffer->dma_addr, buffer->len,
++ buffer->addr, virt_to_phys(buffer->addr));
++
++ pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
++ buffer->dma_addr);
++ buffer->addr = NULL;
++ buffer->entries = 0;
++}
++
++/**************************************************************************
++ *
++ * Falcon generic buffer handling
++ * These buffers are used for interrupt status and MAC stats
++ *
++ **************************************************************************/
++
++static int falcon_alloc_buffer(struct efx_nic *efx,
++ struct efx_buffer *buffer, unsigned int len)
++{
++ buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
++ &buffer->dma_addr);
++ if (!buffer->addr)
++ return -ENOMEM;
++ buffer->len = len;
++ memset(buffer->addr, 0, len);
++ return 0;
++}
++
++static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
++{
++ if (buffer->addr) {
++ pci_free_consistent(efx->pci_dev, buffer->len,
++ buffer->addr, buffer->dma_addr);
++ buffer->addr = NULL;
++ }
++}
++
++/**************************************************************************
++ *
++ * Falcon TX path
++ *
++ **************************************************************************/
++
++/* Returns a pointer to the specified transmit descriptor in the TX
++ * descriptor queue belonging to the specified channel.
++ */
++static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
++ unsigned int index)
++{
++ return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
++}
++
++/* Update TX descriptor write pointer
++ * This writes to the TX_DESC_WPTR register for the specified
++ * channel's transmit descriptor ring.
++ */
++static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
++{
++ unsigned write_ptr;
++ efx_dword_t reg;
++
++ write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
++ EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
++ falcon_writel_page(tx_queue->efx, &reg,
++ TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
++}
++
++
++/* For each entry inserted into the software descriptor ring, create a
++ * descriptor in the hardware TX descriptor ring (in host memory), and
++ * write a doorbell.
++ */
++#if defined(EFX_USE_FASTCALL)
++void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue)
++#else
++void falcon_push_buffers(struct efx_tx_queue *tx_queue)
++#endif
++{
++ struct efx_tx_buffer *buffer;
++ efx_qword_t *txd;
++ unsigned write_ptr;
++
++ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
++
++ do {
++ write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
++ buffer = &tx_queue->buffer[write_ptr];
++ txd = falcon_tx_desc(tx_queue, write_ptr);
++ ++tx_queue->write_count;
++
++ /* Create TX descriptor ring entry */
++ EFX_POPULATE_QWORD_5(*txd,
++ TX_KER_PORT, 0,
++ TX_KER_CONT, buffer->continuation,
++ TX_KER_BYTE_CNT, buffer->len,
++ TX_KER_BUF_REGION, 0,
++ TX_KER_BUF_ADR, buffer->dma_addr);
++ } while (tx_queue->write_count != tx_queue->insert_count);
++
++ wmb(); /* Ensure descriptors are written before they are fetched */
++ falcon_notify_tx_desc(tx_queue);
++}
++
++/* Allocate hardware resources for a TX queue */
++int falcon_probe_tx(struct efx_tx_queue *tx_queue)
++{
++ struct efx_nic *efx = tx_queue->efx;
++ struct falcon_nic_data *nic_data = efx->nic_data;
++ int rc;
++
++ rc = falcon_alloc_special_buffer(efx, &tx_queue->txd,
++ FALCON_TXD_RING_SIZE *
++ sizeof(efx_qword_t));
++ if (rc)
++ return rc;
++
++ nic_data->resources.txq_min = max(nic_data->resources.txq_min,
++ (unsigned)tx_queue->queue + 1);
++
++ return 0;
++}
++
++/* Prepare channel's TX datapath. */
++int falcon_init_tx(struct efx_tx_queue *tx_queue)
++{
++ efx_oword_t tx_desc_ptr;
++ struct efx_nic *efx = tx_queue->efx;
++ int rc;
++
++ /* Pin TX descriptor ring */
++ rc = falcon_init_special_buffer(efx, &tx_queue->txd);
++ if (rc)
++ return rc;
++
++ /* Push TX descriptor ring to card */
++ EFX_POPULATE_OWORD_10(tx_desc_ptr,
++ TX_DESCQ_EN, 1,
++ TX_ISCSI_DDIG_EN, 0,
++ TX_ISCSI_HDIG_EN, 0,
++ TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
++ TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
++ TX_DESCQ_OWNER_ID, 0,
++ TX_DESCQ_LABEL, tx_queue->queue,
++ TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
++ TX_DESCQ_TYPE, 0, /* kernel queue */
++ TX_NON_IP_DROP_DIS_B0, 1);
++
++ if (FALCON_REV(efx) >= FALCON_REV_B0) {
++ int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
++ EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
++ EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
++ }
++
++ falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
++ tx_queue->queue);
++
++ if (FALCON_REV(efx) < FALCON_REV_B0) {
++ efx_oword_t reg;
++
++ /* Only 128 bits in this register */
++ BUG_ON(tx_queue->queue >= 128);
++
++ falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
++ if (efx->net_dev->features & NETIF_F_IP_CSUM)
++ clear_bit_le(tx_queue->queue, (void *)&reg);
++ else
++ set_bit_le(tx_queue->queue, (void *)&reg);
++ falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
++ }
++
++ return 0;
++}
++
++static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ struct efx_nic *efx = tx_queue->efx;
++ struct efx_channel *channel = &efx->channel[0];
++ efx_oword_t tx_flush_descq;
++ unsigned int read_ptr, i;
++
++ /* Post a flush command */
++ EFX_POPULATE_OWORD_2(tx_flush_descq,
++ TX_FLUSH_DESCQ_CMD, 1,
++ TX_FLUSH_DESCQ, tx_queue->queue);
++ falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
++ msleep(FALCON_FLUSH_TIMEOUT);
++
++ /* If the NIC is resetting then don't bother checking */
++ if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
++ return 0;
++
++ /* Look for a flush completed event */
++ read_ptr = channel->eventq_read_ptr;
++ for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
++ efx_qword_t *event = falcon_event(channel, read_ptr);
++ int ev_code, ev_sub_code, ev_queue;
++ if (!falcon_event_present(event))
++ break;
++
++ ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
++ ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
++ ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
++ if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
++ (ev_queue == tx_queue->queue)) {
++ EFX_LOG(efx, "tx queue %d flush command successful\n",
++ tx_queue->queue);
++ return 0;
++ }
++
++ read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
++ }
++
++ if (EFX_WORKAROUND_11557(efx)) {
++ efx_oword_t reg;
++ int enabled;
++
++ falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
++ tx_queue->queue);
++ enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
++ if (!enabled) {
++ EFX_LOG(efx, "tx queue %d disabled without a "
++ "flush event seen\n", tx_queue->queue);
++ return 0;
++ }
++ }
++
++ EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
++ return -ETIMEDOUT;
++}
++
++void falcon_fini_tx(struct efx_tx_queue *tx_queue)
++{
++ struct efx_nic *efx = tx_queue->efx;
++ efx_oword_t tx_desc_ptr;
++
++ /* Stop the hardware using the queue */
++ if (falcon_flush_tx_queue(tx_queue))
++ EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
++
++ /* Remove TX descriptor ring from card */
++ EFX_ZERO_OWORD(tx_desc_ptr);
++ falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
++ tx_queue->queue);
++
++ /* Unpin TX descriptor ring */
++ falcon_fini_special_buffer(efx, &tx_queue->txd);
++}
++
++/* Free buffers backing TX queue */
++void falcon_remove_tx(struct efx_tx_queue *tx_queue)
++{
++ falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
++}
++
++/**************************************************************************
++ *
++ * Falcon RX path
++ *
++ **************************************************************************/
++
++/* Returns a pointer to the specified receive descriptor in the RX
++ * descriptor queue.
++ */
++static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
++ unsigned int index)
++{
++ return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
++}
++
++/* This creates an entry in the RX descriptor queue corresponding to
++ * the receive buffer.
++ */
++static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
++ unsigned index)
++{
++ struct efx_rx_buffer *rx_buf;
++ efx_qword_t *rxd;
++
++ rxd = falcon_rx_desc(rx_queue, index);
++ rx_buf = efx_rx_buffer(rx_queue, index);
++ EFX_POPULATE_QWORD_3(*rxd,
++ RX_KER_BUF_SIZE,
++ rx_buf->len -
++ rx_queue->efx->type->rx_buffer_padding,
++ RX_KER_BUF_REGION, 0,
++ RX_KER_BUF_ADR, rx_buf->dma_addr);
++}
++
++/* This writes to the RX_DESC_WPTR register for the specified receive
++ * descriptor ring.
++ */
++#if defined(EFX_USE_FASTCALL)
++void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
++#else
++void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
++#endif
++{
++ efx_dword_t reg;
++ unsigned write_ptr;
++
++ while (rx_queue->notified_count != rx_queue->added_count) {
++ falcon_build_rx_desc(rx_queue,
++ rx_queue->notified_count &
++ FALCON_RXD_RING_MASK);
++ ++rx_queue->notified_count;
++ }
++
++ wmb();
++ write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
++ EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
++ falcon_writel_page(rx_queue->efx, &reg,
++ RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
++}
++
++int falcon_probe_rx(struct efx_rx_queue *rx_queue)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ struct falcon_nic_data *nic_data = efx->nic_data;
++ int rc;
++
++ rc = falcon_alloc_special_buffer(efx, &rx_queue->rxd,
++ FALCON_RXD_RING_SIZE *
++ sizeof(efx_qword_t));
++ if (rc)
++ return rc;
++
++ /* Increment the rxq_min counter */
++ nic_data->resources.rxq_min = max(nic_data->resources.rxq_min,
++ (unsigned)rx_queue->queue + 1);
++
++ return 0;
++}
++
++int falcon_init_rx(struct efx_rx_queue *rx_queue)
++{
++ efx_oword_t rx_desc_ptr;
++ struct efx_nic *efx = rx_queue->efx;
++ int rc;
++ int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
++ int iscsi_digest_en = is_b0;
++
++ EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
++ rx_queue->queue, rx_queue->rxd.index,
++ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
++
++ /* Pin RX descriptor ring */
++ rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
++ if (rc)
++ return rc;
++
++ /* Push RX descriptor ring to card */
++ EFX_POPULATE_OWORD_10(rx_desc_ptr,
++ RX_ISCSI_DDIG_EN, iscsi_digest_en,
++ RX_ISCSI_HDIG_EN, iscsi_digest_en,
++ RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
++ RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
++ RX_DESCQ_OWNER_ID, 0,
++ RX_DESCQ_LABEL, rx_queue->queue,
++ RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
++ RX_DESCQ_TYPE, 0 /* kernel queue */ ,
++ /* For >=B0 this is scatter so disable */
++ RX_DESCQ_JUMBO, !is_b0,
++ RX_DESCQ_EN, 1);
++ falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
++ rx_queue->queue);
++ return 0;
++}
++
++static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ struct efx_channel *channel = &efx->channel[0];
++ unsigned int read_ptr, i;
++ efx_oword_t rx_flush_descq;
++
++ /* Post a flush command */
++ EFX_POPULATE_OWORD_2(rx_flush_descq,
++ RX_FLUSH_DESCQ_CMD, 1,
++ RX_FLUSH_DESCQ, rx_queue->queue);
++
++ falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
++ msleep(FALCON_FLUSH_TIMEOUT);
++
++ /* If the NIC is resetting then don't bother checking */
++ if (EFX_WORKAROUND_7803(efx) || (efx->state == STATE_RESETTING))
++ return 0;
++
++ /* Look for a flush completed event */
++ read_ptr = channel->eventq_read_ptr;
++ for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
++ efx_qword_t *event = falcon_event(channel, read_ptr);
++ int ev_code, ev_sub_code, ev_queue, ev_failed;
++ if (!falcon_event_present(event))
++ break;
++
++ ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
++ ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
++ ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
++ ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
++
++ if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
++ (ev_queue == rx_queue->queue)) {
++ if (ev_failed) {
++ EFX_INFO(efx, "rx queue %d flush command "
++ "failed\n", rx_queue->queue);
++ return -EAGAIN;
++ } else {
++ EFX_LOG(efx, "rx queue %d flush command "
++ "succesful\n", rx_queue->queue);
++ return 0;
++ }
++ }
++
++ read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
++ }
++
++ if (EFX_WORKAROUND_11557(efx)) {
++ efx_oword_t reg;
++ int enabled;
++
++ falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
++ rx_queue->queue);
++ enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
++ if (!enabled) {
++ EFX_LOG(efx, "rx queue %d disabled without a "
++ "flush event seen\n", rx_queue->queue);
++ return 0;
++ }
++ }
++
++ EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
++ return -ETIMEDOUT;
++}
++
++void falcon_fini_rx(struct efx_rx_queue *rx_queue)
++{
++ efx_oword_t rx_desc_ptr;
++ struct efx_nic *efx = rx_queue->efx;
++ int i, rc;
++
++ /* Try and flush the rx queue. This may need to be repeated */
++ for (i = 0; i < 5; i++) {
++ rc = falcon_flush_rx_queue(rx_queue);
++ if (rc == -EAGAIN)
++ continue;
++ break;
++ }
++ if (rc)
++ EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
++
++ /* Remove RX descriptor ring from card */
++ EFX_ZERO_OWORD(rx_desc_ptr);
++ falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
++ rx_queue->queue);
++
++ /* Unpin RX descriptor ring */
++ falcon_fini_special_buffer(efx, &rx_queue->rxd);
++}
++
++/* Free buffers backing RX queue */
++void falcon_remove_rx(struct efx_rx_queue *rx_queue)
++{
++ falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
++}
++
++/**************************************************************************
++ *
++ * Falcon event queue processing
++ * Event queues are processed by per-channel tasklets.
++ *
++ **************************************************************************/
++
++/* Update a channel's event queue's read pointer (RPTR) register
++ *
++ * This writes the EVQ_RPTR_REG register for the specified channel's
++ * event queue.
++ *
++ * Note that EVQ_RPTR_REG contains the index of the "last read" event,
++ * whereas channel->eventq_read_ptr contains the index of the "next to
++ * read" event.
++ */
++#if defined(EFX_USE_FASTCALL)
++void fastcall falcon_eventq_read_ack(struct efx_channel *channel)
++#else
++void falcon_eventq_read_ack(struct efx_channel *channel)
++#endif
++{
++ efx_dword_t reg;
++ struct efx_nic *efx = channel->efx;
++
++ EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
++ falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
++ channel->evqnum);
++}
++
++/* Use HW to insert a SW defined event */
++void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
++{
++ efx_oword_t drv_ev_reg;
++
++ EFX_POPULATE_OWORD_2(drv_ev_reg,
++ DRV_EV_QID, channel->evqnum,
++ DRV_EV_DATA,
++ EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
++ falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
++}
++
++/* Handle a transmit completion event
++ *
++ * Falcon batches TX completion events; the message we receive is of
++ * the form "complete all TX events up to this index".
++ */
++static inline void falcon_handle_tx_event(struct efx_channel *channel,
++ efx_qword_t *event)
++{
++ unsigned int tx_ev_desc_ptr;
++ unsigned int tx_ev_q_label;
++ struct efx_tx_queue *tx_queue;
++ struct efx_nic *efx = channel->efx;
++
++ if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
++ /* Transmit completion */
++ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
++ tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
++ tx_queue = &efx->tx_queue[tx_ev_q_label];
++ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
++ } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
++ /* Rewrite the FIFO write pointer */
++ tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
++ tx_queue = &efx->tx_queue[tx_ev_q_label];
++
++ if (efx->net_dev_registered)
++ netif_tx_lock(efx->net_dev);
++ falcon_notify_tx_desc(tx_queue);
++ if (efx->net_dev_registered)
++ netif_tx_unlock(efx->net_dev);
++ } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
++ EFX_WORKAROUND_10727(efx)) {
++ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
++ } else {
++ EFX_ERR(efx, "channel %d unexpected TX event "
++ EFX_QWORD_FMT"\n", channel->channel,
++ EFX_QWORD_VAL(*event));
++ }
++}
++
++/* Check received packet's destination MAC address. */
++static int check_dest_mac(struct efx_rx_queue *rx_queue,
++ const efx_qword_t *event)
++{
++ struct efx_rx_buffer *rx_buf;
++ struct efx_nic *efx = rx_queue->efx;
++ int rx_ev_desc_ptr;
++ struct ethhdr *eh;
++
++ if (efx->promiscuous)
++ return 1;
++
++ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
++ rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
++ eh = (struct ethhdr *)rx_buf->data;
++ if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
++ return 0;
++ return 1;
++}
++
++/* Detect errors included in the rx_ev_pkt_ok bit. */
++static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
++ const efx_qword_t *event,
++ unsigned *rx_ev_pkt_ok,
++ int *discard, int byte_count)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
++ unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
++ unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
++ unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
++ unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
++ int snap, non_ip;
++
++ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
++ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
++ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
++ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
++ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
++ RX_EV_BUF_OWNER_ID_ERR);
++ rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
++ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
++ RX_EV_IP_HDR_CHKSUM_ERR);
++ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
++ RX_EV_TCP_UDP_CHKSUM_ERR);
++ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
++ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
++ rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
++ 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
++ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
++
++ /* Every error apart from tobe_disc and pause_frm */
++ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
++ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
++ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
++
++ snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
++ (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
++ non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
++
++ /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
++ * length field of an LLC frame, which sets TOBE_DISC. We could set
++ * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
++ * protect the RX block).
++ *
++ * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
++ * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
++ * LLC can't encapsulate IP, so by definition
++ * these packets are NON_IP.
++ *
++ * Unicast mismatch will also cause TOBE_DISC, so the driver needs
++ * to check this.
++ */
++ if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
++ /* If all the other flags are zero then we can state the
++ * entire packet is ok, which will flag to the kernel not
++ * to recalculate checksums.
++ */
++ if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
++ *rx_ev_pkt_ok = 1;
++
++ rx_ev_tobe_disc = 0;
++
++ /* TOBE_DISC is set for unicast mismatch. But given that
++ * we can't trust TOBE_DISC here, we must validate the dest
++ * MAC address ourselves.
++ */
++ if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
++ rx_ev_tobe_disc = 1;
++ }
++
++ /* Count errors that are not in MAC stats. */
++ if (rx_ev_frm_trunc)
++ ++rx_queue->channel->n_rx_frm_trunc;
++ else if (rx_ev_tobe_disc)
++ ++rx_queue->channel->n_rx_tobe_disc;
++ else if (rx_ev_ip_hdr_chksum_err)
++ ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
++ else if (rx_ev_tcp_udp_chksum_err)
++ ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
++ if (rx_ev_ip_frag_err)
++ ++rx_queue->channel->n_rx_ip_frag_err;
++
++ /* The frame must be discarded if any of these are true. */
++ *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
++ rx_ev_tobe_disc | rx_ev_pause_frm);
++
++ /* TOBE_DISC is expected on unicast mismatches; don't print out an
++ * error message. FRM_TRUNC indicates RXDP dropped the packet due
++ * to a FIFO overflow.
++ */
++#ifdef EFX_ENABLE_DEBUG
++ if (rx_ev_other_err) {
++ EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
++ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
++ rx_queue->queue, EFX_QWORD_VAL(*event),
++ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
++ rx_ev_ip_hdr_chksum_err ?
++ " [IP_HDR_CHKSUM_ERR]" : "",
++ rx_ev_tcp_udp_chksum_err ?
++ " [TCP_UDP_CHKSUM_ERR]" : "",
++ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
++ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
++ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
++ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
++ rx_ev_pause_frm ? " [PAUSE]" : "",
++ snap ? " [SNAP/LLC]" : "");
++ }
++#endif
++
++ if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
++ efx->phy_type == PHY_TYPE_10XPRESS))
++ tenxpress_crc_err(efx);
++}
++
++
++/* Handle receive events that are not in-order. */
++static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
++ unsigned index)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ unsigned expected, dropped;
++
++ expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
++ dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
++ FALCON_RXD_RING_MASK);
++ EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
++ dropped, index, expected);
++
++ atomic_inc(&efx->errors.missing_event);
++ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
++ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
++}
++
++
++/* Handle a packet received event
++ *
++ * Falcon silicon gives a "discard" flag if it's a unicast packet with the
++ * wrong destination address.
++ * The "is multicast" and "matches multicast filter" flags can be used to
++ * discard non-matching multicast packets.
++ */
++static inline int falcon_handle_rx_event(struct efx_channel *channel,
++ const efx_qword_t *event)
++{
++ unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
++ unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
++ unsigned expected_ptr;
++ int discard = 0, checksummed;
++ struct efx_rx_queue *rx_queue;
++ struct efx_nic *efx = channel->efx;
++
++ /* Basic packet information */
++ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
++ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
++ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
++ WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
++ WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
++
++ rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
++ rx_queue = &efx->rx_queue[rx_ev_q_label];
++
++ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
++ expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
++ if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
++ falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
++ return rx_ev_q_label;
++ }
++
++ if (likely(rx_ev_pkt_ok)) {
++ /* If packet is marked as OK and packet type is TCP/IPv4 or
++ * UDP/IPv4, then we can rely on the hardware checksum.
++ */
++ checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
++ } else {
++ falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
++ &discard, rx_ev_byte_cnt);
++ checksummed = 0;
++ }
++
++ /* Detect multicast packets that didn't match the filter */
++ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
++ if (rx_ev_mcast_pkt) {
++ unsigned int rx_ev_mcast_hash_match =
++ EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
++
++ if (unlikely(!rx_ev_mcast_hash_match))
++ discard = 1;
++ }
++
++ /* Handle received packet */
++ efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
++ checksummed, discard);
++
++ return rx_ev_q_label;
++}
++
++/* Global events are basically PHY events */
++static void falcon_handle_global_event(struct efx_channel *channel,
++ efx_qword_t *event)
++{
++ struct efx_nic *efx = channel->efx;
++ int is_phy_event = 0, handled = 0;
++
++ /* Check for interrupt on either port. Some boards have a
++ * single PHY wired to the interrupt line for port 1. */
++ if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
++ EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
++ EFX_QWORD_FIELD(*event, XG_PHY_INTR))
++ is_phy_event = 1;
++
++ if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
++ EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
++ is_phy_event = 1;
++
++ if (is_phy_event) {
++ efx->phy_op->clear_interrupt(efx);
++ queue_work(efx->workqueue, &efx->reconfigure_work);
++ handled = 1;
++ }
++
++ if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
++ EFX_ERR(efx, "channel %d seen global RX_RESET "
++ "event. Resetting.\n", channel->channel);
++
++ atomic_inc(&efx->errors.rx_reset);
++ efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
++ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
++ handled = 1;
++ }
++
++ if (!handled)
++ EFX_ERR(efx, "channel %d unknown global event "
++ EFX_QWORD_FMT "\n", channel->channel,
++ EFX_QWORD_VAL(*event));
++}
++
++static void falcon_handle_driver_event(struct efx_channel *channel,
++ efx_qword_t *event)
++{
++ struct efx_nic *efx = channel->efx;
++ unsigned int ev_sub_code;
++ unsigned int ev_sub_data;
++
++ ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
++ ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
++
++ switch (ev_sub_code) {
++ case TX_DESCQ_FLS_DONE_EV_DECODE:
++ EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
++ channel->channel, ev_sub_data);
++ EFX_DL_CALLBACK(efx, event, event);
++ break;
++ case RX_DESCQ_FLS_DONE_EV_DECODE:
++ EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
++ channel->channel, ev_sub_data);
++ EFX_DL_CALLBACK(efx, event, event);
++ break;
++ case EVQ_INIT_DONE_EV_DECODE:
++ EFX_LOG(efx, "channel %d EVQ %d initialised\n",
++ channel->channel, ev_sub_data);
++ break;
++ case SRM_UPD_DONE_EV_DECODE:
++ EFX_TRACE(efx, "channel %d SRAM update done\n",
++ channel->channel);
++ EFX_DL_CALLBACK(efx, event, event);
++ break;
++ case WAKE_UP_EV_DECODE:
++ EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
++ channel->channel, ev_sub_data);
++ EFX_DL_CALLBACK(efx, event, event);
++ break;
++ case TIMER_EV_DECODE:
++ EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
++ channel->channel, ev_sub_data);
++ EFX_DL_CALLBACK(efx, event, event);
++ break;
++ case RX_RECOVERY_EV_DECODE:
++ EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
++ "Resetting.\n", channel->channel);
++
++ atomic_inc(&efx->errors.rx_reset);
++ efx_schedule_reset(efx,
++ EFX_WORKAROUND_6555(efx) ?
++ RESET_TYPE_RX_RECOVERY :
++ RESET_TYPE_DISABLE);
++ break;
++ case RX_DSC_ERROR_EV_DECODE:
++ EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
++ " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
++ atomic_inc(&efx->errors.rx_desc_fetch);
++ efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
++ break;
++ case TX_DSC_ERROR_EV_DECODE:
++ EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
++ " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
++ atomic_inc(&efx->errors.tx_desc_fetch);
++ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
++ break;
++ default:
++ EFX_TRACE(efx, "channel %d unknown driver event code %d "
++ "data %04x\n", channel->channel, ev_sub_code,
++ ev_sub_data);
++ EFX_DL_CALLBACK(efx, event, event);
++ break;
++ }
++}
++
++#if defined(EFX_USE_FASTCALL)
++int fastcall falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
++#else
++int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
++#endif
++{
++ unsigned int read_ptr;
++ efx_qword_t event, *p_event;
++ int ev_code;
++ int rxq;
++ int rxdmaqs = 0;
++
++ read_ptr = channel->eventq_read_ptr;
++
++ do {
++ p_event = falcon_event(channel, read_ptr);
++ event = *p_event;
++
++ if (!falcon_event_present(&event))
++ /* End of events */
++ break;
++
++ EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
++ channel->channel, EFX_QWORD_VAL(event));
++
++ /* Clear this event by marking it all ones */
++ EFX_SET_QWORD(*p_event);
++
++ ev_code = EFX_QWORD_FIELD(event, EV_CODE);
++
++ switch (ev_code) {
++ case RX_IP_EV_DECODE:
++ rxq = falcon_handle_rx_event(channel, &event);
++ rxdmaqs |= (1 << rxq);
++ (*rx_quota)--;
++ break;
++ case TX_IP_EV_DECODE:
++ falcon_handle_tx_event(channel, &event);
++ break;
++ case DRV_GEN_EV_DECODE:
++ channel->eventq_magic
++ = EFX_QWORD_FIELD(event, EVQ_MAGIC);
++ EFX_LOG(channel->efx, "channel %d received generated "
++ "event "EFX_QWORD_FMT"\n", channel->channel,
++ EFX_QWORD_VAL(event));
++ break;
++ case GLOBAL_EV_DECODE:
++ falcon_handle_global_event(channel, &event);
++ break;
++ case DRIVER_EV_DECODE:
++ falcon_handle_driver_event(channel, &event);
++ break;
++ default:
++ EFX_ERR(channel->efx, "channel %d unknown event type %d"
++ " (data " EFX_QWORD_FMT ")\n", channel->channel,
++ ev_code, EFX_QWORD_VAL(event));
++ }
++
++ /* Increment read pointer */
++ read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
++
++ } while (*rx_quota);
++
++ channel->eventq_read_ptr = read_ptr;
++ return rxdmaqs;
++}
++
++void falcon_set_int_moderation(struct efx_channel *channel)
++{
++ efx_dword_t timer_cmd;
++ struct efx_nic *efx = channel->efx;
++
++ /* Set timer register */
++ if (channel->irq_moderation) {
++ /* Round to the resolution supported by the hardware. The value
++ * we program is zero-based, so the actual interrupt moderation
++ * achieved is ((x + 1) * res).
++ */
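++ /* Worked example: a request of 23us with res == 5 is rounded
++ * down to 20us and programmed as TIMER_VAL (20 / 5) - 1 == 3;
++ * the hardware then achieves (3 + 1) * 5 == 20us. */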
++ unsigned int res = 5;
++ channel->irq_moderation -= (channel->irq_moderation % res);
++ if (channel->irq_moderation < res)
++ channel->irq_moderation = res;
++ EFX_POPULATE_DWORD_2(timer_cmd,
++ TIMER_MODE, TIMER_MODE_INT_HLDOFF,
++ TIMER_VAL,
++ (channel->irq_moderation / res) - 1);
++ } else {
++ EFX_POPULATE_DWORD_2(timer_cmd,
++ TIMER_MODE, TIMER_MODE_DIS,
++ TIMER_VAL, 0);
++ }
++ falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
++ channel->evqnum);
++}
++
++/* Allocate buffer table entries for event queue */
++int falcon_probe_eventq(struct efx_channel *channel)
++{
++ struct efx_nic *efx = channel->efx;
++ struct falcon_nic_data *nic_data = efx->nic_data;
++ unsigned int evq_size;
++ int rc;
++
++ evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
++ rc = falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
++ if (rc)
++ return rc;
++
++ nic_data->resources.evq_int_min = max(nic_data->resources.evq_int_min,
++ (unsigned)channel->evqnum + 1);
++
++ return 0;
++}
++
++int falcon_init_eventq(struct efx_channel *channel)
++{
++ efx_oword_t evq_ptr;
++ struct efx_nic *efx = channel->efx;
++ int rc;
++
++ EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
++ channel->channel, channel->eventq.index,
++ channel->eventq.index + channel->eventq.entries - 1);
++
++ /* Pin event queue buffer */
++ rc = falcon_init_special_buffer(efx, &channel->eventq);
++ if (rc)
++ return rc;
++
++ /* Fill event queue with all ones (i.e. empty events) */
++ memset(channel->eventq.addr, 0xff, channel->eventq.len);
++
++ /* Push event queue to card */
++ EFX_POPULATE_OWORD_3(evq_ptr,
++ EVQ_EN, 1,
++ EVQ_SIZE, FALCON_EVQ_ORDER,
++ EVQ_BUF_BASE_ID, channel->eventq.index);
++ falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
++ channel->evqnum);
++
++ falcon_set_int_moderation(channel);
++
++ return 0;
++}
++
++void falcon_fini_eventq(struct efx_channel *channel)
++{
++ efx_oword_t eventq_ptr;
++ struct efx_nic *efx = channel->efx;
++
++ /* Remove event queue from card */
++ EFX_ZERO_OWORD(eventq_ptr);
++ falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
++ channel->evqnum);
++
++ /* Unpin event queue */
++ falcon_fini_special_buffer(efx, &channel->eventq);
++}
++
++/* Free buffers backing event queue */
++void falcon_remove_eventq(struct efx_channel *channel)
++{
++ falcon_free_special_buffer(channel->efx, &channel->eventq);
++}
++
++
++/* Generates a test event on the event queue. A subsequent call to
++ * process_eventq() should pick up the event and place the value of
++ * "magic" into channel->eventq_magic;
++ */
++void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
++{
++ efx_qword_t test_event;
++
++ EFX_POPULATE_QWORD_2(test_event,
++ EV_CODE, DRV_GEN_EV_DECODE,
++ EVQ_MAGIC, magic);
++ falcon_generate_event(channel, &test_event);
++}
++
++
++/**************************************************************************
++ *
++ * Falcon hardware interrupts
++ * The hardware interrupt handler does very little work; all the event
++ * queue processing is carried out by per-channel tasklets.
++ *
++ **************************************************************************/
++
++/* Enable/disable/generate Falcon interrupts */
++static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
++ int force)
++{
++ efx_oword_t int_en_reg_ker;
++
++ EFX_POPULATE_OWORD_2(int_en_reg_ker,
++ KER_INT_KER, force,
++ DRV_INT_EN_KER, enabled);
++ falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
++}
++
++void falcon_enable_interrupts(struct efx_nic *efx)
++{
++ efx_oword_t int_adr_reg_ker;
++ struct efx_channel *channel;
++
++ /* Zero INT_KER */
++ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
++ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
++
++ /* Program INT_ADR_KER_REG */
++ EFX_POPULATE_OWORD_2(int_adr_reg_ker,
++ NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
++ INT_ADR_KER, efx->irq_status.dma_addr);
++ falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
++
++ /* Enable interrupts */
++ falcon_interrupts(efx, 1, 0);
++
++ /* Force processing of all the channels to get the EVQ RPTRs up to
++ * date */
++ efx_for_each_channel_with_interrupt(channel, efx)
++ efx_schedule_channel(channel);
++}
++
++void falcon_disable_interrupts(struct efx_nic *efx)
++{
++ /* Disable interrupts */
++ falcon_interrupts(efx, 0, 0);
++}
++
++/* Generate a Falcon test interrupt
++ * Interrupt must already have been enabled, otherwise nasty things
++ * may happen.
++ */
++void falcon_generate_interrupt(struct efx_nic *efx)
++{
++ falcon_interrupts(efx, 1, 1);
++}
++
++/* Acknowledge a legacy interrupt from Falcon
++ *
++ * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
++ *
++ * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
++ * BIU. Interrupt acknowledge is read-sensitive, so we must write
++ * instead (then read to ensure the BIU collector is flushed).
++ *
++ * NB most hardware supports MSI interrupts
++ */
++static inline void falcon_irq_ack_a1(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++
++ EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
++ falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
++ falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
++}
++
++/* Process a fatal interrupt
++ * Disable bus mastering ASAP and schedule a reset
++ */
++static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
++{
++ efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
++ efx_oword_t fatal_intr;
++ int error, mem_perr;
++ static int n_int_errors;
++
++ falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
++ error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
++
++ EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
++ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
++ EFX_OWORD_VAL(fatal_intr),
++ error ? "disabling bus mastering" : "no recognised error");
++ if (error == 0)
++ goto out;
++
++ /* If this is a memory parity error dump which blocks are offending */
++ mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
++ if (mem_perr) {
++ efx_oword_t reg;
++ falcon_read(efx, &reg, MEM_STAT_REG_KER);
++ EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
++ EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
++ }
++
++ /* Disable DMA bus mastering on both devices */
++ pci_disable_device(efx->pci_dev);
++ if (efx->type->is_dual_func)
++ pci_disable_device(efx->pci_dev2);
++
++ if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
++ EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
++ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
++ } else {
++ EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
++ "NIC will be disabled\n");
++ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
++ }
++out:
++ return IRQ_HANDLED;
++}
++
++/* Handle a legacy interrupt from Falcon
++ * Acknowledges the interrupt and schedules event queue processing.
++ *
++ * This routine must guarantee not to touch the hardware when
++ * interrupts are disabled, to allow for correct semantics of
++ * efx_suspend() and efx_resume().
++ */
++#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
++static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
++#else
++static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id,
++ struct pt_regs *regs
++ __attribute__ ((unused)))
++#endif
++{
++ struct efx_nic *efx = (struct efx_nic *)dev_id;
++ efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
++ struct efx_channel *channel;
++ efx_dword_t reg;
++ u32 queues;
++ int syserr;
++
++ /* Read the ISR which also ACKs the interrupts */
++ falcon_readl(efx, &reg, INT_ISR0_B0);
++ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
++
++ /* Check to see if we have a serious error condition */
++ syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
++ if (unlikely(syserr))
++ return falcon_fatal_interrupt(efx);
++
++ if (queues == 0)
++ return IRQ_NONE;
++
++ efx->last_irq_cpu = raw_smp_processor_id();
++ EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
++ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
++
++ /* Schedule processing of any interrupting queues */
++ channel = &efx->channel[0];
++ while (queues) {
++ if (queues & 0x01)
++ efx_schedule_channel(channel);
++ channel++;
++ queues >>= 1;
++ }
++
++ return IRQ_HANDLED;
++}
++
++
++#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
++static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
++#else
++static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id,
++ struct pt_regs *regs
++ __attribute__ ((unused)))
++#endif
++{
++ struct efx_nic *efx = (struct efx_nic *)dev_id;
++ efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
++ struct efx_channel *channel;
++ int syserr;
++ int queues;
++
++ /* Check to see if this is our interrupt. If it isn't, we
++ * exit without having touched the hardware.
++ */
++ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
++ EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
++ raw_smp_processor_id());
++ return IRQ_NONE;
++ }
++ efx->last_irq_cpu = raw_smp_processor_id();
++ EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
++ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
++
++ /* Check to see if we have a serious error condition */
++ syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
++ if (unlikely(syserr))
++ return falcon_fatal_interrupt(efx);
++
++ /* Determine interrupting queues, clear interrupt status
++ * register and acknowledge the device interrupt.
++ */
++ BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
++ queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
++ EFX_ZERO_OWORD(*int_ker);
++ wmb(); /* Ensure the vector is cleared before interrupt ack */
++ falcon_irq_ack_a1(efx);
++
++ /* Schedule processing of any interrupting queues */
++ channel = &efx->channel[0];
++ while (queues) {
++ if (queues & 0x01)
++ efx_schedule_channel(channel);
++ channel++;
++ queues >>= 1;
++ }
++
++ return IRQ_HANDLED;
++}
++
++/* Handle an MSI interrupt from Falcon
++ *
++ * Handle an MSI hardware interrupt. This routine schedules event
++ * queue processing. No interrupt acknowledgement cycle is necessary.
++ * Also, we never need to check that the interrupt is for us, since
++ * MSI interrupts cannot be shared.
++ *
++ * This routine must guarantee not to touch the hardware when
++ * interrupts are disabled, to allow for correct semantics of
++ * efx_suspend() and efx_resume().
++ */
++#if !defined(EFX_HAVE_IRQ_HANDLER_REGS)
++static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
++#else
++static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs
++ __attribute__ ((unused)))
++#endif
++{
++ struct efx_channel *channel = (struct efx_channel *)dev_id;
++ struct efx_nic *efx = channel->efx;
++ efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
++ int syserr;
++
++ efx->last_irq_cpu = raw_smp_processor_id();
++ EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
++ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
++
++ /* Check to see if we have a serious error condition */
++ syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
++ if (unlikely(syserr))
++ return falcon_fatal_interrupt(efx);
++
++ /* Schedule processing of the channel */
++ efx_schedule_channel(channel);
++
++ return IRQ_HANDLED;
++}
++
++
++/* Setup RSS indirection table.
++ * This maps from the hash value of the packet to RXQ
++ */
++static void falcon_setup_rss_indir_table(struct efx_nic *efx)
++{
++ int i = 0;
++ unsigned long offset;
++ unsigned long flags __attribute__ ((unused));
++ efx_dword_t dword;
++
++ if (FALCON_REV(efx) < FALCON_REV_B0)
++ return;
++
++ for (offset = RX_RSS_INDIR_TBL_B0;
++ offset < RX_RSS_INDIR_TBL_B0 + 0x800;
++ offset += 0x10) {
++ EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
++ i % efx->rss_queues);
++ falcon_writel(efx, &dword, offset);
++ i++;
++ }
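++ /* Illustrative result (hypothetical rss_queues = 4): the loop
++ * above writes 128 entries (0x800 / 0x10), so the table reads
++ * 0, 1, 2, 3, 0, 1, ... and hash values are spread round-robin
++ * across the four RX queues.
++ */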
++}
++
++/* Hook interrupt handler(s)
++ * Try MSI and then legacy interrupts.
++ */
++int falcon_init_interrupt(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ int rc;
++
++ if (!EFX_INT_MODE_USE_MSI(efx)) {
++ irq_handler_t handler;
++ if (FALCON_REV(efx) >= FALCON_REV_B0)
++ handler = falcon_legacy_interrupt_b0;
++ else
++ handler = falcon_legacy_interrupt_a1;
++
++ rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
++ efx->name, efx);
++ if (rc) {
++ EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
++ efx->pci_dev->irq);
++ goto fail1;
++ }
++ return 0;
++ }
++
++ /* Hook MSI or MSI-X interrupt */
++ efx_for_each_channel_with_interrupt(channel, efx) {
++ rc = request_irq(channel->irq, falcon_msi_interrupt,
++ IRQF_PROBE_SHARED, /* Not shared */
++ efx->name, channel);
++ if (rc) {
++ EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
++ goto fail2;
++ }
++ }
++
++ return 0;
++
++ fail2:
++ efx_for_each_channel_with_interrupt(channel, efx)
++ free_irq(channel->irq, channel);
++ fail1:
++ return rc;
++}
++
++void falcon_fini_interrupt(struct efx_nic *efx)
++{
++ struct efx_channel *channel;
++ efx_oword_t reg;
++
++ /* Disable MSI/MSI-X interrupts */
++ efx_for_each_channel_with_interrupt(channel, efx)
++ if (channel->irq)
++ free_irq(channel->irq, channel);
++
++ /* ACK legacy interrupt */
++ if (FALCON_REV(efx) >= FALCON_REV_B0)
++ falcon_read(efx, &reg, INT_ISR0_B0);
++ else
++ falcon_irq_ack_a1(efx);
++
++ /* Disable legacy interrupt */
++ if (efx->legacy_irq)
++ free_irq(efx->legacy_irq, efx);
++}
++
++/**************************************************************************
++ *
++ * EEPROM/flash
++ *
++ **************************************************************************
++ */
++
++/* Wait for SPI command completion */
++static int falcon_spi_wait(struct efx_nic *efx)
++{
++ efx_oword_t reg;
++ int cmd_en, timer_active;
++ int count;
++
++ count = 0;
++ do {
++ falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
++ cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
++ timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
++ if (!cmd_en && !timer_active)
++ return 0;
++ udelay(10);
++ } while (++count < 10000); /* wait up to 100 ms */
++ EFX_ERR(efx, "timed out waiting for SPI\n");
++ return -ETIMEDOUT;
++}
++
++static int
++falcon_spi_read(const struct efx_spi_device *spi, struct efx_nic *efx,
++ unsigned int command, int address, void *data, unsigned int len)
++{
++ int addressed = (address >= 0);
++ efx_oword_t reg;
++ int rc;
++
++ /* Input validation */
++ if (len > FALCON_SPI_MAX_LEN)
++ return -EINVAL;
++
++ /* Acquire SPI lock */
++ mutex_lock(&efx->spi_lock);
++
++ /* Check SPI not currently being accessed */
++ rc = falcon_spi_wait(efx);
++ if (rc)
++ goto out;
++
++ /* Program address register, if we have an address */
++ if (addressed) {
++ EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
++ falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
++ }
++
++ /* Issue read command */
++ EFX_POPULATE_OWORD_7(reg,
++ EE_SPI_HCMD_CMD_EN, 1,
++ EE_SPI_HCMD_SF_SEL, spi->device_id,
++ EE_SPI_HCMD_DABCNT, len,
++ EE_SPI_HCMD_READ, EE_SPI_READ,
++ EE_SPI_HCMD_DUBCNT, 0,
++ EE_SPI_HCMD_ADBCNT,
++ (addressed ? spi->addr_len : 0),
++ EE_SPI_HCMD_ENC, command);
++ falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
++
++ /* Wait for read to complete */
++ rc = falcon_spi_wait(efx);
++ if (rc)
++ goto out;
++
++ /* Read data */
++ falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
++ memcpy(data, &reg, len);
++
++ out:
++ /* Release SPI lock */
++ mutex_unlock(&efx->spi_lock);
++
++ return rc;
++}
++
++static int
++falcon_spi_write(const struct efx_spi_device *spi, struct efx_nic *efx,
++ unsigned int command, int address, const void *data,
++ unsigned int len)
++{
++ int addressed = (address >= 0);
++ efx_oword_t reg;
++ int rc;
++
++ /* Input validation */
++ if (len > (addressed ? efx_spi_write_limit(spi, address)
++ : FALCON_SPI_MAX_LEN))
++ return -EINVAL;
++
++ /* Acquire SPI lock */
++ mutex_lock(&efx->spi_lock);
++
++ /* Check SPI not currently being accessed */
++ rc = falcon_spi_wait(efx);
++ if (rc)
++ goto out;
++
++ /* Program address register, if we have an address */
++ if (addressed) {
++ EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
++ falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
++ }
++
++ /* Program data register, if we have data */
++ if (data) {
++ memcpy(&reg, data, len);
++ falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
++ }
++
++ /* Issue write command */
++ EFX_POPULATE_OWORD_7(reg,
++ EE_SPI_HCMD_CMD_EN, 1,
++ EE_SPI_HCMD_SF_SEL, spi->device_id,
++ EE_SPI_HCMD_DABCNT, len,
++ EE_SPI_HCMD_READ, EE_SPI_WRITE,
++ EE_SPI_HCMD_DUBCNT, 0,
++ EE_SPI_HCMD_ADBCNT,
++ (addressed ? spi->addr_len : 0),
++ EE_SPI_HCMD_ENC, command);
++ falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
++
++ /* Wait for write to complete */
++ rc = falcon_spi_wait(efx);
++ if (rc)
++ goto out;
++
++ out:
++ /* Release SPI lock */
++ mutex_unlock(&efx->spi_lock);
++
++ return rc;
++}
++
++/**************************************************************************
++ *
++ * MAC wrapper
++ *
++ **************************************************************************
++ */
++void falcon_drain_tx_fifo(struct efx_nic *efx)
++{
++ efx_oword_t temp;
++ efx_oword_t mcast_reg0;
++ efx_oword_t mcast_reg1;
++ int count;
++
++ if (FALCON_REV(efx) < FALCON_REV_B0)
++ return;
++
++ falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
++ /* There is no point in draining more than once */
++ if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
++ return;
++
++ /* MAC stats will fail whilst the TX fifo is draining. Serialise
++ * the drain sequence with the statistics fetch */
++ spin_lock(&efx->stats_lock);
++
++ EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
++ falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
++
++ falcon_read(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
++ falcon_read(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);
++
++ /* Reset the MAC and EM block. */
++ falcon_read(efx, &temp, GLB_CTL_REG_KER);
++ EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
++ EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
++ EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
++ falcon_write(efx, &temp, GLB_CTL_REG_KER);
++
++ count = 0;
++ while (1) {
++ falcon_read(efx, &temp, GLB_CTL_REG_KER);
++ if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
++ !EFX_OWORD_FIELD(temp, RST_XGRX) &&
++ !EFX_OWORD_FIELD(temp, RST_EM)) {
++ EFX_LOG(efx, "Completed MAC reset after %d loops\n",
++ count);
++ break;
++ }
++ if (count > 20) {
++ EFX_ERR(efx, "MAC reset failed\n");
++ break;
++ }
++ count++;
++ udelay(10);
++ }
++
++ spin_unlock(&efx->stats_lock);
++
++ /* Restore the multicast hash registers. */
++ falcon_write(efx, &mcast_reg0, MAC_MCAST_HASH_REG0_KER);
++ falcon_write(efx, &mcast_reg1, MAC_MCAST_HASH_REG1_KER);
++
++ /* If we've reset the EM block and the link is up, then
++ * we'll have to kick the XAUI link so the PHY can recover */
++ if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
++ falcon_reset_xaui(efx);
++}
++
++void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
++{
++ struct falcon_nic_data *nic_data = efx->nic_data;
++ efx_oword_t temp;
++ int changing_loopback;
++
++ if (FALCON_REV(efx) < FALCON_REV_B0)
++ return;
++
++ /* Isolate the MAC -> RX */
++ falcon_read(efx, &temp, RX_CFG_REG_KER);
++ EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
++ falcon_write(efx, &temp, RX_CFG_REG_KER);
++
++ /* Synchronise the EM block against any loopback mode changes by
++ * draining the TX fifo and resetting. */
++ changing_loopback = (efx->loopback_mode != nic_data->old_loopback_mode);
++ nic_data->old_loopback_mode = efx->loopback_mode;
++ if (changing_loopback || !efx->link_up)
++ falcon_drain_tx_fifo(efx);
++}
++
++void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
++{
++ efx_oword_t reg;
++ int link_speed;
++ unsigned int tx_fc;
++
++ if (efx->link_options & GM_LPA_10000)
++ link_speed = 0x3;
++ else if (efx->link_options & GM_LPA_1000)
++ link_speed = 0x2;
++ else if (efx->link_options & GM_LPA_100)
++ link_speed = 0x1;
++ else
++ link_speed = 0x0;
++ /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
++ * as advertised. Disable to ensure packets are not
++ * indefinitely held and TX queue can be flushed at any point
++ * while the link is down.
++ */
++ EFX_POPULATE_OWORD_5(reg,
++ MAC_XOFF_VAL, 0xffff /* max pause time */,
++ MAC_BCAD_ACPT, 1,
++ MAC_UC_PROM, efx->promiscuous,
++ MAC_LINK_STATUS, 1, /* always set */
++ MAC_SPEED, link_speed);
++ /* On B0, MAC backpressure can be disabled and packets get
++ * discarded. */
++ if (FALCON_REV(efx) >= FALCON_REV_B0) {
++ EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
++ !efx->link_up);
++ }
++
++ falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
++
++ /*
++ * Transmission of pause frames when RX crosses the threshold is
++ * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
++ *
++ * Action on receipt of pause frames is controlled by XM_DIS_FCNTL
++ */
++ tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
++ falcon_read(efx, &reg, RX_CFG_REG_KER);
++ EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
++
++ /* Unisolate the MAC -> RX */
++ if (FALCON_REV(efx) >= FALCON_REV_B0)
++ EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
++ falcon_write(efx, &reg, RX_CFG_REG_KER);
++}
++
++int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
++{
++ efx_oword_t reg;
++ u32 *dma_done;
++ int i;
++
++ if (disable_dma_stats)
++ return 0;
++
++ /* Statistics fetch will fail if the MAC is in TX drain */
++ if (FALCON_REV(efx) >= FALCON_REV_B0) {
++ efx_oword_t temp;
++ falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
++ if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
++ return 0;
++ }
++
++ /* Clear completion pointer */
++ dma_done = (efx->stats_buffer.addr + done_offset);
++ *dma_done = FALCON_STATS_NOT_DONE;
++ wmb(); /* ensure done flag is clear */
++
++ /* Initiate DMA transfer of stats */
++ EFX_POPULATE_OWORD_2(reg,
++ MAC_STAT_DMA_CMD, 1,
++ MAC_STAT_DMA_ADR,
++ efx->stats_buffer.dma_addr);
++ falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
++
++ /* Wait for transfer to complete */
++ for (i = 0; i < 400; i++) {
++ if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
++ return 0;
++ udelay(10);
++ }
++
++ if (EFX_WORKAROUND_8419(efx)) {
++ disable_dma_stats = 1;
++ EFX_INFO(efx, "MAC stats DMA disabled\n");
++ } else {
++ EFX_ERR(efx, "timed out waiting for statistics\n");
++ }
++
++ return -ETIMEDOUT;
++}
++
++/**************************************************************************
++ *
++ * PHY access via GMII
++ *
++ **************************************************************************
++ */
++
++/* Use the top bit of the MII PHY id to indicate the PHY type
++ * (1G/10G), with the remaining bits as the actual PHY id.
++ *
++ * This allows us to avoid leaking information from the mii_if_info
++ * structure into other data structures.
++ */
++#define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR)
++#define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
++#define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1)
++#define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1)
++#define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1))
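++/* Sketch of the packing above (the real width comes from the register
++ * definitions, so the numbers here are only illustrative): assuming a
++ * 10-bit MD_PRT_DEV_ADR field, a 10G PHY with id 5 would be encoded as
++ * FALCON_PHY_ID_10G | 5 = 0x405; masking with FALCON_PHY_ID_ID_MASK
++ * recovers 5, and the top bit selects the 10G (clause 45) path.
++ */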
++
++
++/* Packing the clause 45 port and device fields into a single value */
++#define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
++#define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH
++#define MD_DEV_ADR_COMP_LBN 0
++#define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH
++
++
++/* Wait for GMII access to complete */
++static int falcon_gmii_wait(struct efx_nic *efx)
++{
++ efx_dword_t md_stat;
++ int count;
++
++ for (count = 0; count < 1000; count++) { /* wait up to 10 ms */
++ falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
++ if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
++ if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
++ EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
++ EFX_ERR(efx, "error from GMII access "
++ EFX_DWORD_FMT"\n",
++ EFX_DWORD_VAL(md_stat));
++ return -EIO;
++ }
++ return 0;
++ }
++ udelay(10);
++ }
++ EFX_ERR(efx, "timed out waiting for GMII\n");
++ return -ETIMEDOUT;
++}
++
++/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
++static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
++ int addr, int value)
++{
++ struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
++ unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
++ unsigned int phy_10g = phy_id & FALCON_PHY_ID_10G;
++ efx_oword_t reg;
++
++ /* The 'generic' prt/dev packing in mdio_10g.h is conveniently
++ * chosen so that the only current user, Falcon, can take the
++ * packed value and use it directly.
++ * Fail to build if this assumption is broken.
++ */
++ BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
++ BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
++ BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
++ BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);
++
++ if (phy_id2 == PHY_ADDR_INVALID)
++ return;
++
++ /* See falcon_mdio_read for an explanation. */
++ if (EFX_ISCLAUSE45(efx) && !phy_10g) {
++ int mmd = ffs(efx->phy_op->mmds) - 1;
++ EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
++ phy_id2 = mdio_clause45_pack(phy_id2, mmd)
++ & FALCON_PHY_ID_ID_MASK;
++ phy_10g = 1;
++ }
++
++ EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
++ addr, value);
++
++ /* Obtain PHY lock */
++ spin_lock_bh(&efx->phy_lock);
++
++ /* Check MII not currently being accessed */
++ if (falcon_gmii_wait(efx) != 0)
++ goto out;
++
++ /* Write the address/ID register */
++ EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
++ falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
++
++ if (phy_10g)
++ EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
++ else
++ /* MDIO clause 22 */
++ EFX_POPULATE_OWORD_2(reg,
++ MD_PRT_ADR, phy_id2,
++ MD_DEV_ADR, addr);
++ falcon_write(efx, &reg, MD_ID_REG_KER);
++
++ /* Write data */
++ EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
++ falcon_write(efx, &reg, MD_TXD_REG_KER);
++
++ EFX_POPULATE_OWORD_2(reg,
++ MD_WRC, 1,
++ MD_GC, (phy_10g ? 0 : 1));
++ falcon_write(efx, &reg, MD_CS_REG_KER);
++
++ /* Wait for data to be written */
++ if (falcon_gmii_wait(efx) != 0) {
++ /* Abort the write operation */
++ EFX_POPULATE_OWORD_2(reg,
++ MD_WRC, 0,
++ MD_GC, 1);
++ falcon_write(efx, &reg, MD_CS_REG_KER);
++ udelay(10);
++ }
++
++ out:
++ /* Release PHY lock */
++ spin_unlock_bh(&efx->phy_lock);
++}
++
++/* Reads a GMII register from a PHY connected to Falcon. If no value
++ * could be read, -1 will be returned. */
++static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
++{
++ struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
++ unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
++ unsigned int phy_10g = phy_id & FALCON_PHY_ID_10G;
++ efx_oword_t reg;
++ int value = -1;
++ unsigned long flags __attribute__ ((unused));
++
++ if (phy_addr == PHY_ADDR_INVALID)
++ return -1;
++
++ /* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
++ * but the generic Linux code does not make any distinction or have
++ * any state for this.
++ * We spot the case where someone tried to talk 22 to a 45 PHY and
++ * redirect the request to the lowest numbered MMD as a clause45
++ * request. This is enough to allow simple queries like id and link
++ * state to succeed. TODO: We may need to do more in future.
++ */
++ if (EFX_ISCLAUSE45(efx) && !phy_10g) {
++ int mmd = ffs(efx->phy_op->mmds) - 1;
++ EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
++ phy_addr = mdio_clause45_pack(phy_addr, mmd)
++ & FALCON_PHY_ID_ID_MASK;
++ phy_10g = 1;
++ }
++
++ /* Obtain PHY lock */
++ spin_lock_bh(&efx->phy_lock);
++
++ /* Check MII not currently being accessed */
++ if (falcon_gmii_wait(efx) != 0)
++ goto out;
++
++ if (!phy_10g) {
++ /* Write the address registers */
++ EFX_POPULATE_OWORD_2(reg,
++ MD_PRT_ADR, phy_addr,
++ MD_DEV_ADR, addr);
++ falcon_write(efx, &reg, MD_ID_REG_KER);
++ /* Request data to be read */
++ EFX_POPULATE_OWORD_2(reg,
++ MD_RIC, 1,
++ MD_GC, 1);
++ } else {
++ EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
++ falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
++
++ EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
++ falcon_write(efx, &reg, MD_ID_REG_KER);
++
++ /* Request data to be read */
++ EFX_POPULATE_OWORD_2(reg,
++ MD_RDC, 1,
++ MD_GC, 0);
++ }
++ falcon_write(efx, &reg, MD_CS_REG_KER);
++
++ /* Wait for data to become available */
++ value = falcon_gmii_wait(efx);
++ if (value == 0) {
++ falcon_read(efx, &reg, MD_RXD_REG_KER);
++ value = EFX_OWORD_FIELD(reg, MD_RXD);
++ EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
++ phy_id, addr, value);
++ } else {
++ /* Abort the read operation */
++ EFX_POPULATE_OWORD_2(reg,
++ MD_RIC, 0,
++ MD_GC, 1);
++ falcon_write(efx, &reg, MD_CS_REG_KER);
++
++ EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
++ "error %d\n", phy_id, addr, value);
++ }
++
++ out:
++ /* Release PHY lock */
++ spin_unlock_bh(&efx->phy_lock);
++
++ return value;
++}
++
++static void falcon_init_mdio(struct mii_if_info *gmii)
++{
++ gmii->mdio_read = falcon_mdio_read;
++ gmii->mdio_write = falcon_mdio_write;
++ gmii->phy_id_mask = FALCON_PHY_ID_MASK;
++ gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_DEV_ADR)) - 1);
++}
++
++static int falcon_probe_gmac_port(struct efx_nic *efx)
++{
++ struct efx_phy_operations *phy_op = efx->phy_op;
++
++ efx->mac_op = &falcon_gmac_operations;
++ efx->loopback_modes = LOOPBACKS_1G_INTERNAL | phy_op->loopbacks;
++ efx->startup_loopbacks = ((1 << LOOPBACK_MAC) |
++ (1 << phy_op->startup_loopback));
++ return 0;
++}
++
++static int falcon_probe_xmac_port(struct efx_nic *efx)
++{
++ struct efx_phy_operations *phy_op = efx->phy_op;
++
++ efx->mac_op = &falcon_xmac_operations;
++
++ /* The Falcon B0 FPGA only supports XGMII loopback */
++ if (FALCON_REV(efx) >= FALCON_REV_B0 && !efx->is_asic)
++ efx->loopback_modes = (1 << LOOPBACK_XGMII);
++ else
++ efx->loopback_modes = LOOPBACKS_10G_INTERNAL;
++ efx->loopback_modes |= phy_op->loopbacks;
++
++ efx->startup_loopbacks = ((1 << LOOPBACK_XGMII) |
++ (1 << phy_op->startup_loopback));
++ return 0;
++}
++
++static int falcon_probe_phy(struct efx_nic *efx)
++{
++ switch (efx->phy_type) {
++ case PHY_TYPE_1G_ALASKA:
++ efx->phy_op = &alaska_phy_operations;
++ break;
++ case PHY_TYPE_10XPRESS:
++ efx->phy_op = &falcon_tenxpress_phy_ops;
++ break;
++ case PHY_TYPE_NONE:
++ efx->phy_op = &falcon_null_phy_ops;
++ break;
++ case PHY_TYPE_XFP:
++ efx->phy_op = &falcon_xfp_phy_ops;
++ break;
++ case PHY_TYPE_CX4_RTMR:
++ efx->phy_op = &falcon_txc_phy_ops;
++ break;
++ case PHY_TYPE_PM8358:
++ efx->phy_op = &falcon_pm8358_phy_ops;
++ break;
++ default:
++ EFX_ERR(efx, "Unknown PHY type %d\n",
++ efx->phy_type);
++ return -1;
++ }
++ return 0;
++}
++
++/* This call is responsible for hooking in the MAC and PHY operations */
++int falcon_probe_port(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Hook in PHY operations table */
++ rc = falcon_probe_phy(efx);
++ if (rc)
++ return rc;
++
++ /* Hook in MAC operations table */
++ if (EFX_IS10G(efx))
++ rc = falcon_probe_xmac_port(efx);
++ else
++ rc = falcon_probe_gmac_port(efx);
++ if (rc)
++ return rc;
++
++ EFX_LOG(efx, "created port using %cMAC\n",
++ EFX_IS10G(efx) ? 'X' : 'G');
++
++ /* Set up GMII structure for PHY */
++ efx->mii.supports_gmii = 1;
++ falcon_init_mdio(&efx->mii);
++
++ /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
++ if (FALCON_REV(efx) >= FALCON_REV_B0)
++ efx->flow_control = EFX_FC_RX | EFX_FC_TX;
++ else
++ efx->flow_control = EFX_FC_RX;
++
++ /* Allocate buffer for stats */
++ rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
++ FALCON_MAC_STATS_SIZE);
++ if (rc)
++ return rc;
++ EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
++ (unsigned long long)efx->stats_buffer.dma_addr,
++ efx->stats_buffer.addr,
++ virt_to_phys(efx->stats_buffer.addr));
++
++ return 0;
++}
++
++void falcon_remove_port(struct efx_nic *efx)
++{
++ /* Free stats buffer */
++ falcon_free_buffer(efx, &efx->stats_buffer);
++}
++
++/**************************************************************************
++ *
++ * Multicast filtering
++ *
++ **************************************************************************
++ */
++
++void falcon_set_multicast_hash(struct efx_nic *efx)
++{
++ union efx_multicast_hash falcon_mc_hash;
++
++ /* Broadcast packets go through the multicast hash filter.
++ * ether_crc_le() of the broadcast address is 0xbe2612ff
++ * so we always add bit 0xff to the mask we are given.
++ */
++ memcpy(&falcon_mc_hash, &efx->multicast_hash, sizeof(falcon_mc_hash));
++ set_bit_le(0xff, (void *)&falcon_mc_hash);
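++ /* For reference (assumed hardware behaviour): the filter presumably
++ * indexes this 256-bit table with the low 8 bits of ether_crc_le(),
++ * so the broadcast CRC 0xbe2612ff selects bit 0xff, which the line
++ * above forces on.
++ */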
++
++ falcon_write(efx, &falcon_mc_hash.oword[0], MAC_MCAST_HASH_REG0_KER);
++ falcon_write(efx, &falcon_mc_hash.oword[1], MAC_MCAST_HASH_REG1_KER);
++}
++
++/**************************************************************************
++ *
++ * Device reset
++ *
++ **************************************************************************
++ */
++
++static int falcon_clear_b0_memories(struct efx_nic *efx)
++{
++ /* Need to clear memories after a reset. On B0 we can do this
++ * via the net function.
++ */
++ int rc = 0, offset;
++ efx_oword_t blanko;
++ efx_dword_t blankd;
++ unsigned long membase_phys, membase_len;
++ void __iomem *membase_orig;
++ unsigned long flags __attribute__ ((unused));
++
++ EFX_ZERO_OWORD(blanko);
++ EFX_ZERO_DWORD(blankd);
++ membase_orig = efx->membase;
++ membase_phys = pci_resource_start(efx->pci_dev, efx->type->mem_bar);
++
++ for (offset = RX_FILTER_TBL0;
++ offset < RX_RSS_INDIR_TBL_B0;
++ offset += 0x10)
++ falcon_write(efx, &blanko, offset);
++
++ /* Clear RSS indirection table */
++ for (offset = RX_RSS_INDIR_TBL_B0;
++ offset < RX_RSS_INDIR_TBL_B0 + 0x800;
++ offset += 0x10)
++ /* Clear 6 bits every 16 bytes */
++ falcon_writel(efx, &blankd, offset);
++
++ /* Need to split this into several mappings so that the MSI-X
++ * table and PBA never get mapped
++ */
++ membase_phys = membase_phys + 0x2800000;
++ membase_len = 0x3000000 - 0x2800000;
++
++ efx->membase = ioremap_nocache(membase_phys, membase_len);
++ if (efx->membase == NULL) {
++ EFX_ERR(efx, "could not map memory BAR %d at %lx+%lx\n",
++ efx->type->mem_bar, membase_phys, membase_len);
++ rc = -ENOMEM;
++ goto out;
++ }
++ /* Clear the buffer table. The first 7/8 of it is a duplicate
++ * of the mapping at 0x800000 and must be accessed 2 DWORDs at
++ * a time. The final 1/8 must be accessed 4 DWORDs at a time.
++ * We make sure to obey both rules at the same time.
++ */
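++ /* Writing the four DWORDs of each 16-byte row in order, with
++ * barriers between them, counts both as two aligned 2-DWORD
++ * accesses and as one 4-DWORD access, so a single loop satisfies
++ * the access rules of both regions.
++ */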
++ for (offset = 0; offset < membase_len; offset += 0x10) {
++ spin_lock_irqsave(&efx->biu_lock, flags);
++ _falcon_writel(efx, 0, offset + 0x0);
++ wmb();
++ _falcon_writel(efx, 0, offset + 0x4);
++ wmb();
++ _falcon_writel(efx, 0, offset + 0x8);
++ wmb();
++ _falcon_writel(efx, 0, offset + 0xc);
++ mmiowb();
++ spin_unlock_irqrestore(&efx->biu_lock, flags);
++ }
++
++ iounmap(efx->membase);
++
++out:
++ /* Restore */
++ efx->membase = membase_orig;
++
++ return rc;
++}
++
++
++/* Resets NIC to known state. This routine must be called in process
++ * context and is allowed to sleep. */
++int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
++{
++ efx_oword_t glb_ctl_reg_ker;
++ int rc;
++
++ EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
++
++ /* Initiate device reset */
++ if (method == RESET_TYPE_WORLD) {
++ /* Save PCI config space */
++ rc = pci_save_state(efx->pci_dev);
++ if (rc) {
++ EFX_ERR(efx, "failed to backup PCI state of primary "
++ "function prior to hardware reset\n");
++ goto fail1;
++ }
++ if (efx->type->is_dual_func) {
++ rc = pci_save_state(efx->pci_dev2);
++ if (rc) {
++ EFX_ERR(efx, "failed to backup PCI state of "
++ "secondary function prior to "
++ "hardware reset\n");
++ goto fail2;
++ }
++ }
++
++ EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
++ EXT_PHY_RST_DUR, 0x7,
++ SWRST, 1);
++ } else {
++ int reset_phy = (method == RESET_TYPE_INVISIBLE ?
++ EXCLUDE_FROM_RESET : 0);
++
++ EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
++ EXT_PHY_RST_CTL, reset_phy,
++ PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
++ PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
++ PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
++ EE_RST_CTL, EXCLUDE_FROM_RESET,
++ EXT_PHY_RST_DUR, 0x7 /* 10ms */,
++ SWRST, 1);
++ }
++ falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
++
++ /* Wait for 50ms for the chip to come out of reset */
++ EFX_LOG(efx, "waiting for hardware reset\n");
++ schedule_timeout_uninterruptible(HZ / 20);
++
++ /* Restore PCI configuration if needed */
++ if (method == RESET_TYPE_WORLD) {
++ if (efx->type->is_dual_func) {
++ rc = pci_restore_state(efx->pci_dev2);
++ if (rc) {
++ EFX_ERR(efx, "failed to restore PCI config for "
++ "the secondary function\n");
++ goto fail3;
++ }
++ }
++ rc = pci_restore_state(efx->pci_dev);
++ if (rc) {
++ EFX_ERR(efx, "failed to restore PCI config for the "
++ "primary function\n");
++ goto fail4;
++ }
++ EFX_LOG(efx, "successfully restored PCI config\n");
++ }
++
++ /* Assert that reset complete */
++ falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
++ if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
++ rc = -ETIMEDOUT;
++ EFX_ERR(efx, "timed out waiting for hardware reset\n");
++ goto fail5;
++ }
++ EFX_LOG(efx, "hardware reset complete\n");
++
++ if (EFX_WORKAROUND_8202(efx)) {
++ rc = falcon_clear_b0_memories(efx);
++ if (rc)
++ goto fail6;
++ }
++
++ return 0;
++
++ /* pci_save_state() and pci_restore_state() MUST be called in pairs */
++fail2:
++fail3:
++ pci_restore_state(efx->pci_dev);
++ /* fall-thru */
++fail1:
++fail4:
++fail5:
++fail6:
++ return rc;
++}
++
++/* Zeroes out the SRAM contents. This routine must be called in
++ * process context and is allowed to sleep.
++ */
++static int falcon_reset_sram(struct efx_nic *efx)
++{
++ efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
++ int count, onchip, sram_cfg_val;
++
++ /* Set the SRAM wake/sleep GPIO appropriately. */
++ onchip = (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY);
++ falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
++ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
++ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, onchip ? 1 : 0);
++ falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
++
++ /* Initiate SRAM reset */
++ sram_cfg_val = (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) ?
++ 0 : efx->external_sram_cfg;
++
++ EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
++ SRAM_OOB_BT_INIT_EN, 1,
++ SRM_NUM_BANKS_AND_BANK_SIZE, sram_cfg_val);
++ falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
++
++ /* Wait for SRAM reset to complete */
++ count = 0;
++ do {
++ EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
++
++ /* SRAM reset is slow; expect around 16ms */
++ schedule_timeout_uninterruptible(HZ / 50);
++
++ /* Check for reset complete */
++ falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
++ if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
++ EFX_LOG(efx, "SRAM reset complete\n");
++
++ return 0;
++ }
++ } while (++count < 20); /* wait up to 0.4 s */
++
++ EFX_ERR(efx, "timed out waiting for SRAM reset\n");
++ return -ETIMEDOUT;
++}
++
++static void falcon_spi_device_init(struct efx_spi_device **spi_device_ret,
++ unsigned int device_id, u32 device_type)
++{
++ struct efx_spi_device *spi_device;
++
++ if (device_type != 0) {
++ spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
++ spi_device->device_id = device_id;
++ spi_device->size =
++ 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
++ spi_device->addr_len =
++ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
++ spi_device->munge_address = (spi_device->size == 1 << 9 &&
++ spi_device->addr_len == 1);
++ spi_device->erase_command =
++ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
++ spi_device->erase_size =
++ 1 << SPI_DEV_TYPE_FIELD(device_type,
++ SPI_DEV_TYPE_ERASE_SIZE);
++ spi_device->block_size =
++ 1 << SPI_DEV_TYPE_FIELD(device_type,
++ SPI_DEV_TYPE_BLOCK_SIZE);
++ spi_device->read = falcon_spi_read;
++ spi_device->write = falcon_spi_write;
++ } else {
++ spi_device = NULL;
++ }
++
++ kfree(*spi_device_ret);
++ *spi_device_ret = spi_device;
++}
++
++/* Extract non-volatile configuration */
++static int falcon_probe_nvconfig(struct efx_nic *efx)
++{
++ int rc;
++ struct falcon_nvconfig *nvconfig;
++ struct efx_spi_device *spi;
++ size_t offset, len;
++ int magic_num, struct_ver, board_rev, onchip_sram;
++
++ nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
++
++ /* Read the whole configuration structure into memory. It's
++ * in Falcon's boot device, which may be either flash or
++ * EEPROM, but if both are present Falcon prefers flash. The
++ * boot device is always too large for 9-bit addressing, so we
++ * don't have to munge commands.
++ */
++ spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
++ for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
++ len = min(sizeof(*nvconfig) - offset,
++ (size_t) FALCON_SPI_MAX_LEN);
++ rc = falcon_spi_read(spi, efx, SPI_READ,
++ NVCONFIG_BASE + offset,
++ (char *)nvconfig + offset, len);
++ if (rc)
++ goto out;
++ }
++
++ /* Read the MAC addresses */
++ memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
++
++ /* Read the board configuration. */
++ magic_num = le16_to_cpu(nvconfig->board_magic_num);
++ struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
++
++ if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
++ EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
++ "therefore using defaults\n", magic_num, struct_ver);
++ efx->phy_type = PHY_TYPE_NONE;
++ efx->mii.phy_id = PHY_ADDR_INVALID;
++ board_rev = 0;
++ onchip_sram = 1;
++
++ } else {
++ struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
++ struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
++
++ efx->phy_type = v2->port0_phy_type;
++ efx->mii.phy_id = v2->port0_phy_addr;
++ board_rev = le16_to_cpu(v2->board_revision);
++ onchip_sram = EFX_OWORD_FIELD(nvconfig->nic_stat_reg,
++ ONCHIP_SRAM);
++
++ if (struct_ver >= 3) {
++ __le32 fl = v3->spi_device_type[EE_SPI_FLASH];
++ __le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
++ falcon_spi_device_init(&efx->spi_flash, EE_SPI_FLASH,
++ le32_to_cpu(fl));
++ falcon_spi_device_init(&efx->spi_eeprom, EE_SPI_EEPROM,
++ le32_to_cpu(ee));
++ }
++ }
++
++ EFX_LOG(efx, "PHY is %s(%d) phy_id %d\n",
++ PHY_TYPE(efx), efx->phy_type,
++ efx->mii.phy_id);
++
++ efx_set_board_info(efx, board_rev);
++
++ /* Read the SRAM configuration. The register is initialised
++ * automatically but may have been reset since boot.
++ */
++ if (onchip_sram) {
++ efx->external_sram_cfg = SRM_NB_BSZ_ONCHIP_ONLY;
++ } else {
++ efx->external_sram_cfg =
++ EFX_OWORD_FIELD(nvconfig->srm_cfg_reg,
++ SRM_NUM_BANKS_AND_BANK_SIZE);
++ WARN_ON(efx->external_sram_cfg == SRM_NB_BSZ_RESERVED);
++ /* Replace invalid setting with the smallest defaults */
++ if (efx->external_sram_cfg == SRM_NB_BSZ_DEFAULT)
++ efx->external_sram_cfg = SRM_NB_BSZ_1BANKS_2M;
++ }
++ EFX_LOG(efx, "external_sram_cfg=%d (>=0 is external)\n",
++ efx->external_sram_cfg);
++
++ out:
++ kfree(nvconfig);
++ return rc;
++}
++
++/* Looks at available SRAM resources and silicon revision, and works out
++ * how many queues we can support, and where things like descriptor caches
++ * should live. */
++static int falcon_dimension_resources(struct efx_nic *efx)
++{
++ unsigned buffer_entry_bytes, internal_dcs_entries, dcs;
++ struct falcon_nic_data *nic_data = efx->nic_data;
++ struct efx_dl_falcon_resources *res = &nic_data->resources;
++
++ /* Fill out the driverlink resource list */
++ res->hdr.type = EFX_DL_FALCON_RESOURCES;
++ res->biu_lock = &efx->biu_lock;
++ efx->dl_info = &res->hdr;
++
++ /* This is set to 16 for a good reason. In summary, if larger than
++ * 16, the descriptor cache holds more than a default socket
++ * buffer's worth of packets (for UDP we can only have at most one
++ * socket buffer's worth outstanding). This combined with the fact
++ * that we only get 1 TX event per descriptor cache means the NIC
++ * goes idle.
++ * 16 gives us up to 256 TXQs on Falcon B in internal-SRAM mode,
++ * and up to 512 on Falcon A.
++ */
++ nic_data->tx_dc_entries = 16;
++
++ /* Set the RX descriptor cache size. Values 16, 32 and 64 are
++ * supported (8 won't work). Bigger is better, especially on B
++ * silicon.
++ */
++ nic_data->rx_dc_entries = descriptor_cache_size;
++ dcs = ffs(nic_data->rx_dc_entries);
++ if ((dcs < 5) || (dcs > 7) ||
++ ((1 << (dcs - 1)) != nic_data->rx_dc_entries)) {
++ EFX_ERR(efx, "bad descriptor_cache_size=%d (dcs=%d)\n",
++ nic_data->rx_dc_entries, dcs);
++ return -EINVAL;
++ }
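++ /* Examples of the check above: descriptor_cache_size = 64 gives
++ * dcs = 7 and is accepted; 8 gives dcs = 4 and is rejected; a
++ * non-power-of-two value such as 48 fails the (1 << (dcs - 1))
++ * test.
++ */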
++
++ /* NB. The minimum values get increased as this driver initialises
++ * its resources, so this should prevent any overlap.
++ */
++ switch (FALCON_REV(efx)) {
++ case FALCON_REV_A1:
++ res->rxq_min = res->txq_min = 16;
++ res->evq_int_min = res->evq_int_max = 4;
++ res->evq_timer_min = 5;
++ res->evq_timer_max = 4096;
++ internal_dcs_entries = 8192;
++ break;
++ case FALCON_REV_B0:
++ default:
++ res->rxq_min = res->txq_min = res->evq_int_min = 0;
++ res->evq_int_max = 64;
++ res->evq_timer_min = 64;
++ res->evq_timer_max = 4096;
++ internal_dcs_entries = 4096;
++ break;
++ }
++
++ buffer_entry_bytes = 8;
++
++ if (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY) {
++ res->rxq_max = internal_dcs_entries / nic_data->rx_dc_entries;
++ res->txq_max = internal_dcs_entries / nic_data->tx_dc_entries;
++ /* Prog model says 8K entries for buffer table in internal
++ * mode. But does this not depend on full/half mode?
++ */
++ res->buffer_table_max = 8192;
++ nic_data->tx_dc_base = 0x130000;
++ nic_data->rx_dc_base = 0x100000;
++ } else {
++ unsigned sram_bytes, vnic_bytes, max_vnics, n_vnics;
++
++ /* Determine how much SRAM we have to play with. We have
++ * to fit buffer table and descriptor caches in.
++ */
++ switch (efx->external_sram_cfg) {
++ case SRM_NB_BSZ_1BANKS_2M:
++ default:
++ sram_bytes = 2 * 1024 * 1024;
++ break;
++ case SRM_NB_BSZ_1BANKS_4M:
++ case SRM_NB_BSZ_2BANKS_4M:
++ sram_bytes = 4 * 1024 * 1024;
++ break;
++ case SRM_NB_BSZ_1BANKS_8M:
++ case SRM_NB_BSZ_2BANKS_8M:
++ sram_bytes = 8 * 1024 * 1024;
++ break;
++ case SRM_NB_BSZ_2BANKS_16M:
++ sram_bytes = 16 * 1024 * 1024;
++ break;
++ }
++ /* For each VNIC allow at least 512 buffer table entries
++ * and descriptor cache for an rxq and txq. Buffer table
++ * space for evqs and dmaqs is relatively trivial, so not
++ * considered in this calculation.
++ */
++ vnic_bytes = (512 * buffer_entry_bytes
++ + nic_data->rx_dc_entries * 8
++ + nic_data->tx_dc_entries * 8);
++ max_vnics = sram_bytes / vnic_bytes;
++ for (n_vnics = 1; n_vnics < res->evq_timer_min + max_vnics;)
++ n_vnics *= 2;
++ res->rxq_max = n_vnics;
++ res->txq_max = n_vnics;
++
++ dcs = n_vnics * nic_data->tx_dc_entries * 8;
++ nic_data->tx_dc_base = sram_bytes - dcs;
++ dcs = n_vnics * nic_data->rx_dc_entries * 8;
++ nic_data->rx_dc_base = nic_data->tx_dc_base - dcs;
++ res->buffer_table_max = nic_data->rx_dc_base / 8;
++ }
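++ /* Worked example (illustrative, assuming 64-entry RX and 16-entry
++ * TX caches on B0 with 2 MB of external SRAM): vnic_bytes =
++ * 512 * 8 + 64 * 8 + 16 * 8 = 4736, so max_vnics = 442 and
++ * n_vnics rounds 64 + 442 up to the next power of two, 512; the
++ * descriptor caches then sit at the top of SRAM with the buffer
++ * table below them.
++ */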
++
++ if (efx->type->is_dual_func)
++ res->flags |= EFX_DL_FALCON_DUAL_FUNC;
++
++ if (EFX_INT_MODE_USE_MSI(efx))
++ res->flags |= EFX_DL_FALCON_USE_MSI;
++
++ return 0;
++}
++
++/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
++ * count, port speed). Set workaround and feature flags accordingly.
++ */
++static int falcon_probe_nic_variant(struct efx_nic *efx)
++{
++ efx_oword_t altera_build;
++
++ falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
++ efx->is_asic = EFX_OWORD_FIELD(altera_build, VER_ALL) == 0;
++
++#if !defined(EFX_USE_PCI_DEV_REVISION)
++ {
++ int rc;
++ rc = pci_read_config_byte(efx->pci_dev, PCI_CLASS_REVISION,
++ &efx->revision);
++ if (rc)
++ return rc;
++ }
++#endif
++ switch (FALCON_REV(efx)) {
++ case FALCON_REV_A0:
++ case 0xff:
++ EFX_ERR(efx, "Falcon rev A0 not supported\n");
++ return -ENODEV;
++
++ case FALCON_REV_A1:{
++ efx_oword_t nic_stat;
++
++ falcon_read(efx, &nic_stat, NIC_STAT_REG);
++
++ if (!efx->is_asic) {
++ EFX_ERR(efx, "Falcon rev A1 FPGA not supported\n");
++ return -ENODEV;
++ }
++ if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
++ EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
++ return -ENODEV;
++ }
++ efx->is_10g = EFX_OWORD_FIELD(nic_stat, STRAP_10G);
++ efx->silicon_rev = "falcon/a1";
++ break;
++ }
++
++ case FALCON_REV_B0:{
++ efx->is_10g = 1;
++ efx->silicon_rev = "falcon/b0";
++ break;
++ }
++
++ default:
++ EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++/* Probe all SPI devices on the NIC */
++static void falcon_probe_spi_devices(struct efx_nic *efx)
++{
++ efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
++ unsigned int has_flash, has_eeprom, boot_is_external;
++
++ falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
++ falcon_read(efx, &nic_stat, NIC_STAT_REG);
++ falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
++
++ has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
++ has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
++ boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);
++
++ if (has_flash) {
++ u32 flash_device_type;
++
++ if (flash_type == -1) {
++ /* Default flash SPI device: Atmel AT25F1024
++ * 128 KB, 24-bit address, 32 KB erase block,
++ * 256 B write block
++ */
++ flash_device_type =
++ (17 << SPI_DEV_TYPE_SIZE_LBN)
++ | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
++ | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
++ | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
++ | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
++ } else {
++ flash_device_type = flash_type;
++ }
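++ /* Decoding the default above against falcon_spi_device_init():
++ * size = 1 << 17 = 128 KB, addr_len = 3 bytes (24-bit),
++ * erase_command = 0x52, erase_size = 1 << 15 = 32 KB and
++ * block_size = 1 << 8 = 256 B, matching the AT25F1024 parameters
++ * listed in the comment.
++ */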
++
++ falcon_spi_device_init(&efx->spi_flash, EE_SPI_FLASH,
++ flash_device_type);
++
++ if (!boot_is_external) {
++ /* Disable VPD and set clock dividers to safe
++ * values for initial programming.
++ */
++ EFX_LOG(efx, "Booted from internal ASIC settings;"
++ " setting SPI config\n");
++ EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
++ /* 125 MHz / 7 ~= 20 MHz */
++ EE_SF_CLOCK_DIV, 7,
++ /* 125 MHz / 63 ~= 2 MHz */
++ EE_EE_CLOCK_DIV, 63);
++ falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
++ }
++ }
++
++ if (has_eeprom) {
++ u32 eeprom_device_type;
++
++ /* eeprom_type may be -1 (default) for automatic detection,
++ * 0 or 1 to select the default or large EEPROM, or
++ * some larger number to specify the precise configuration
++ */
++ if (eeprom_type == -1 || eeprom_type <= 1) {
++ /* If it has no flash, it must have a large EEPROM
++ * for chip config; otherwise check whether 9-bit
++ * addressing is used for VPD configuration
++ */
++ if (eeprom_type == 0 ||
++ (eeprom_type == -1 && has_flash &&
++ (!boot_is_external ||
++ EFX_OWORD_FIELD(ee_vpd_cfg,
++ EE_VPD_EN_AD9_MODE)))) {
++ /* Default SPI device: Atmel AT25040 or similar
++ * 512 B, 9-bit address, 8 B write block
++ */
++ eeprom_device_type =
++ (9 << SPI_DEV_TYPE_SIZE_LBN)
++ | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
++ | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
++ } else {
++ /* "Large" SPI device: Atmel AT25640 or similar
++ * 8 KB, 16-bit address, 32 B write block
++ */
++ eeprom_device_type =
++ (13 << SPI_DEV_TYPE_SIZE_LBN)
++ | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
++ | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
++ }
++ } else {
++ eeprom_device_type = eeprom_type;
++ }
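++ /* Net effect of the selection above: eeprom_type 0 forces the
++ * small 512 B part, 1 forces the 8 KB part, and -1 picks the
++ * small part only when a flash carries the chip config and VPD
++ * is known to use 9-bit addressing (or the boot was internal);
++ * otherwise the large part is assumed.
++ */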
++
++ falcon_spi_device_init(&efx->spi_eeprom, EE_SPI_EEPROM,
++ eeprom_device_type);
++ }
++
++ EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
++ (has_flash ? "present" : "absent"),
++ (has_eeprom ? "present" : "absent"));
++}
++
++static void falcon_remove_spi_devices(struct efx_nic *efx)
++{
++ kfree(efx->spi_eeprom);
++ efx->spi_eeprom = NULL;
++ kfree(efx->spi_flash);
++ efx->spi_flash = NULL;
++}
++
++#ifdef CONFIG_SFC_DEBUGFS
++
++/* Generate a hardware revision string */
++int falcon_debugfs_read_hardware_desc(struct seq_file *file, void *data)
++{
++ struct efx_nic *efx = data;
++ efx_oword_t altera_build;
++ int major, minor, build;
++ int rc, len;
++
++ if (efx->is_asic) {
++ rc = seq_puts(file, "Falcon ASIC");
++ } else {
++ falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
++
++ major = EFX_OWORD_FIELD(altera_build, VER_MAJOR);
++ minor = EFX_OWORD_FIELD(altera_build, VER_MINOR);
++ build = EFX_OWORD_FIELD(altera_build, VER_BUILD);
++ rc = seq_printf(file, "Falcon FPGA v%x.%x.%x",
++ major, minor, build);
++ }
++ len = rc;
++
++ switch (FALCON_REV(efx)) {
++ case FALCON_REV_A1:
++ rc = seq_puts(file, " rev A1 ");
++ break;
++ case FALCON_REV_B0:
++ rc = seq_puts(file, " rev B0 ");
++ break;
++ default:
++ rc = seq_puts(file, " rev ?? ");
++ break;
++ }
++ len += rc;
++
++ rc = seq_printf(file, "%s %s\n",
++ efx->is_10g ? "10G" : "1G", PHY_TYPE(efx));
++ len += rc;
++
++ return rc < 0 ? rc : len;
++}
++
++#endif /* CONFIG_SFC_DEBUGFS */
++
++int falcon_probe_nic(struct efx_nic *efx)
++{
++ struct falcon_nic_data *nic_data;
++ int rc;
++
++ /* Initialise I2C interface state */
++ efx->i2c.efx = efx;
++ efx->i2c.op = &falcon_i2c_bit_operations;
++ efx->i2c.sda = 1;
++ efx->i2c.scl = 1;
++
++ /* Determine number of ports etc. */
++ rc = falcon_probe_nic_variant(efx);
++ if (rc)
++ goto fail1;
++
++ /* Probe secondary function if expected */
++ if (efx->type->is_dual_func) {
++ struct pci_dev *dev = pci_dev_get(efx->pci_dev);
++
++ while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
++ dev))) {
++ if (dev->bus == efx->pci_dev->bus &&
++ dev->devfn == efx->pci_dev->devfn + 1) {
++ efx->pci_dev2 = dev;
++ break;
++ }
++ }
++ if (!efx->pci_dev2) {
++ EFX_ERR(efx, "failed to find secondary function\n");
++ rc = -ENODEV;
++ goto fail2;
++ }
++ }
++
++ /* Now we can reset the NIC */
++ rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
++ if (rc) {
++ EFX_ERR(efx, "failed to reset NIC\n");
++ goto fail3;
++ }
++
++ /* Allocate memory for INT_KER */
++ rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
++ if (rc)
++ goto fail4;
++ BUG_ON(efx->irq_status.dma_addr & 0x0f);
++
++ EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
++ (unsigned long long)efx->irq_status.dma_addr,
++ efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
++
++ /* Determine attached SPI devices */
++ falcon_probe_spi_devices(efx);
++
++ /* Read in the non-volatile configuration */
++ rc = falcon_probe_nvconfig(efx);
++ if (rc)
++ goto fail5;
++
++ if (!efx->is_10g && efx->phy_type != PHY_TYPE_1G_ALASKA) {
++ /* Actually using 1G port, not 10G port */
++ efx->phy_type = PHY_TYPE_1G_ALASKA;
++ efx->mii.phy_id = 2;
++ }
++
++ /* Decide how many resources we can allocate, to ourselves
++ * and to driverlink clients */
++ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
++ efx->nic_data = (void *) nic_data;
++
++ rc = falcon_dimension_resources(efx);
++ if (rc)
++ goto fail6;
++
++ return 0;
++
++ fail6:
++ kfree(nic_data);
++ efx->nic_data = efx->dl_info = NULL;
++ fail5:
++ falcon_remove_spi_devices(efx);
++ falcon_free_buffer(efx, &efx->irq_status);
++ fail4:
++ /* fall-thru */
++ fail3:
++ if (efx->pci_dev2) {
++ pci_dev_put(efx->pci_dev2);
++ efx->pci_dev2 = NULL;
++ }
++ fail2:
++ /* fall-thru */
++ fail1:
++ return rc;
++}
++
++static int falcon_check_power_limit(struct efx_nic *efx)
++{
++ int pciecap_offset = pci_find_capability(efx->pci_dev, PCI_CAP_ID_EXP);
++ u32 pcie_devcap;
++ unsigned val, scale;
++ int rc;
++
++ if (!pciecap_offset)
++ return -EIO;
++ rc = pci_read_config_dword(efx->pci_dev,
++ (pciecap_offset + PCI_EXP_DEVCAP),
++ &pcie_devcap);
++ if (rc)
++ return rc;
++
++ val = ((pcie_devcap & PCI_EXP_DEVCAP_PWR_VAL) >>
++ PCI_EXP_DEVCAP_PWR_VAL_LBN);
++ scale = ((pcie_devcap & PCI_EXP_DEVCAP_PWR_SCL) >>
++ PCI_EXP_DEVCAP_PWR_SCL_LBN);
++
++ /* Re-scale to milliwatts if necessary */
++ while (scale != 3) {
++ val *= 10;
++ scale++;
++ }
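++ /* The PCIe power scale field encodes a power of ten (3 means
++ * milliwatts), so e.g. val = 25 read at scale 0 (watts) becomes
++ * 25000 mW after three passes through the loop above.
++ */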
++
++ if (val != 0 && efx->board_info.mwatts > val) {
++ EFX_ERR(efx, "board needs %d mW but only %d mW available\n",
++ efx->board_info.mwatts, val);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++static void falcon_init_ack_repl_timer(struct efx_nic *efx, int num_lanes)
++{
++ unsigned tlp_size;
++ efx_dword_t pcie_ack_rpl_reg;
++ efx_dword_t pcie_ack_freq_reg;
++ efx_dword_t pcie_ctrl_stat_reg;
++ u16 pcie_devicectrl;
++ int lut_index, tlp_size_decoded;
++ int current_replay, expected_replay;
++ int current_ack_timer, current_ack_freq;
++
++ static struct efx_tlp_ack_factor {
++ int tlp;
++ int replay[4]; /* 0=x1, 1=x2, 2=x4, 3=x8 lanes (see pcie docs) */
++ } tlp_ack_factor_lut[4] = {
++ { 128, { 421, 257, 174, 166 } },
++ { 256, { 689, 391, 241, 225 } },
++ { 512, { 903, 498, 295, 193 } },
++ { 1024, { 1670, 881, 487, 290 } }
++ };
++ struct efx_tlp_ack_factor *tlp_ack_factor;
++
++ /* Get TLP size */
++ falcon_pcie_core_read_reg(efx, PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT,
++ &pcie_ctrl_stat_reg);
++ pcie_devicectrl = (u16) EFX_EXTRACT_DWORD(pcie_ctrl_stat_reg, 0, 15);
++ tlp_size = ((PCI_EXP_DEVCTL_PAYLOAD & pcie_devicectrl) >>
++ ffs(PCI_EXP_DEVCTL_PAYLOAD));
++ EFX_WARN_ON_PARANOID(tlp_size > 3); /* => 1024 bytes */
++ tlp_ack_factor = &tlp_ack_factor_lut[tlp_size & 0x3];
++ tlp_size_decoded = tlp_ack_factor->tlp;
++
++ /* Get actual ack & actual and expected replay settings */
++ falcon_pcie_core_read_reg(efx, PCIE_CORE_ADDR_ACK_RPL_TIMER,
++ &pcie_ack_rpl_reg);
++ current_replay = EFX_DWORD_FIELD(pcie_ack_rpl_reg, PCIE_CORE_RPL_TL);
++ current_ack_timer = EFX_DWORD_FIELD(pcie_ack_rpl_reg,
++ PCIE_CORE_ACK_TL);
++
++ lut_index = ffs(num_lanes) - 1;
++ expected_replay = tlp_ack_factor->replay[lut_index & 0x3];
++
++ falcon_pcie_core_read_reg(efx, PCIE_CORE_ADDR_ACK_FREQ,
++ &pcie_ack_freq_reg);
++ current_ack_freq = EFX_DWORD_FIELD(pcie_ack_freq_reg,
++ PCIE_CORE_ACK_FREQ);
++
++ EFX_LOG(efx, "pcie x%d tlp=%d replay_reg=" EFX_DWORD_FMT " { ack=%d "
++ "current_replay=%d expected_replay=%d } ack_reg="
++ EFX_DWORD_FMT " { current_freq=%d expected_freq=%d }\n",
++ num_lanes, tlp_size_decoded,
++ EFX_DWORD_VAL(pcie_ack_rpl_reg), current_ack_timer,
++ current_replay, expected_replay,
++ EFX_DWORD_VAL(pcie_ack_freq_reg), current_ack_freq, 0);
++
++ /* If the expected replay setting is larger than the current one, raise it */
++ if (expected_replay > current_replay) {
++ EFX_SET_DWORD_FIELD(pcie_ack_rpl_reg, PCIE_CORE_RPL_TL,
++ expected_replay);
++
++ falcon_pcie_core_write_reg(efx, PCIE_CORE_ADDR_ACK_RPL_TIMER,
++ pcie_ack_rpl_reg);
++ }
++}
++
++static int falcon_init_pcie_core(struct efx_nic *efx)
++{
++ int pciecap_offset;
++ unsigned num_lanes = 0;
++
++ /* Get num lanes */
++ pciecap_offset = pci_find_capability(efx->pci_dev, PCI_CAP_ID_EXP);
++ if (pciecap_offset) {
++ u16 pcie_linkstat;
++ int rc, link_sta;
++
++ link_sta = pciecap_offset + PCI_EXP_LNKSTA;
++ rc = pci_read_config_word(efx->pci_dev, link_sta,
++ &pcie_linkstat);
++ if (rc)
++ return rc;
++
++ num_lanes = ((pcie_linkstat & PCI_EXP_LNKSTA_LNK_WID)
++ >> PCI_EXP_LNKSTA_LNK_WID_LBN);
++ EFX_BUG_ON_PARANOID(num_lanes == 0 || num_lanes > 8);
++
++ if (num_lanes < 8)
++ EFX_ERR(efx, "WARNING: the Solarflare Network Adapter "
++ "has been plugged into a PCI-Express slot with "
++ "less than 8 lanes (%d detected). This will "
++ "limit the maximum achievable bandwidth! "
++ "Consult your motherboard documentation to "
++ "find a slot that is 8 lanes electrically and "
++ "physically\n", num_lanes);
++ }
++
++ if (FALCON_REV(efx) <= FALCON_REV_A1)
++ return 0;
++
++ if (EFX_WORKAROUND_6943(efx) && num_lanes > 0)
++ falcon_init_ack_repl_timer(efx, num_lanes);
++
++ if (EFX_WORKAROUND_9096(efx)) {
++ efx_dword_t pcie_ack_freq_reg;
++
++ /* ensure ack freq timer is 0 = always ack after timeout */
++ falcon_pcie_core_read_reg(efx, PCIE_CORE_ADDR_ACK_FREQ,
++ &pcie_ack_freq_reg);
++ EFX_SET_DWORD_FIELD(pcie_ack_freq_reg, PCIE_CORE_ACK_FREQ, 0);
++ falcon_pcie_core_write_reg(efx, PCIE_CORE_ADDR_ACK_FREQ,
++ pcie_ack_freq_reg);
++ }
++
++ return 0;
++}
++
++static void falcon_fini_pcie_core(struct efx_nic *efx)
++{
++ efx_dword_t pcie_ack_freq_reg;
++
++ if (FALCON_REV(efx) <= FALCON_REV_A1)
++ return;
++
++ if (EFX_WORKAROUND_9096(efx)) {
++ /* Set the ACK frequency timer to 1, so TLPs are acked in
++ * a timely fashion.
++ */
++ falcon_pcie_core_read_reg(efx, PCIE_CORE_ADDR_ACK_FREQ,
++ &pcie_ack_freq_reg);
++ EFX_SET_DWORD_FIELD(pcie_ack_freq_reg, PCIE_CORE_ACK_FREQ, 1);
++ falcon_pcie_core_write_reg(efx, PCIE_CORE_ADDR_ACK_FREQ,
++ pcie_ack_freq_reg);
++ }
++}
++
++/* This call performs hardware-specific global initialisation, such as
++ * defining the descriptor cache sizes and number of RSS channels.
++ * It does not set up any buffers, descriptor rings or event queues.
++ */
++int falcon_init_nic(struct efx_nic *efx)
++{
++ struct falcon_nic_data *data;
++ efx_oword_t temp;
++ unsigned thresh;
++ int rc;
++
++ data = (struct falcon_nic_data *)efx->nic_data;
++
++ /* Set up the address region register. This is only needed
++ * for the B0 FPGA, but since we are just pushing in the
++ * reset defaults this may as well be unconditional. */
++ EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
++ ADR_REGION1, (1 << 16),
++ ADR_REGION2, (2 << 16),
++ ADR_REGION3, (3 << 16));
++ falcon_write(efx, &temp, ADR_REGION_REG_KER);
++
++ /* Use on-chip SRAM if needed.
++ */
++ falcon_read(efx, &temp, NIC_STAT_REG);
++ if (efx->external_sram_cfg == SRM_NB_BSZ_ONCHIP_ONLY)
++ EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
++ else
++ EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 0);
++ falcon_write(efx, &temp, NIC_STAT_REG);
++
++ /* Check power requirements against PCIe power budgeting */
++ rc = falcon_check_power_limit(efx);
++ if (rc)
++ return rc;
++
++ /* Warn if <8 lanes of PCIe detected & set pcie timers */
++ rc = falcon_init_pcie_core(efx);
++ if (rc)
++ return rc;
++
++ /* Set buffer table mode */
++ EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
++ falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
++
++ rc = falcon_reset_sram(efx);
++ if (rc)
++ return rc;
++
++ /* Set positions of descriptor caches in SRAM. */
++ EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, data->tx_dc_base / 8);
++ falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
++ EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, data->rx_dc_base / 8);
++ falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
++
++ /* Set TX descriptor cache size. */
++ EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, ffs(data->tx_dc_entries) - 4);
++ falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
++
++ /* Set RX descriptor cache size. Set low watermark to size-8, as
++ * this allows most efficient prefetching.
++ */
++ EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, ffs(data->rx_dc_entries) - 4);
++ falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
++ EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, data->rx_dc_entries - 8);
++ falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
++
++ /* Clear the parity enables on the TX data fifos as
++ * they produce false parity errors because of timing issues
++ */
++ if (EFX_WORKAROUND_5129(efx)) {
++ falcon_read(efx, &temp, SPARE_REG_KER);
++ EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
++ falcon_write(efx, &temp, SPARE_REG_KER);
++ }
++
++ /* Enable all the genuinely fatal interrupts. (They are still
++ * masked by the overall interrupt mask, controlled by
++ * falcon_interrupts()).
++ *
++ * Note: All other fatal interrupts are enabled
++ */
++ EFX_POPULATE_OWORD_3(temp,
++ ILL_ADR_INT_KER_EN, 1,
++ RBUF_OWN_INT_KER_EN, 1,
++ TBUF_OWN_INT_KER_EN, 1);
++ EFX_INVERT_OWORD(temp);
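++ /* (populate-then-invert yields a value with every bit set except the three named above) */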
++ falcon_write(efx, &temp, FATAL_INTR_REG_KER);
++
++ /* Set number of RSS queues for receive path. */
++ falcon_read(efx, &temp, RX_FILTER_CTL_REG);
++ if (FALCON_REV(efx) >= FALCON_REV_B0)
++ EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
++ else
++ EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
++ if (EFX_WORKAROUND_7244(efx)) {
++ EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
++ EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
++ EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
++ EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
++ }
++ falcon_write(efx, &temp, RX_FILTER_CTL_REG);
++
++ falcon_setup_rss_indir_table(efx);
++
++ /* Set up RX. The "wait for descriptor" feature is broken and
++ * must be disabled. RXDP recovery shouldn't be needed, but is.
++ */
++ falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
++ EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
++ EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
++ if (EFX_WORKAROUND_5583(efx))
++ EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
++ falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
++
++ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
++ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
++ */
++ falcon_read(efx, &temp, TX_CFG2_REG_KER);
++ EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
++ EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
++ EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
++ EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
++ EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
++ /* Enable SW_EV to inherit in char driver - assume harmless here */
++ EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
++ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
++ EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
++ if (EFX_WORKAROUND_9008(efx))
++ EFX_SET_OWORD_FIELD(temp, TX_PREF_WD_TMR, (unsigned)0x3fffff);
++ /* Squash TX of packets of 16 bytes or less */
++ if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
++ EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
++ falcon_write(efx, &temp, TX_CFG2_REG_KER);
++
++ /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
++ * descriptors (which is bad).
++ */
++ falcon_read(efx, &temp, TX_CFG_REG_KER);
++ EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
++ falcon_write(efx, &temp, TX_CFG_REG_KER);
++
++ /* RX config */
++ falcon_read(efx, &temp, RX_CFG_REG_KER);
++ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
++ if (EFX_WORKAROUND_7575(efx))
++ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
++ (3 * 4096) / 32);
++ if (FALCON_REV(efx) >= FALCON_REV_B0)
++ EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
++
++ /* RX FIFO flow control thresholds */
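++ /* (negative rx_xon/xoff_thresh_bytes means use the per-revision default; register fields are in 256-byte units) */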
++ thresh = ((rx_xon_thresh_bytes >= 0) ?
++ rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
++ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
++ thresh = ((rx_xoff_thresh_bytes >= 0) ?
++ rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
++ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
++ /* RX control FIFO thresholds [32 entries] */
++ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
++ EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
++ falcon_write(efx, &temp, RX_CFG_REG_KER);
++
++ /* Set destination of both TX and RX Flush events */
++ if (FALCON_REV(efx) >= FALCON_REV_B0) {
++ EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
++ falcon_write(efx, &temp, DP_CTRL_REG);
++ }
++
++ return 0;
++}
++
++void falcon_fini_nic(struct efx_nic *efx)
++{
++ falcon_fini_pcie_core(efx);
++}
++
++void falcon_remove_nic(struct efx_nic *efx)
++{
++ /* Tear down the private nic state, and the driverlink nic params */
++ kfree(efx->nic_data);
++ efx->nic_data = efx->dl_info = NULL;
++
++ falcon_remove_spi_devices(efx);
++ falcon_free_buffer(efx, &efx->irq_status);
++
++ /* Reset the NIC finally */
++ (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
++
++ /* Release the second function after the reset */
++ if (efx->pci_dev2) {
++ pci_dev_put(efx->pci_dev2);
++ efx->pci_dev2 = NULL;
++ }
++}
++
++void falcon_update_nic_stats(struct efx_nic *efx)
++{
++ efx_oword_t cnt;
++
++ /* Read the RX drop counter */
++ falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
++ efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
++}
++
++/**************************************************************************
++ *
++ * Revision-dependent attributes used by efx.c
++ *
++ **************************************************************************
++ */
++
++struct efx_nic_type falcon_a_nic_type = {
++ .is_dual_func = 1,
++ .mem_bar = 2,
++ .mem_map_size = 0x20000,
++ .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
++ .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
++ .buf_tbl_base = BUF_TBL_KER_A1,
++ .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
++ .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
++ .txd_ring_mask = FALCON_TXD_RING_MASK,
++ .rxd_ring_mask = FALCON_RXD_RING_MASK,
++ .evq_size = FALCON_EVQ_SIZE,
++ .max_dma_mask = FALCON_DMA_MASK,
++ .tx_dma_mask = FALCON_TX_DMA_MASK,
++ .bug5391_mask = 0xf,
++ .rx_xoff_thresh = 2048,
++ .rx_xon_thresh = 512,
++ .rx_buffer_padding = 0x24,
++ .max_interrupt_mode = EFX_INT_MODE_MSI,
++ .phys_addr_channels = 4,
++};
++
++struct efx_nic_type falcon_b_nic_type = {
++ .is_dual_func = 0,
++ .mem_bar = 2,
++ /* Map everything up to and including the RSS indirection
++ * table. Don't map MSI-X table, MSI-X PBA since Linux
++ * requires that they not be mapped. */
++ .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
++ .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
++ .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
++ .buf_tbl_base = BUF_TBL_KER_B0,
++ .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
++ .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
++ .txd_ring_mask = FALCON_TXD_RING_MASK,
++ .rxd_ring_mask = FALCON_RXD_RING_MASK,
++ .evq_size = FALCON_EVQ_SIZE,
++ .max_dma_mask = FALCON_DMA_MASK,
++ .tx_dma_mask = FALCON_TX_DMA_MASK,
++ .bug5391_mask = 0,
++ .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
++ .rx_xon_thresh = 27648, /* ~3*max MTU */
++ .rx_buffer_padding = 0,
++ .max_interrupt_mode = EFX_INT_MODE_MSIX,
++ .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
++ * interrupt handler only supports 32
++ * channels */
++};
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/falcon_gmac.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_gmac.c
+--- linux-2.6.18.8/drivers/net/sfc/falcon_gmac.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_gmac.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,320 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/delay.h>
++#include "net_driver.h"
++#include "efx.h"
++#include "falcon.h"
++#include "mac.h"
++#include "falcon_hwdefs.h"
++#include "falcon_io.h"
++#include "gmii.h"
++
++/**************************************************************************
++ *
++ * MAC register access
++ *
++ **************************************************************************/
++
++/* Offset of a GMAC register within Falcon */
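++/* (each register occupies FALCON_GMAC_REG_SIZE bytes within the GMAC register bank) */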
++#define FALCON_GMAC_REG(mac_reg) \
++ (FALCON_GMAC_REGBANK + ((mac_reg) * FALCON_GMAC_REG_SIZE))
++
++static void falcon_gmac_writel(struct efx_nic *efx,
++ efx_dword_t *value, unsigned int mac_reg)
++{
++ efx_oword_t temp;
++
++ EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
++ falcon_write(efx, &temp, FALCON_GMAC_REG(mac_reg));
++}
++
++static void falcon_gmac_readl(struct efx_nic *efx,
++ efx_dword_t *value, unsigned int mac_reg)
++{
++ efx_oword_t temp;
++
++ falcon_read(efx, &temp, FALCON_GMAC_REG(mac_reg));
++ EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
++}
++
++/**************************************************************************
++ *
++ * MAC operations
++ *
++ *************************************************************************/
++
++static int falcon_init_gmac(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Reset the MAC */
++ mentormac_reset(efx);
++
++ /* Initialise PHY */
++ rc = efx->phy_op->init(efx);
++ if (rc)
++ return rc;
++
++ return 0;
++}
++
++static void falcon_reconfigure_gmac(struct efx_nic *efx)
++{
++ /* Reconfigure PHY and pick up PHY parameters. This updates
++ * the link status. */
++ efx->phy_op->reconfigure(efx);
++
++ /* Isolate the MAC. */
++ falcon_deconfigure_mac_wrapper(efx);
++
++ /* Reconfigure MAC */
++ mentormac_reconfigure(efx);
++
++ /* Reconfigure MAC wrapper */
++ falcon_reconfigure_mac_wrapper(efx);
++}
++
++static void falcon_fini_gmac(struct efx_nic *efx)
++{
++ /* Isolate the MAC - PHY */
++ falcon_deconfigure_mac_wrapper(efx);
++
++ /* Shut down PHY */
++ efx->phy_op->fini(efx);
++
++ /* Reset MAC */
++ mentormac_reset(efx);
++}
++
++static void falcon_update_stats_gmac(struct efx_nic *efx)
++{
++ struct efx_mac_stats *mac_stats = &efx->mac_stats;
++ unsigned long old_rx_pause, old_tx_pause;
++ unsigned long new_rx_pause, new_tx_pause;
++ int rc;
++
++ rc = falcon_dma_stats(efx, GDmaDone_offset);
++ if (rc)
++ return;
++
++ /* Pause frames are erroneously counted as errors (SFC bug 3269) */
++ old_rx_pause = mac_stats->rx_pause;
++ old_tx_pause = mac_stats->tx_pause;
++
++ /* Update MAC stats from DMAed values */
++ FALCON_STAT(efx, GRxGoodOct, rx_good_bytes);
++ FALCON_STAT(efx, GRxBadOct, rx_bad_bytes);
++ FALCON_STAT(efx, GRxMissPkt, rx_missed);
++ FALCON_STAT(efx, GRxFalseCRS, rx_false_carrier);
++ FALCON_STAT(efx, GRxPausePkt, rx_pause);
++ FALCON_STAT(efx, GRxBadPkt, rx_bad);
++ FALCON_STAT(efx, GRxUcastPkt, rx_unicast);
++ FALCON_STAT(efx, GRxMcastPkt, rx_multicast);
++ FALCON_STAT(efx, GRxBcastPkt, rx_broadcast);
++ FALCON_STAT(efx, GRxGoodLt64Pkt, rx_good_lt64);
++ FALCON_STAT(efx, GRxBadLt64Pkt, rx_bad_lt64);
++ FALCON_STAT(efx, GRx64Pkt, rx_64);
++ FALCON_STAT(efx, GRx65to127Pkt, rx_65_to_127);
++ FALCON_STAT(efx, GRx128to255Pkt, rx_128_to_255);
++ FALCON_STAT(efx, GRx256to511Pkt, rx_256_to_511);
++ FALCON_STAT(efx, GRx512to1023Pkt, rx_512_to_1023);
++ FALCON_STAT(efx, GRx1024to15xxPkt, rx_1024_to_15xx);
++ FALCON_STAT(efx, GRx15xxtoJumboPkt, rx_15xx_to_jumbo);
++ FALCON_STAT(efx, GRxGtJumboPkt, rx_gtjumbo);
++ FALCON_STAT(efx, GRxFcsErr64to15xxPkt, rx_bad_64_to_15xx);
++ FALCON_STAT(efx, GRxFcsErr15xxtoJumboPkt, rx_bad_15xx_to_jumbo);
++ FALCON_STAT(efx, GRxFcsErrGtJumboPkt, rx_bad_gtjumbo);
++ FALCON_STAT(efx, GTxGoodBadOct, tx_bytes);
++ FALCON_STAT(efx, GTxGoodOct, tx_good_bytes);
++ FALCON_STAT(efx, GTxSglColPkt, tx_single_collision);
++ FALCON_STAT(efx, GTxMultColPkt, tx_multiple_collision);
++ FALCON_STAT(efx, GTxExColPkt, tx_excessive_collision);
++ FALCON_STAT(efx, GTxDefPkt, tx_deferred);
++ FALCON_STAT(efx, GTxLateCol, tx_late_collision);
++ FALCON_STAT(efx, GTxExDefPkt, tx_excessive_deferred);
++ FALCON_STAT(efx, GTxPausePkt, tx_pause);
++ FALCON_STAT(efx, GTxBadPkt, tx_bad);
++ FALCON_STAT(efx, GTxUcastPkt, tx_unicast);
++ FALCON_STAT(efx, GTxMcastPkt, tx_multicast);
++ FALCON_STAT(efx, GTxBcastPkt, tx_broadcast);
++ FALCON_STAT(efx, GTxLt64Pkt, tx_lt64);
++ FALCON_STAT(efx, GTx64Pkt, tx_64);
++ FALCON_STAT(efx, GTx65to127Pkt, tx_65_to_127);
++ FALCON_STAT(efx, GTx128to255Pkt, tx_128_to_255);
++ FALCON_STAT(efx, GTx256to511Pkt, tx_256_to_511);
++ FALCON_STAT(efx, GTx512to1023Pkt, tx_512_to_1023);
++ FALCON_STAT(efx, GTx1024to15xxPkt, tx_1024_to_15xx);
++ FALCON_STAT(efx, GTx15xxtoJumboPkt, tx_15xx_to_jumbo);
++ FALCON_STAT(efx, GTxGtJumboPkt, tx_gtjumbo);
++ FALCON_STAT(efx, GTxNonTcpUdpPkt, tx_non_tcpudp);
++ FALCON_STAT(efx, GTxMacSrcErrPkt, tx_mac_src_error);
++ FALCON_STAT(efx, GTxIpSrcErrPkt, tx_ip_src_error);
++
++ /* Pause frames are erroneously counted as errors (SFC bug 3269) */
++ new_rx_pause = mac_stats->rx_pause;
++ new_tx_pause = mac_stats->tx_pause;
++ mac_stats->rx_bad -= (new_rx_pause - old_rx_pause);
++ mac_stats->tx_bad -= (new_tx_pause - old_tx_pause);
++
++ /* Derive stats that the MAC doesn't provide directly */
++ mac_stats->tx_bad_bytes =
++ mac_stats->tx_bytes - mac_stats->tx_good_bytes;
++ mac_stats->tx_packets =
++ mac_stats->tx_lt64 + mac_stats->tx_64 +
++ mac_stats->tx_65_to_127 + mac_stats->tx_128_to_255 +
++ mac_stats->tx_256_to_511 + mac_stats->tx_512_to_1023 +
++ mac_stats->tx_1024_to_15xx + mac_stats->tx_15xx_to_jumbo +
++ mac_stats->tx_gtjumbo;
++ mac_stats->tx_collision =
++ mac_stats->tx_single_collision +
++ mac_stats->tx_multiple_collision +
++ mac_stats->tx_excessive_collision +
++ mac_stats->tx_late_collision;
++ mac_stats->rx_bytes =
++ mac_stats->rx_good_bytes + mac_stats->rx_bad_bytes;
++ mac_stats->rx_packets =
++ mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64 +
++ mac_stats->rx_64 + mac_stats->rx_65_to_127 +
++ mac_stats->rx_128_to_255 + mac_stats->rx_256_to_511 +
++ mac_stats->rx_512_to_1023 + mac_stats->rx_1024_to_15xx +
++ mac_stats->rx_15xx_to_jumbo + mac_stats->rx_gtjumbo;
++ mac_stats->rx_good = mac_stats->rx_packets - mac_stats->rx_bad;
++ mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
++}
++
++static int falcon_check_gmac(struct efx_nic *efx)
++{
++ /* Nothing to do */
++ return 0;
++}
++
++static void falcon_gmac_sim_phy_event(struct efx_nic *efx)
++{
++ efx_qword_t phy_event;
++
++ EFX_POPULATE_QWORD_2(phy_event,
++ EV_CODE, GLOBAL_EV_DECODE,
++ G_PHY0_INTR, 1);
++ falcon_generate_event(&efx->channel[0], &phy_event);
++}
++
++static void falcon_gmac_reset_phy(struct efx_nic *efx)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ int bmcr, i;
++
++ /* Perform software reset to make new settings take effect */
++ bmcr = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_BMCR);
++ bmcr |= BMCR_RESET;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, MII_BMCR, bmcr);
++
++ /* Wait for the reset to deassert */
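++ /* (poll up to 20 times at 10us intervals: a 200us timeout) */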
++ for (i = 20; i; --i) {
++ udelay(10);
++ if ((gmii->mdio_read(gmii->dev, gmii->phy_id, MII_BMCR) &
++ BMCR_RESET) == 0)
++ return;
++ }
++
++ EFX_ERR(efx, "wait for PHY reset timed out\n");
++}
++
++static int falcon_gmac_get_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ int rc;
++
++ rc = mii_ethtool_gset(gmii, ecmd);
++ ecmd->supported &= ~(SUPPORTED_1000baseT_Half);
++ return rc;
++}
++
++static int falcon_gmac_set_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ int rc;
++
++ /* 1000Mbps half-duplex is technically legal, but none of our
++ * current hardware supports it, so just disallow it. */
++ if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
++ EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
++ " setting\n");
++ return -EINVAL;
++ }
++
++ /* Use MII to set all other settings */
++ rc = mii_ethtool_sset(gmii, ecmd);
++ if (rc)
++ return rc;
++
++ /* Reset the PHY */
++ falcon_gmac_reset_phy(efx);
++
++ return 0;
++}
++
++static int falcon_gmac_set_pause(struct efx_nic *efx,
++ enum efx_fc_type flow_control)
++{
++ struct mii_if_info *gmii = &efx->mii;
++ int adv;
++
++ /* GMAC has tiny MAC FIFO, so TX flow control won't work */
++ if (flow_control & EFX_FC_TX)
++ return -EINVAL;
++
++ efx->flow_control = flow_control;
++
++ /* Push autonegotiation to PHY */
++ adv = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
++ adv &= ~GM_ADVERTISE_PAUSE_CAP;
++ adv |= (flow_control & EFX_FC_AUTO) ? GM_ADVERTISE_PAUSE_CAP : 0;
++ gmii->mdio_write(gmii->dev, gmii->phy_id, MII_ADVERTISE, adv);
++
++ falcon_gmac_reset_phy(efx);
++
++ return 0;
++}
++
++struct efx_mac_operations falcon_gmac_operations = {
++ .mac_writel = falcon_gmac_writel,
++ .mac_readl = falcon_gmac_readl,
++ .init = falcon_init_gmac,
++ .reconfigure = falcon_reconfigure_gmac,
++ .update_stats = falcon_update_stats_gmac,
++ .fini = falcon_fini_gmac,
++ .check_hw = falcon_check_gmac,
++ .fake_phy_event = falcon_gmac_sim_phy_event,
++ .get_settings = falcon_gmac_get_settings,
++ .set_settings = falcon_gmac_set_settings,
++ .set_pause = falcon_gmac_set_pause,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/falcon.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon.h
+--- linux-2.6.18.8/drivers/net/sfc/falcon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,177 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_FALCON_H
++#define EFX_FALCON_H
++
++#include <asm/io.h>
++#include <linux/spinlock.h>
++#include "net_driver.h"
++
++/*
++ * Falcon hardware control
++ */
++
++enum falcon_revision {
++ FALCON_REV_A0 = 0,
++ FALCON_REV_A1 = 1,
++ FALCON_REV_B0 = 2,
++};
++
++#if defined(EFX_USE_PCI_DEV_REVISION)
++#define FALCON_REV(efx) ((efx)->pci_dev->revision)
++#else
++#define FALCON_REV(efx) ((efx)->revision)
++#endif
++
++extern struct efx_nic_type falcon_a_nic_type;
++extern struct efx_nic_type falcon_b_nic_type;
++
++/**************************************************************************
++ *
++ * Externs
++ *
++ **************************************************************************
++ */
++
++/* TX data path */
++extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
++extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
++extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
++extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
++#if defined(EFX_USE_FASTCALL)
++extern void fastcall falcon_push_buffers(struct efx_tx_queue *tx_queue);
++#else
++extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
++#endif
++
++/* RX data path */
++extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
++extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
++extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
++extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
++#if defined(EFX_USE_FASTCALL)
++extern void fastcall falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
++#else
++extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
++#endif
++
++/* Event data path */
++extern int falcon_probe_eventq(struct efx_channel *channel);
++extern int falcon_init_eventq(struct efx_channel *channel);
++extern void falcon_fini_eventq(struct efx_channel *channel);
++extern void falcon_remove_eventq(struct efx_channel *channel);
++#if defined(EFX_USE_FASTCALL)
++extern int fastcall falcon_process_eventq(struct efx_channel *channel,
++ int *rx_quota);
++#else
++extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
++#endif
++#if defined(EFX_USE_FASTCALL)
++extern void fastcall falcon_eventq_read_ack(struct efx_channel *channel);
++#else
++extern void falcon_eventq_read_ack(struct efx_channel *channel);
++#endif
++
++/* Ports */
++extern int falcon_probe_port(struct efx_nic *efx);
++extern void falcon_remove_port(struct efx_nic *efx);
++
++/* MAC/PHY */
++extern void falcon_check_xaui_link_up(struct efx_nic *efx);
++extern int falcon_xaui_link_ok(struct efx_nic *efx);
++extern int falcon_dma_stats(struct efx_nic *efx,
++ unsigned int done_offset);
++extern void falcon_drain_tx_fifo(struct efx_nic *efx);
++extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
++extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
++
++/* Interrupts and test events */
++extern int falcon_init_interrupt(struct efx_nic *efx);
++extern void falcon_enable_interrupts(struct efx_nic *efx);
++extern void falcon_generate_test_event(struct efx_channel *channel,
++ unsigned int magic);
++extern void falcon_generate_interrupt(struct efx_nic *efx);
++extern void falcon_set_int_moderation(struct efx_channel *channel);
++extern void falcon_disable_interrupts(struct efx_nic *efx);
++extern void falcon_fini_interrupt(struct efx_nic *efx);
++
++/* Global Resources */
++extern int falcon_probe_nic(struct efx_nic *efx);
++extern int falcon_probe_resources(struct efx_nic *efx);
++extern int falcon_init_nic(struct efx_nic *efx);
++extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
++extern void falcon_fini_nic(struct efx_nic *efx);
++extern void falcon_remove_resources(struct efx_nic *efx);
++extern void falcon_remove_nic(struct efx_nic *efx);
++extern void falcon_update_nic_stats(struct efx_nic *efx);
++extern void falcon_set_multicast_hash(struct efx_nic *efx);
++extern int falcon_reset_xaui(struct efx_nic *efx);
++
++/**************************************************************************
++ *
++ * Falcon MAC stats
++ *
++ **************************************************************************
++ */
++
++#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
++#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
++
++/* Retrieve statistic from statistics block */
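++/* (values are DMAed little-endian at fixed offsets; the width is a compile-time constant, so unused branches fold away) */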
++#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
++ if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
++ (efx)->mac_stats.efx_stat += le16_to_cpu( \
++ *((__force __le16 *) \
++ (efx->stats_buffer.addr + \
++ FALCON_STAT_OFFSET(falcon_stat)))); \
++ else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
++ (efx)->mac_stats.efx_stat += le32_to_cpu( \
++ *((__force __le32 *) \
++ (efx->stats_buffer.addr + \
++ FALCON_STAT_OFFSET(falcon_stat)))); \
++ else \
++ (efx)->mac_stats.efx_stat += le64_to_cpu( \
++ *((__force __le64 *) \
++ (efx->stats_buffer.addr + \
++ FALCON_STAT_OFFSET(falcon_stat)))); \
++ } while (0)
++
++#define FALCON_MAC_STATS_SIZE 0x100
++
++#define MAC_DATA_LBN 0
++#define MAC_DATA_WIDTH 32
++
++extern void falcon_generate_event(struct efx_channel *channel,
++ efx_qword_t *event);
++
++#ifdef CONFIG_SFC_DEBUGFS
++struct seq_file;
++extern int falcon_debugfs_read_hardware_desc(struct seq_file *file, void *data);
++#endif
++
++#endif /* EFX_FALCON_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/falcon_hwdefs.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_hwdefs.h
+--- linux-2.6.18.8/drivers/net/sfc/falcon_hwdefs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_hwdefs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1620 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_FALCON_HWDEFS_H
++#define EFX_FALCON_HWDEFS_H
++
++/*
++ * Falcon hardware value definitions.
++ * Falcon is the internal codename for the SFC4000 controller that is
++ * present in SFE400X evaluation boards
++ */
++
++/**************************************************************************
++ *
++ * Falcon registers
++ *
++ **************************************************************************
++ */
++
++/* Address region register */
++#define ADR_REGION_REG_KER 0x00
++#define ADR_REGION0_LBN 0
++#define ADR_REGION0_WIDTH 18
++#define ADR_REGION1_LBN 32
++#define ADR_REGION1_WIDTH 18
++#define ADR_REGION2_LBN 64
++#define ADR_REGION2_WIDTH 18
++#define ADR_REGION3_LBN 96
++#define ADR_REGION3_WIDTH 18
++
++/* Interrupt enable register */
++#define INT_EN_REG_KER 0x0010
++#define INT_LEVEL_SEL_LBN 8
++#define INT_LEVEL_SEL_WIDTH 6
++#define MEM_PERR_INT_EN_KER_LBN 5
++#define MEM_PERR_INT_EN_KER_WIDTH 1
++#define KER_INT_CHAR_LBN 4
++#define KER_INT_CHAR_WIDTH 1
++#define KER_INT_KER_LBN 3
++#define KER_INT_KER_WIDTH 1
++#define ILL_ADR_ERR_INT_EN_KER_LBN 2
++#define ILL_ADR_ERR_INT_EN_KER_WIDTH 1
++#define SRM_PERR_INT_EN_KER_LBN 1
++#define SRM_PERR_INT_EN_KER_WIDTH 1
++#define DRV_INT_EN_KER_LBN 0
++#define DRV_INT_EN_KER_WIDTH 1
++
++/* Interrupt status address register */
++#define INT_ADR_REG_KER 0x0030
++#define NORM_INT_VEC_DIS_KER_LBN 64
++#define NORM_INT_VEC_DIS_KER_WIDTH 1
++#define INT_ADR_KER_LBN 0
++#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
++
++/* Interrupt status register (B0 only) */
++#define INT_ISR0_B0 0x90
++#define INT_ISR1_B0 0xA0
++
++/* Interrupt acknowledge register (A0/A1 only) */
++#define INT_ACK_REG_KER_A1 0x0050
++#define INT_ACK_DUMMY_DATA_LBN 0
++#define INT_ACK_DUMMY_DATA_WIDTH 32
++
++/* Interrupt acknowledge work-around register (A0/A1 only )*/
++#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
++
++/* Hardware initialisation register */
++#define HW_INIT_REG_KER 0x00c0
++#define BCSR_TARGET_MASK_LBN 101
++#define BCSR_TARGET_MASK_WIDTH 4
++#define PCIE_TIMEOUT_DIS_LBN 123
++#define PCIE_TIMEOUT_DIS_WIDTH 1
++#define B2B_REQ_EN_B0_LBN 45
++#define B2B_REQ_EN_B0_WIDTH 1
++#define FC_BLOCKING_EN_B0_LBN 44
++#define FC_BLOCKING_EN_B0_WIDTH 1
++
++/* SPI host command register */
++#define EE_SPI_HCMD_REG_KER 0x0100
++#define EE_SPI_HCMD_CMD_EN_LBN 31
++#define EE_SPI_HCMD_CMD_EN_WIDTH 1
++#define EE_WR_TIMER_ACTIVE_LBN 28
++#define EE_WR_TIMER_ACTIVE_WIDTH 1
++#define EE_SPI_HCMD_SF_SEL_LBN 24
++#define EE_SPI_HCMD_SF_SEL_WIDTH 1
++#define EE_SPI_EEPROM 0
++#define EE_SPI_FLASH 1
++#define EE_SPI_HCMD_DABCNT_LBN 16
++#define EE_SPI_HCMD_DABCNT_WIDTH 5
++#define EE_SPI_HCMD_READ_LBN 15
++#define EE_SPI_HCMD_READ_WIDTH 1
++#define EE_SPI_READ 1
++#define EE_SPI_WRITE 0
++#define EE_SPI_HCMD_DUBCNT_LBN 12
++#define EE_SPI_HCMD_DUBCNT_WIDTH 2
++#define EE_SPI_HCMD_ADBCNT_LBN 8
++#define EE_SPI_HCMD_ADBCNT_WIDTH 2
++#define EE_SPI_HCMD_ENC_LBN 0
++#define EE_SPI_HCMD_ENC_WIDTH 8
++
++/* SPI host address register */
++#define EE_SPI_HADR_REG_KER 0x0110
++#define EE_SPI_HADR_DUBYTE_LBN 24
++#define EE_SPI_HADR_DUBYTE_WIDTH 8
++#define EE_SPI_HADR_ADR_LBN 0
++#define EE_SPI_HADR_ADR_WIDTH 24
++
++/* SPI host data register */
++#define EE_SPI_HDATA_REG_KER 0x0120
++#define EE_SPI_HDATA3_LBN 96
++#define EE_SPI_HDATA3_WIDTH 32
++#define EE_SPI_HDATA2_LBN 64
++#define EE_SPI_HDATA2_WIDTH 32
++#define EE_SPI_HDATA1_LBN 32
++#define EE_SPI_HDATA1_WIDTH 32
++#define EE_SPI_HDATA0_LBN 0
++#define EE_SPI_HDATA0_WIDTH 32
++
++/* SPI/VPD config register */
++#define EE_VPD_CFG_REG_KER 0x0140
++#define EE_VPD_EN_LBN 0
++#define EE_VPD_EN_WIDTH 1
++#define EE_VPD_EN_AD9_MODE_LBN 1
++#define EE_VPD_EN_AD9_MODE_WIDTH 1
++#define EE_EE_CLOCK_DIV_LBN 112
++#define EE_EE_CLOCK_DIV_WIDTH 7
++#define EE_SF_CLOCK_DIV_LBN 120
++#define EE_SF_CLOCK_DIV_WIDTH 7
++
++/* PCIE CORE ACCESS REG */
++#define PCIE_CORE_INDIRECT_REG 0x01f0
++#define PCIE_CORE_ADDR_LBN 0
++#define PCIE_CORE_ADDR_WIDTH 12
++#define PCIE_CORE_RW_LBN 15
++#define PCIE_CORE_RW_WIDTH 1
++#define PCIE_CORE_VALUE_LBN 32
++#define PCIE_CORE_VALUE_WIDTH 32
++
++#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
++#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
++#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
++#define PCIE_CORE_ACK_TL_LBN 0
++#define PCIE_CORE_ACK_TL_WIDTH 16
++#define PCIE_CORE_RPL_TL_LBN 16
++#define PCIE_CORE_RPL_TL_WIDTH 16
++
++#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
++#define PCIE_CORE_ACK_FREQ_LBN 0
++#define PCIE_CORE_ACK_FREQ_WIDTH 7
++
++/* NIC status register */
++#define NIC_STAT_REG 0x0200
++#define EE_STRAP_EN_LBN 31
++#define EE_STRAP_EN_WIDTH 1
++#define EE_STRAP_OVR_LBN 24
++#define EE_STRAP_OVR_WIDTH 4
++#define ONCHIP_SRAM_LBN 16
++#define ONCHIP_SRAM_WIDTH 1
++#define SF_PRST_LBN 9
++#define SF_PRST_WIDTH 1
++#define EE_PRST_LBN 8
++#define EE_PRST_WIDTH 1
++#define EE_STRAP_LBN 7
++#define EE_STRAP_WIDTH 1
++/* See pic_mode_t for decoding of this field */
++#define STRAP_ISCSI_EN_LBN 3
++#define STRAP_ISCSI_EN_WIDTH 1
++#define STRAP_PINS_LBN 0
++#define STRAP_PINS_WIDTH 3
++/* These bit definitions are extrapolated from the list of numerical
++ * values for STRAP_PINS.
++ */
++#define STRAP_10G_LBN 2
++#define STRAP_10G_WIDTH 1
++#define STRAP_DUAL_PORT_LBN 1
++#define STRAP_DUAL_PORT_WIDTH 1
++#define STRAP_PCIE_LBN 0
++#define STRAP_PCIE_WIDTH 1
++
++#define FLASH_PRESENT_LBN 7
++#define FLASH_PRESENT_WIDTH 1
++#define EEPROM_PRESENT_LBN 6
++#define EEPROM_PRESENT_WIDTH 1
++#define BOOTED_USING_NVDEVICE_LBN 3
++#define BOOTED_USING_NVDEVICE_WIDTH 1
++
++/* GPIO control register */
++
++#define GPIO_CTL_REG_KER 0x0210
++
++#define GPIO_USE_NIC_CLK_LBN (30)
++#define GPIO_USE_NIC_CLK_WIDTH (1)
++
++#define GPIO_OUTPUTS_LBN (16)
++#define GPIO_OUTPUTS_WIDTH (4)
++
++#define GPIO_INPUTS_LBN (8)
++#define GPIO_INPUT_WIDTH (4)
++
++#define GPIO_DIRECTION_LBN (24)
++#define GPIO_DIRECTION_WIDTH (4)
++#define GPIO_DIRECTION_OUT (1)
++#define GPIO_SRAM_SLEEP (1 << 1)
++
++#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
++#define GPIO3_OEN_WIDTH 1
++#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
++#define GPIO2_OEN_WIDTH 1
++#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
++#define GPIO1_OEN_WIDTH 1
++#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
++#define GPIO0_OEN_WIDTH 1
++
++#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
++#define GPIO3_OUT_WIDTH 1
++#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
++#define GPIO2_OUT_WIDTH 1
++#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
++#define GPIO1_OUT_WIDTH 1
++#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
++#define GPIO0_OUT_WIDTH 1
++
++#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
++#define GPIO3_IN_WIDTH 1
++#define GPIO2_IN_LBN (GPIO_INPUTS_LBN + 2)
++#define GPIO2_IN_WIDTH 1
++#define GPIO1_IN_LBN (GPIO_INPUTS_LBN + 1)
++#define GPIO1_IN_WIDTH 1
++#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
++#define GPIO0_IN_WIDTH 1
++
++/* Global control register */
++#define GLB_CTL_REG_KER 0x0220
++#define EXT_PHY_RST_CTL_LBN 63
++#define EXT_PHY_RST_CTL_WIDTH 1
++#define PCIE_SD_RST_CTL_LBN 61
++#define PCIE_SD_RST_CTL_WIDTH 1
++#define PCIX_RST_CTL_LBN 60
++#define PCIX_RST_CTL_WIDTH 1
++#define PCIE_STCK_RST_CTL_LBN 59
++#define PCIE_STCK_RST_CTL_WIDTH 1
++#define PCIE_NSTCK_RST_CTL_LBN 58
++#define PCIE_NSTCK_RST_CTL_WIDTH 1
++#define PCIE_CORE_RST_CTL_LBN 57
++#define PCIE_CORE_RST_CTL_WIDTH 1
++#define EE_RST_CTL_LBN 49
++#define EE_RST_CTL_WIDTH 1
++#define CS_RST_CTL_LBN 48
++#define CS_RST_CTL_WIDTH 1
++#define RST_EXT_PHY_LBN 31
++#define RST_EXT_PHY_WIDTH 1
++#define RST_XGRX_LBN 24
++#define RST_XGRX_WIDTH 1
++#define RST_XGTX_LBN 23
++#define RST_XGTX_WIDTH 1
++#define RST_EM_LBN 22
++#define RST_EM_WIDTH 1
++#define INT_RST_DUR_LBN 4
++#define INT_RST_DUR_WIDTH 3
++#define EXT_PHY_RST_DUR_LBN 1
++#define EXT_PHY_RST_DUR_WIDTH 3
++#define SWRST_LBN 0
++#define SWRST_WIDTH 1
++#define INCLUDE_IN_RESET 0
++#define EXCLUDE_FROM_RESET 1
++
++/* Fatal interrupt register */
++#define FATAL_INTR_REG_KER 0x0230
++#define PCI_BUSERR_INT_KER_EN_LBN 43
++#define PCI_BUSERR_INT_KER_EN_WIDTH 1
++#define SRAM_OOB_INT_KER_EN_LBN 42
++#define SRAM_OOB_INT_KER_EN_WIDTH 1
++#define BUFID_OOB_INT_KER_EN_LBN 41
++#define BUFID_OOB_INT_KER_EN_WIDTH 1
++#define MEM_PERR_INT_KER_EN_LBN 40
++#define MEM_PERR_INT_KER_EN_WIDTH 1
++#define RBUF_OWN_INT_KER_EN_LBN 39
++#define RBUF_OWN_INT_KER_EN_WIDTH 1
++#define TBUF_OWN_INT_KER_EN_LBN 38
++#define TBUF_OWN_INT_KER_EN_WIDTH 1
++#define RDESCQ_OWN_INT_KER_EN_LBN 37
++#define RDESCQ_OWN_INT_KER_EN_WIDTH 1
++#define TDESCQ_OWN_INT_KER_EN_LBN 36
++#define TDESCQ_OWN_INT_KER_EN_WIDTH 1
++#define EVQ_OWN_INT_KER_EN_LBN 35
++#define EVQ_OWN_INT_KER_EN_WIDTH 1
++#define EVFF_OFLO_INT_KER_EN_LBN 34
++#define EVFF_OFLO_INT_KER_EN_WIDTH 1
++#define ILL_ADR_INT_KER_EN_LBN 33
++#define ILL_ADR_INT_KER_EN_WIDTH 1
++#define SRM_PERR_INT_KER_EN_LBN 32
++#define SRM_PERR_INT_KER_EN_WIDTH 1
++#define MEM_PERR_INT_KER_LBN 8
++#define MEM_PERR_INT_KER_WIDTH 1
++#define INT_KER_ERROR_LBN 0
++#define INT_KER_ERROR_WIDTH 12
++
++#define DP_CTRL_REG 0x250
++#define FLS_EVQ_ID_LBN 0
++#define FLS_EVQ_ID_WIDTH 11
++
++#define MEM_STAT_REG_KER 0x260
++
++/* Debug probe register */
++#define DEBUG_REG_KER 0x0270
++#define DEBUG_BLK_SEL2_LBN 47
++#define DEBUG_BLK_SEL2_WIDTH 3
++#define DEBUG_BLK_SEL1_LBN 44
++#define DEBUG_BLK_SEL1_WIDTH 3
++#define DEBUG_BLK_SEL0_LBN 41
++#define DEBUG_BLK_SEL0_WIDTH 3
++#define DEBUG_BLK_SEL_MISC 7
++#define DEBUG_BLK_SEL_SERDES 6
++#define DEBUG_BLK_SEL_EM 5
++#define DEBUG_BLK_SEL_SR 4
++#define DEBUG_BLK_SEL_EV 3
++#define DEBUG_BLK_SEL_RX 2
++#define DEBUG_BLK_SEL_TX 1
++#define DEBUG_BLK_SEL_BIU 0
++#define MISC_DEBUG_ADDR_LBN 36
++#define MISC_DEBUG_ADDR_WIDTH 5
++#define SERDES_DEBUG_ADDR_LBN 31
++#define SERDES_DEBUG_ADDR_WIDTH 5
++#define EM_DEBUG_ADDR_LBN 26
++#define EM_DEBUG_ADDR_WIDTH 5
++#define SR_DEBUG_ADDR_LBN 21
++#define SR_DEBUG_ADDR_WIDTH 5
++#define EV_DEBUG_ADDR_LBN 16
++#define EV_DEBUG_ADDR_WIDTH 5
++#define RX_DEBUG_ADDR_LBN 11
++#define RX_DEBUG_ADDR_WIDTH 5
++#define TX_DEBUG_ADDR_LBN 5
++#define TX_DEBUG_ADDR_WIDTH 5
++#define BIU_DEBUG_ADDR_LBN 1
++#define BIU_DEBUG_ADDR_WIDTH 5
++#define DEBUG_DIS_LBN 0
++#define DEBUG_DIS_WIDTH 1
++
++/* Scratch register 0 */
++#define DRIVER_REG0_KER 0x0280
++#define DRIVER_DW0_LBN 0
++#define DRIVER_DW0_WIDTH 32
++
++/* Scratch register 1 */
++#define DRIVER_REG1_KER 0x0290
++#define DRIVER_DW1_LBN 0
++#define DRIVER_DW1_WIDTH 32
++
++/* Scratch register 2 */
++#define DRIVER_REG2_KER 0x02A0
++#define DRIVER_DW2_LBN 0
++#define DRIVER_DW2_WIDTH 32
++
++/* Scratch register 3 */
++#define DRIVER_REG3_KER 0x02B0
++#define DRIVER_DW3_LBN 0
++#define DRIVER_DW3_WIDTH 32
++
++/* Scratch register 4 */
++#define DRIVER_REG4_KER 0x02C0
++#define DRIVER_DW4_LBN 0
++#define DRIVER_DW4_WIDTH 32
++
++/* Scratch register 5 */
++#define DRIVER_REG5_KER 0x02D0
++#define DRIVER_DW5_LBN 0
++#define DRIVER_DW5_WIDTH 32
++
++/* Scratch register 6 */
++#define DRIVER_REG6_KER 0x02E0
++#define DRIVER_DW6_LBN 0
++#define DRIVER_DW6_WIDTH 32
++
++/* Scratch register 7 */
++#define DRIVER_REG7_KER 0x02F0
++#define DRIVER_DW7_LBN 0
++#define DRIVER_DW7_WIDTH 32
++
++/* FPGA build version */
++#define ALTERA_BUILD_REG_KER 0x0300
++#define VER_MAJOR_LBN 24
++#define VER_MAJOR_WIDTH 8
++#define VER_MINOR_LBN 16
++#define VER_MINOR_WIDTH 8
++#define VER_BUILD_LBN 0
++#define VER_BUILD_WIDTH 16
++#define VER_ALL_LBN 0
++#define VER_ALL_WIDTH 32
++
++/* Spare EEPROM bits register (flash 0x390) */
++#define SPARE_REG_KER 0x310
++#define MEM_PERR_EN_LBN 64
++#define MEM_PERR_EN_WIDTH 38
++#define MEM_PERR_EN_TX_DATA_LBN 72
++#define MEM_PERR_EN_TX_DATA_WIDTH 2
++#define SPARE_EE_BITS_LBN 1
++#define SPARE_EE_BITS_WIDTH 31
++#define PCIE_LEGACY_ENDPOINT_LBN 0
++#define PCIE_LEGACY_ENDPOINT_WIDTH 1
++
++/* Page mapped view of bottom 1024 EVQ RPTRS */
++#define EVQ_RPTR_REG_P0 0x400
++/* Bit definitions are as for the densely mapped
++ * RPTR registers. */
++
++/* Timer table for kernel access */
++#define TIMER_CMD_REG_KER 0x420
++#define TIMER_MODE_LBN 12
++#define TIMER_MODE_WIDTH 2
++#define TIMER_MODE_DIS 0
++#define TIMER_MODE_INT_HLDOFF 2
++#define TIMER_VAL_LBN 0
++#define TIMER_VAL_WIDTH 12
++
++/* Driver generated event register */
++#define DRV_EV_REG_KER 0x440
++#define DRV_EV_QID_LBN 64
++#define DRV_EV_QID_WIDTH 12
++#define DRV_EV_DATA_LBN 0
++#define DRV_EV_DATA_WIDTH 64
++
++/* Event Queue control register */
++#define EVQ_CTL_REG_KER 0x450
++#define EVQ_FIFO_NOTAF_TH_LBN 0
++#define EVQ_FIFO_NOTAF_TH_WIDTH 6
++#define EVQ_FIFO_AF_TH_LBN 8
++#define EVQ_FIFO_AF_TH_WIDTH 6
++
++/* Buffer table configuration register */
++#define BUF_TBL_CFG_REG_KER 0x600
++#define BUF_TBL_MODE_LBN 3
++#define BUF_TBL_MODE_WIDTH 1
++#define BUF_TBL_MODE_HALF 0
++#define BUF_TBL_MODE_FULL 1
++
++/* SRAM receive descriptor cache configuration register */
++#define SRM_RX_DC_CFG_REG_KER 0x610
++#define SRM_RX_DC_BASE_ADR_LBN 0
++#define SRM_RX_DC_BASE_ADR_WIDTH 21
++
++/* SRAM transmit descriptor cache configuration register */
++#define SRM_TX_DC_CFG_REG_KER 0x620
++#define SRM_TX_DC_BASE_ADR_LBN 0
++#define SRM_TX_DC_BASE_ADR_WIDTH 21
++
++/* SRAM configuration register */
++#define SRM_CFG_REG_KER 0x630
++#define SRAM_OOB_ADR_INTEN_LBN 5
++#define SRAM_OOB_ADR_INTEN_WIDTH 1
++#define SRAM_OOB_BUF_INTEN_LBN 4
++#define SRAM_OOB_BUF_INTEN_WIDTH 1
++#define SRAM_OOB_BT_INIT_EN_LBN 3
++#define SRAM_OOB_BT_INIT_EN_WIDTH 1
++#define SRM_NUM_BANK_LBN 2
++#define SRM_NUM_BANK_WIDTH 1
++#define SRM_BANK_SIZE_LBN 0
++#define SRM_BANK_SIZE_WIDTH 2
++#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
++#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
++#define SRM_NB_BSZ_1BANKS_2M 0
++#define SRM_NB_BSZ_1BANKS_4M 1
++#define SRM_NB_BSZ_1BANKS_8M 2
++#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
++#define SRM_NB_BSZ_2BANKS_4M 4
++#define SRM_NB_BSZ_2BANKS_8M 5
++#define SRM_NB_BSZ_2BANKS_16M 6
++#define SRM_NB_BSZ_RESERVED 7
++
++/* Special buffer table update register */
++#define BUF_TBL_UPD_REG_KER 0x0650
++#define BUF_UPD_CMD_LBN 63
++#define BUF_UPD_CMD_WIDTH 1
++#define BUF_CLR_CMD_LBN 62
++#define BUF_CLR_CMD_WIDTH 1
++#define BUF_CLR_END_ID_LBN 32
++#define BUF_CLR_END_ID_WIDTH 20
++#define BUF_CLR_START_ID_LBN 0
++#define BUF_CLR_START_ID_WIDTH 20
++
++/* Receive configuration register */
++#define RX_CFG_REG_KER 0x800
++
++/* B0 */
++#define RX_TOEP_TCP_SUPPRESS_B0_LBN 48
++#define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++#define RX_INGR_EN_B0_LBN 47
++#define RX_INGR_EN_B0_WIDTH 1
++#define RX_TOEP_IPV4_B0_LBN 46
++#define RX_TOEP_IPV4_B0_WIDTH 1
++#define RX_HASH_ALG_B0_LBN 45
++#define RX_HASH_ALG_B0_WIDTH 1
++#define RX_HASH_INSERT_HDR_B0_LBN 44
++#define RX_HASH_INSERT_HDR_B0_WIDTH 1
++#define RX_DESC_PUSH_EN_B0_LBN 43
++#define RX_DESC_PUSH_EN_B0_WIDTH 1
++#define RX_RDW_PATCH_EN_LBN 42 /* Non head of line blocking */
++#define RX_RDW_PATCH_EN_WIDTH 1
++#define RX_PCI_BURST_SIZE_B0_LBN 39
++#define RX_PCI_BURST_SIZE_B0_WIDTH 3
++#define RX_OWNERR_CTL_B0_LBN 38
++#define RX_OWNERR_CTL_B0_WIDTH 1
++#define RX_XON_TX_TH_B0_LBN 33
++#define RX_XON_TX_TH_B0_WIDTH 5
++#define RX_XOFF_TX_TH_B0_LBN 28
++#define RX_XOFF_TX_TH_B0_WIDTH 5
++#define RX_USR_BUF_SIZE_B0_LBN 19
++#define RX_USR_BUF_SIZE_B0_WIDTH 9
++#define RX_XON_MAC_TH_B0_LBN 10
++#define RX_XON_MAC_TH_B0_WIDTH 9
++#define RX_XOFF_MAC_TH_B0_LBN 1
++#define RX_XOFF_MAC_TH_B0_WIDTH 9
++#define RX_XOFF_MAC_EN_B0_LBN 0
++#define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++/* A1 */
++#define RX_DESC_PUSH_EN_A1_LBN 35
++#define RX_DESC_PUSH_EN_A1_WIDTH 1
++#define RX_PCI_BURST_SIZE_A1_LBN 31
++#define RX_PCI_BURST_SIZE_A1_WIDTH 3
++#define RX_OWNERR_CTL_A1_LBN 30
++#define RX_OWNERR_CTL_A1_WIDTH 1
++#define RX_XON_TX_TH_A1_LBN 25
++#define RX_XON_TX_TH_A1_WIDTH 5
++#define RX_XOFF_TX_TH_A1_LBN 20
++#define RX_XOFF_TX_TH_A1_WIDTH 5
++#define RX_USR_BUF_SIZE_A1_LBN 11
++#define RX_USR_BUF_SIZE_A1_WIDTH 9
++#define RX_XON_MAC_TH_A1_LBN 6
++#define RX_XON_MAC_TH_A1_WIDTH 5
++#define RX_XOFF_MAC_TH_A1_LBN 1
++#define RX_XOFF_MAC_TH_A1_WIDTH 5
++#define RX_XOFF_MAC_EN_A1_LBN 0
++#define RX_XOFF_MAC_EN_A1_WIDTH 1
++
++/* Receive filter control register */
++#define RX_FILTER_CTL_REG 0x810
++#define SCATTER_ENBL_NO_MATCH_Q_B0_LBN 40
++#define SCATTER_ENBL_NO_MATCH_Q_B0_WIDTH 1
++#define UDP_FULL_SRCH_LIMIT_LBN 32
++#define UDP_FULL_SRCH_LIMIT_WIDTH 8
++#define NUM_KER_LBN 24
++#define NUM_KER_WIDTH 2
++#define UDP_WILD_SRCH_LIMIT_LBN 16
++#define UDP_WILD_SRCH_LIMIT_WIDTH 8
++#define TCP_WILD_SRCH_LIMIT_LBN 8
++#define TCP_WILD_SRCH_LIMIT_WIDTH 8
++#define TCP_FULL_SRCH_LIMIT_LBN 0
++#define TCP_FULL_SRCH_LIMIT_WIDTH 8
++
++/* RX queue flush register */
++#define RX_FLUSH_DESCQ_REG_KER 0x0820
++#define RX_FLUSH_DESCQ_CMD_LBN 24
++#define RX_FLUSH_DESCQ_CMD_WIDTH 1
++#define RX_FLUSH_DESCQ_LBN 0
++#define RX_FLUSH_DESCQ_WIDTH 12
++
++/* Receive descriptor update register */
++#define RX_DESC_UPD_REG_KER 0x0830
++#define RX_DESC_WPTR_LBN 96
++#define RX_DESC_WPTR_WIDTH 12
++#define RX_DESC_UPD_REG_KER_DWORD (RX_DESC_UPD_REG_KER + 12)
++#define RX_DESC_WPTR_DWORD_LBN 0
++#define RX_DESC_WPTR_DWORD_WIDTH 12
++
++/* Receive descriptor cache configuration register */
++#define RX_DC_CFG_REG_KER 0x840
++#define RX_DC_SIZE_LBN 0
++#define RX_DC_SIZE_WIDTH 2
++
++#define RX_DC_PF_WM_REG_KER 0x850
++#define RX_DC_PF_LWM_LBN 0
++#define RX_DC_PF_LWM_WIDTH 6
++
++/* RX no descriptor drop counter */
++#define RX_NODESC_DROP_REG_KER 0x880
++#define RX_NODESC_DROP_CNT_LBN 0
++#define RX_NODESC_DROP_CNT_WIDTH 16
++
++/* RX black magic register */
++#define RX_SELF_RST_REG_KER 0x890
++#define RX_ISCSI_DIS_LBN 17
++#define RX_ISCSI_DIS_WIDTH 1
++#define RX_PREFETCH_TIMEOUT_EN_LBN 10
++#define RX_PREFETCH_TIMEOUT_EN_WIDTH 1
++#define RX_NODESC_WAIT_DIS_LBN 9
++#define RX_NODESC_WAIT_DIS_WIDTH 1
++#define RX_RECOVERY_EN_LBN 8
++#define RX_RECOVERY_EN_WIDTH 1
++#define RX_SHUTDOWN_REASON_LBN 0
++#define RX_SHUTDOWN_REASON_WIDTH (3)
++
++/* TX queue flush register */
++#define TX_FLUSH_DESCQ_REG_KER 0x0a00
++#define TX_FLUSH_DESCQ_CMD_LBN 12
++#define TX_FLUSH_DESCQ_CMD_WIDTH 1
++#define TX_FLUSH_DESCQ_LBN 0
++#define TX_FLUSH_DESCQ_WIDTH 12
++
++/* Transmit descriptor update register */
++#define TX_DESC_UPD_REG_KER 0x0a10
++#define TX_DESC_WPTR_LBN 96
++#define TX_DESC_WPTR_WIDTH 12
++#define TX_DESC_UPD_REG_KER_DWORD (TX_DESC_UPD_REG_KER + 12)
++#define TX_DESC_WPTR_DWORD_LBN 0
++#define TX_DESC_WPTR_DWORD_WIDTH 12
++
++/* Transmit descriptor cache configuration register */
++#define TX_DC_CFG_REG_KER 0xa20
++#define TX_DC_SIZE_LBN 0
++#define TX_DC_SIZE_WIDTH 2
++
++/* Transmit checksum configuration register (A0/A1 only) */
++#define TX_CHKSM_CFG_REG_KER_A1 0xa30
++
++/* Transmit configuration register */
++#define TX_CFG_REG_KER 0xa50
++#define TX_NO_EOP_DISC_EN_LBN 5
++#define TX_NO_EOP_DISC_EN_WIDTH 1
++
++/* Transmit configuration register 2 */
++#define TX_CFG2_REG_KER 0xa80
++#define TX_CSR_PUSH_EN_LBN 89
++#define TX_CSR_PUSH_EN_WIDTH 1
++#define TX_RX_SPACER_LBN 64
++#define TX_RX_SPACER_WIDTH 8
++#define TX_SW_EV_EN_LBN 59
++#define TX_SW_EV_EN_WIDTH 1
++#define TX_RX_SPACER_EN_LBN 57
++#define TX_RX_SPACER_EN_WIDTH 1
++#define TX_PREF_WD_TMR_LBN 22
++#define TX_PREF_WD_TMR_WIDTH 22
++#define TX_PREF_THRESHOLD_LBN 19
++#define TX_PREF_THRESHOLD_WIDTH 2
++#define TX_ONE_PKT_PER_Q_LBN 18
++#define TX_ONE_PKT_PER_Q_WIDTH 1
++#define TX_DIS_NON_IP_EV_LBN 17
++#define TX_DIS_NON_IP_EV_WIDTH 1
++#define TX_DMA_SPACER_LBN 8
++#define TX_DMA_SPACER_WIDTH 8
++#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
++#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
++#define TX_TCP_DIS_A1_LBN 7
++#define TX_TCP_DIS_A1_WIDTH 1
++#define TX_IP_DIS_A1_LBN 6
++#define TX_IP_DIS_A1_WIDTH 1
++#define TX_MAX_CPL_LBN 2
++#define TX_MAX_CPL_WIDTH 2
++#define TX_MAX_PREF_LBN 0
++#define TX_MAX_PREF_WIDTH 2
++
++/* Transmit VLAN filter control register */
++#define TX_VLAN_REG 0xae0
++
++/* PHY management transmit data register */
++#define MD_TXD_REG_KER 0xc00
++#define MD_TXD_LBN 0
++#define MD_TXD_WIDTH 16
++
++/* PHY management receive data register */
++#define MD_RXD_REG_KER 0xc10
++#define MD_RXD_LBN 0
++#define MD_RXD_WIDTH 16
++
++/* PHY management configuration & status register */
++#define MD_CS_REG_KER 0xc20
++#define MD_PT_LBN 7
++#define MD_PT_WIDTH 3
++#define MD_PL_LBN 6
++#define MD_PL_WIDTH 1
++#define MD_INT_CLR_LBN 5
++#define MD_INT_CLR_WIDTH 1
++#define MD_GC_LBN 4
++#define MD_GC_WIDTH 1
++#define MD_PRSP_LBN 3
++#define MD_PRSP_WIDTH 1
++#define MD_RIC_LBN 2
++#define MD_RIC_WIDTH 1
++#define MD_RDC_LBN 1
++#define MD_RDC_WIDTH 1
++#define MD_WRC_LBN 0
++#define MD_WRC_WIDTH 1
++
++/* PHY management PHY address register */
++#define MD_PHY_ADR_REG_KER 0xc30
++#define MD_PHY_ADR_LBN 0
++#define MD_PHY_ADR_WIDTH 16
++
++/* PHY management ID register */
++#define MD_ID_REG_KER 0xc40
++#define MD_PRT_ADR_LBN 11
++#define MD_PRT_ADR_WIDTH 5
++#define MD_DEV_ADR_LBN 6
++#define MD_DEV_ADR_WIDTH 5
++/* Used for writing both at once */
++#define MD_PRT_DEV_ADR_LBN 6
++#define MD_PRT_DEV_ADR_WIDTH 10
++
++/* PHY management status & mask register (DWORD read only) */
++#define MD_STAT_REG_KER 0xc50
++#define MD_PINT_LBN 4
++#define MD_PINT_WIDTH 1
++#define MD_DONE_LBN 3
++#define MD_DONE_WIDTH 1
++#define MD_BSERR_LBN 2
++#define MD_BSERR_WIDTH 1
++#define MD_LNFL_LBN 1
++#define MD_LNFL_WIDTH 1
++#define MD_BSY_LBN 0
++#define MD_BSY_WIDTH 1
++
++/* Port 0 and 1 MAC stats registers */
++#define MAC0_STAT_DMA_REG_KER 0xc60
++#define MAC1_STAT_DMA_REG_KER 0xc70
++#define MAC_STAT_DMA_CMD_LBN 48
++#define MAC_STAT_DMA_CMD_WIDTH 1
++#define MAC_STAT_DMA_REGION_LBN 46
++#define MAC_STAT_DMA_REGION_WIDTH 2
++#define MAC_STAT_DMA_ADR_LBN 0
++#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
++
++/* Port 0 and 1 MAC control registers */
++#define MAC0_CTRL_REG_KER 0xc80
++#define MAC1_CTRL_REG_KER 0xc90
++#define MAC_XOFF_VAL_LBN 16
++#define MAC_XOFF_VAL_WIDTH 16
++#define TXFIFO_DRAIN_EN_B0_LBN 7
++#define TXFIFO_DRAIN_EN_B0_WIDTH 1
++#define MAC_XG_DISTXCRC_LBN 5
++#define MAC_XG_DISTXCRC_WIDTH 1
++#define MAC_BCAD_ACPT_LBN 4
++#define MAC_BCAD_ACPT_WIDTH 1
++#define MAC_UC_PROM_LBN 3
++#define MAC_UC_PROM_WIDTH 1
++#define MAC_LINK_STATUS_LBN 2
++#define MAC_LINK_STATUS_WIDTH 1
++#define MAC_SPEED_LBN 0
++#define MAC_SPEED_WIDTH 2
++
++/* External interrupt control (replaces MAC1_CTRL in B0) */
++#define GEN_MODE_REG_KER 0xc90
++#define XFP_PHY_INT_POL_SEL_LBN 3
++#define XFP_PHY_INT_POL_SEL_WIDTH 1
++#define XG_PHY_INT_POL_SEL_LBN 2
++#define XG_PHY_INT_POL_SEL_WIDTH 1
++#define XFP_PHY_INT_MASK_LBN 1
++#define XFP_PHY_INT_MASK_WIDTH 1
++#define XG_PHY_INT_MASK_LBN 0
++#define XG_PHY_INT_MASK_WIDTH 1
++
++/* 10G XAUI XGXS default values */
++#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
++#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
++#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
++
++/* Multicast address hash table */
++#define MAC_MCAST_HASH_REG0_KER 0xca0
++#define MAC_MCAST_HASH_REG1_KER 0xcb0
++
++/* MAC test register. */
++#define MAC_TEST_REG_KER 0xcc0
++#define MAC_PTLOOP_EN_LBN 0
++#define MAC_PTLOOP_EN_WIDTH 1
++
++/* GMAC registers */
++#define FALCON_GMAC_REGBANK 0xe00
++#define FALCON_GMAC_REGBANK_SIZE 0x200
++#define FALCON_GMAC_REG_SIZE 0x10
++
++/* Source MAC filter control register */
++#define TX_SRC_MAC_CTRL_REG 0x1100
++
++/* XMAC registers */
++#define FALCON_XMAC_REGBANK 0x1200
++#define FALCON_XMAC_REGBANK_SIZE 0x200
++#define FALCON_XMAC_REG_SIZE 0x10
++
++/* XGMAC address register low */
++#define XM_ADR_LO_REG_MAC 0x00
++#define XM_ADR_3_LBN 24
++#define XM_ADR_3_WIDTH 8
++#define XM_ADR_2_LBN 16
++#define XM_ADR_2_WIDTH 8
++#define XM_ADR_1_LBN 8
++#define XM_ADR_1_WIDTH 8
++#define XM_ADR_0_LBN 0
++#define XM_ADR_0_WIDTH 8
++
++/* XGMAC address register high */
++#define XM_ADR_HI_REG_MAC 0x01
++#define XM_ADR_5_LBN 8
++#define XM_ADR_5_WIDTH 8
++#define XM_ADR_4_LBN 0
++#define XM_ADR_4_WIDTH 8
++
++/* XGMAC global configuration */
++#define XM_GLB_CFG_REG_MAC 0x02
++#define XM_LINE_LB_DEEP_RSVD_LBN 28
++#define XM_LINE_LB_DEEP_RSVD_WIDTH 1
++#define XM_RMTFLT_GEN_LBN 17
++#define XM_RMTFLT_GEN_WIDTH 1
++#define XM_DEBUG_MODE_LBN 16
++#define XM_DEBUG_MODE_WIDTH 1
++#define XM_RX_STAT_EN_LBN 11
++#define XM_RX_STAT_EN_WIDTH 1
++#define XM_TX_STAT_EN_LBN 10
++#define XM_TX_STAT_EN_WIDTH 1
++#define XM_RX_JUMBO_MODE_LBN 6
++#define XM_RX_JUMBO_MODE_WIDTH 1
++#define XM_WAN_MODE_LBN 5
++#define XM_WAN_MODE_WIDTH 1
++#define XM_AUTOCLR_MODE_LBN 4
++#define XM_AUTOCLR_MODE_WIDTH 1
++#define XM_INTCLR_MODE_LBN 3
++#define XM_INTCLR_MODE_WIDTH 1
++#define XM_CORE_RST_LBN 0
++#define XM_CORE_RST_WIDTH 1
++
++/* XGMAC transmit configuration */
++#define XM_TX_CFG_REG_MAC 0x03
++#define XM_TX_PROG_LBN 24
++#define XM_TX_PROG_WIDTH 1
++#define XM_IPG_LBN 16
++#define XM_IPG_WIDTH 4
++#define XM_FCNTL_LBN 10
++#define XM_FCNTL_WIDTH 1
++#define XM_TXCRC_LBN 8
++#define XM_TXCRC_WIDTH 1
++#define XM_EDRC_LBN 6
++#define XM_EDRC_WIDTH 1
++#define XM_AUTO_PAD_LBN 5
++#define XM_AUTO_PAD_WIDTH 1
++#define XM_TX_PRMBL_LBN 2
++#define XM_TX_PRMBL_WIDTH 1
++#define XM_TXEN_LBN 1
++#define XM_TXEN_WIDTH 1
++#define XM_TX_RST_LBN 0
++#define XM_TX_RST_WIDTH 1
++
++/* XGMAC receive configuration */
++#define XM_RX_CFG_REG_MAC 0x04
++#define XM_PASS_LENERR_LBN 26
++#define XM_PASS_LENERR_WIDTH 1
++#define XM_PASS_CRC_ERR_LBN 25
++#define XM_PASS_CRC_ERR_WIDTH 1
++#define XM_PASS_PRMBLE_ERR_LBN 24
++#define XM_PASS_PRMBLE_ERR_WIDTH 1
++#define XM_ACPT_ALL_MCAST_LBN 11
++#define XM_ACPT_ALL_MCAST_WIDTH 1
++#define XM_ACPT_ALL_UCAST_LBN 9
++#define XM_ACPT_ALL_UCAST_WIDTH 1
++#define XM_AUTO_DEPAD_LBN 8
++#define XM_AUTO_DEPAD_WIDTH 1
++#define XM_RXCRC_LBN 3
++#define XM_RXCRC_WIDTH 1
++#define XM_RX_PRMBL_LBN 2
++#define XM_RX_PRMBL_WIDTH 1
++#define XM_RXEN_LBN 1
++#define XM_RXEN_WIDTH 1
++#define XM_RX_RST_LBN 0
++#define XM_RX_RST_WIDTH 1
++
++/* XGMAC flow control register */
++#define XM_FC_REG_MAC 0x7
++#define XM_PAUSE_TIME_LBN 16
++#define XM_PAUSE_TIME_WIDTH 16
++#define XM_DIS_FCNTL_LBN 0
++#define XM_DIS_FCNTL_WIDTH 1
++
++/* XGMAC pause time count register */
++#define XM_PAUSE_TIME_REG_MAC 0x9
++#define XM_TX_PAUSE_CNT_LBN 16
++#define XM_TX_PAUSE_CNT_WIDTH 16
++#define XM_RX_PAUSE_CNT_LBN 0
++#define XM_RX_PAUSE_CNT_WIDTH 16
++
++/* XGMAC transmit parameter register */
++#define XM_TX_PARAM_REG_MAC 0x0d
++#define XM_TX_JUMBO_MODE_LBN 31
++#define XM_TX_JUMBO_MODE_WIDTH 1
++#define XM_MAX_TX_FRM_SIZE_LBN 16
++#define XM_MAX_TX_FRM_SIZE_WIDTH 14
++#define XM_PAD_CHAR_LBN 0
++#define XM_PAD_CHAR_WIDTH 8
++
++/* XGMAC receive parameter register */
++#define XM_RX_PARAM_REG_MAC 0x0e
++#define XM_MAX_RX_FRM_SIZE_LBN 0
++#define XM_MAX_RX_FRM_SIZE_WIDTH 14
++
++/* XGXS/XAUI powerdown/reset register */
++#define XX_PWR_RST_REG_MAC 0x10
++
++#define XX_PWRDND_EN_LBN 15
++#define XX_PWRDND_EN_WIDTH 1
++#define XX_PWRDNC_EN_LBN 14
++#define XX_PWRDNC_EN_WIDTH 1
++#define XX_PWRDNB_EN_LBN 13
++#define XX_PWRDNB_EN_WIDTH 1
++#define XX_PWRDNA_EN_LBN 12
++#define XX_PWRDNA_EN_WIDTH 1
++#define XX_RSTPLLCD_EN_LBN 9
++#define XX_RSTPLLCD_EN_WIDTH 1
++#define XX_RSTPLLAB_EN_LBN 8
++#define XX_RSTPLLAB_EN_WIDTH 1
++#define XX_RESETD_EN_LBN 7
++#define XX_RESETD_EN_WIDTH 1
++#define XX_RESETC_EN_LBN 6
++#define XX_RESETC_EN_WIDTH 1
++#define XX_RESETB_EN_LBN 5
++#define XX_RESETB_EN_WIDTH 1
++#define XX_RESETA_EN_LBN 4
++#define XX_RESETA_EN_WIDTH 1
++#define XX_RSTXGXSRX_EN_LBN 2
++#define XX_RSTXGXSRX_EN_WIDTH 1
++#define XX_RSTXGXSTX_EN_LBN 1
++#define XX_RSTXGXSTX_EN_WIDTH 1
++#define XX_RST_XX_EN_LBN 0
++#define XX_RST_XX_EN_WIDTH 1
++
++/* XGXS/XAUI powerdown/reset control register */
++#define XX_SD_CTL_REG_MAC 0x11
++#define XX_TERMADJ1_LBN 17
++#define XX_TERMADJ1_WIDTH 1
++#define XX_TERMADJ0_LBN 16
++#define XX_TERMADJ0_WIDTH 1
++#define XX_HIDRVD_LBN 15
++#define XX_HIDRVD_WIDTH 1
++#define XX_LODRVD_LBN 14
++#define XX_LODRVD_WIDTH 1
++#define XX_HIDRVC_LBN 13
++#define XX_HIDRVC_WIDTH 1
++#define XX_LODRVC_LBN 12
++#define XX_LODRVC_WIDTH 1
++#define XX_HIDRVB_LBN 11
++#define XX_HIDRVB_WIDTH 1
++#define XX_LODRVB_LBN 10
++#define XX_LODRVB_WIDTH 1
++#define XX_HIDRVA_LBN 9
++#define XX_HIDRVA_WIDTH 1
++#define XX_LODRVA_LBN 8
++#define XX_LODRVA_WIDTH 1
++#define XX_LPBKD_LBN 3
++#define XX_LPBKD_WIDTH 1
++#define XX_LPBKC_LBN 2
++#define XX_LPBKC_WIDTH 1
++#define XX_LPBKB_LBN 1
++#define XX_LPBKB_WIDTH 1
++#define XX_LPBKA_LBN 0
++#define XX_LPBKA_WIDTH 1
++
++#define XX_TXDRV_CTL_REG_MAC 0x12
++#define XX_DEQD_LBN 28
++#define XX_DEQD_WIDTH 4
++#define XX_DEQC_LBN 24
++#define XX_DEQC_WIDTH 4
++#define XX_DEQB_LBN 20
++#define XX_DEQB_WIDTH 4
++#define XX_DEQA_LBN 16
++#define XX_DEQA_WIDTH 4
++#define XX_DTXD_LBN 12
++#define XX_DTXD_WIDTH 4
++#define XX_DTXC_LBN 8
++#define XX_DTXC_WIDTH 4
++#define XX_DTXB_LBN 4
++#define XX_DTXB_WIDTH 4
++#define XX_DTXA_LBN 0
++#define XX_DTXA_WIDTH 4
++
++/* XAUI XGXS core status register */
++#define XX_FORCE_SIG_LBN 24
++#define XX_FORCE_SIG_WIDTH 8
++#define XX_FORCE_SIG_DECODE_FORCED 0xff
++#define XX_XGXS_LB_EN_LBN 23
++#define XX_XGXS_LB_EN_WIDTH 1
++#define XX_XGMII_LB_EN_LBN 22
++#define XX_XGMII_LB_EN_WIDTH 1
++#define XX_CORE_STAT_REG_MAC 0x16
++#define XX_ALIGN_DONE_LBN 20
++#define XX_ALIGN_DONE_WIDTH 1
++#define XX_SYNC_STAT_LBN 16
++#define XX_SYNC_STAT_WIDTH 4
++#define XX_SYNC_STAT_DECODE_SYNCED 0xf
++#define XX_COMMA_DET_LBN 12
++#define XX_COMMA_DET_WIDTH 4
++#define XX_COMMA_DET_DECODE_DETECTED 0xf
++#define XX_COMMA_DET_RESET 0xf
++#define XX_CHARERR_LBN 4
++#define XX_CHARERR_WIDTH 4
++#define XX_CHARERR_RESET 0xf
++#define XX_DISPERR_LBN 0
++#define XX_DISPERR_WIDTH 4
++#define XX_DISPERR_RESET 0xf
++
++/* Receive filter table */
++#define RX_FILTER_TBL0 0xF00000
++
++/* Receive descriptor pointer table */
++#define RX_DESC_PTR_TBL_KER_A1 0x11800
++#define RX_DESC_PTR_TBL_KER_B0 0xF40000
++#define RX_DESC_PTR_TBL_KER_P0 0x900
++#define RX_ISCSI_DDIG_EN_LBN 88
++#define RX_ISCSI_DDIG_EN_WIDTH 1
++#define RX_ISCSI_HDIG_EN_LBN 87
++#define RX_ISCSI_HDIG_EN_WIDTH 1
++#define RX_DESC_PREF_ACT_LBN 86
++#define RX_DESC_PREF_ACT_WIDTH 1
++#define RX_DC_HW_RPTR_LBN 80
++#define RX_DC_HW_RPTR_WIDTH 6
++#define RX_DESCQ_HW_RPTR_LBN 68
++#define RX_DESCQ_HW_RPTR_WIDTH 12
++#define RX_DESCQ_SW_WPTR_LBN 56
++#define RX_DESCQ_SW_WPTR_WIDTH 12
++#define RX_DESCQ_BUF_BASE_ID_LBN 36
++#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
++#define RX_DESCQ_EVQ_ID_LBN 24
++#define RX_DESCQ_EVQ_ID_WIDTH 12
++#define RX_DESCQ_OWNER_ID_LBN 10
++#define RX_DESCQ_OWNER_ID_WIDTH 14
++#define RX_DESCQ_LABEL_LBN 5
++#define RX_DESCQ_LABEL_WIDTH 5
++#define RX_DESCQ_SIZE_LBN 3
++#define RX_DESCQ_SIZE_WIDTH 2
++#define RX_DESCQ_SIZE_4K 3
++#define RX_DESCQ_SIZE_2K 2
++#define RX_DESCQ_SIZE_1K 1
++#define RX_DESCQ_SIZE_512 0
++#define RX_DESCQ_TYPE_LBN 2
++#define RX_DESCQ_TYPE_WIDTH 1
++#define RX_DESCQ_JUMBO_LBN 1
++#define RX_DESCQ_JUMBO_WIDTH 1
++#define RX_DESCQ_EN_LBN 0
++#define RX_DESCQ_EN_WIDTH 1
++
++/* Transmit descriptor pointer table */
++#define TX_DESC_PTR_TBL_KER_A1 0x11900
++#define TX_DESC_PTR_TBL_KER_B0 0xF50000
++#define TX_DESC_PTR_TBL_KER_P0 0xa40
++#define TX_NON_IP_DROP_DIS_B0_LBN 91
++#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
++#define TX_IP_CHKSM_DIS_B0_LBN 90
++#define TX_IP_CHKSM_DIS_B0_WIDTH 1
++#define TX_TCP_CHKSM_DIS_B0_LBN 89
++#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
++#define TX_DESCQ_EN_LBN 88
++#define TX_DESCQ_EN_WIDTH 1
++#define TX_ISCSI_DDIG_EN_LBN 87
++#define TX_ISCSI_DDIG_EN_WIDTH 1
++#define TX_ISCSI_HDIG_EN_LBN 86
++#define TX_ISCSI_HDIG_EN_WIDTH 1
++#define TX_DC_HW_RPTR_LBN 80
++#define TX_DC_HW_RPTR_WIDTH 6
++#define TX_DESCQ_HW_RPTR_LBN 68
++#define TX_DESCQ_HW_RPTR_WIDTH 12
++#define TX_DESCQ_SW_WPTR_LBN 56
++#define TX_DESCQ_SW_WPTR_WIDTH 12
++#define TX_DESCQ_BUF_BASE_ID_LBN 36
++#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
++#define TX_DESCQ_EVQ_ID_LBN 24
++#define TX_DESCQ_EVQ_ID_WIDTH 12
++#define TX_DESCQ_OWNER_ID_LBN 10
++#define TX_DESCQ_OWNER_ID_WIDTH 14
++#define TX_DESCQ_LABEL_LBN 5
++#define TX_DESCQ_LABEL_WIDTH 5
++#define TX_DESCQ_SIZE_LBN 3
++#define TX_DESCQ_SIZE_WIDTH 2
++#define TX_DESCQ_SIZE_4K 3
++#define TX_DESCQ_SIZE_2K 2
++#define TX_DESCQ_SIZE_1K 1
++#define TX_DESCQ_SIZE_512 0
++#define TX_DESCQ_TYPE_LBN 1
++#define TX_DESCQ_TYPE_WIDTH 2
++#define TX_DESCQ_FLUSH_LBN 0
++#define TX_DESCQ_FLUSH_WIDTH 1
++
++/* Event queue pointer */
++#define EVQ_PTR_TBL_KER_A1 0x11a00
++#define EVQ_PTR_TBL_KER_B0 0xf60000
++#define EVQ_PTR_TBL_KER_P0 0x500
++#define EVQ_WKUP_OR_INT_EN_LBN 39
++#define EVQ_WKUP_OR_INT_EN_WIDTH 1
++#define EVQ_NXT_WPTR_LBN 24
++#define EVQ_NXT_WPTR_WIDTH 15
++#define EVQ_EN_LBN 23
++#define EVQ_EN_WIDTH 1
++#define EVQ_SIZE_LBN 20
++#define EVQ_SIZE_WIDTH 3
++#define EVQ_SIZE_32K 6
++#define EVQ_SIZE_16K 5
++#define EVQ_SIZE_8K 4
++#define EVQ_SIZE_4K 3
++#define EVQ_SIZE_2K 2
++#define EVQ_SIZE_1K 1
++#define EVQ_SIZE_512 0
++#define EVQ_BUF_BASE_ID_LBN 0
++#define EVQ_BUF_BASE_ID_WIDTH 20
++
++/* Event queue read pointer */
++#define EVQ_RPTR_REG_KER_A1 0x11b00
++#define EVQ_RPTR_REG_KER_B0 0xfa0000
++#define EVQ_RPTR_LBN 0
++#define EVQ_RPTR_WIDTH 14
++#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
++#define EVQ_RPTR_DWORD_LBN 0
++#define EVQ_RPTR_DWORD_WIDTH 14
++
++/* RSS indirection table */
++#define RX_RSS_INDIR_TBL_B0 0xFB0000
++#define RX_RSS_INDIR_ENT_B0_LBN 0
++#define RX_RSS_INDIR_ENT_B0_WIDTH 6
++
++/* Special buffer descriptors (full-mode) */
++#define BUF_FULL_TBL_KER_A1 0x8000
++#define BUF_FULL_TBL_KER_B0 0x800000
++#define IP_DAT_BUF_SIZE_LBN 50
++#define IP_DAT_BUF_SIZE_WIDTH 1
++#define IP_DAT_BUF_SIZE_8K 1
++#define IP_DAT_BUF_SIZE_4K 0
++#define BUF_ADR_REGION_LBN 48
++#define BUF_ADR_REGION_WIDTH 2
++#define BUF_ADR_FBUF_LBN 14
++#define BUF_ADR_FBUF_WIDTH 34
++#define BUF_OWNER_ID_FBUF_LBN 0
++#define BUF_OWNER_ID_FBUF_WIDTH 14
++
++/* Special buffer descriptors (half-mode) */
++#define BUF_HALF_TBL_KER_A1 0x8000
++#define BUF_HALF_TBL_KER_B0 0x800000
++#define BUF_ADR_HBUF_ODD_LBN 44
++#define BUF_ADR_HBUF_ODD_WIDTH 20
++#define BUF_OWNER_ID_HBUF_ODD_LBN 32
++#define BUF_OWNER_ID_HBUF_ODD_WIDTH 12
++#define BUF_ADR_HBUF_EVEN_LBN 12
++#define BUF_ADR_HBUF_EVEN_WIDTH 20
++#define BUF_OWNER_ID_HBUF_EVEN_LBN 0
++#define BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
++
++#define SRM_DBG_REG_B0 0x3000000
++
++/* Transmit descriptor */
++#define TX_KER_PORT_LBN 63
++#define TX_KER_PORT_WIDTH 1
++#define TX_KER_CONT_LBN 62
++#define TX_KER_CONT_WIDTH 1
++#define TX_KER_BYTE_CNT_LBN 48
++#define TX_KER_BYTE_CNT_WIDTH 14
++#define TX_KER_BUF_REGION_LBN 46
++#define TX_KER_BUF_REGION_WIDTH 2
++#define TX_KER_BUF_REGION0_DECODE 0
++#define TX_KER_BUF_REGION1_DECODE 1
++#define TX_KER_BUF_REGION2_DECODE 2
++#define TX_KER_BUF_REGION3_DECODE 3
++#define TX_KER_BUF_ADR_LBN 0
++#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
++
++/* Receive descriptor */
++#define RX_KER_BUF_SIZE_LBN 48
++#define RX_KER_BUF_SIZE_WIDTH 14
++#define RX_KER_BUF_REGION_LBN 46
++#define RX_KER_BUF_REGION_WIDTH 2
++#define RX_KER_BUF_REGION0_DECODE 0
++#define RX_KER_BUF_REGION1_DECODE 1
++#define RX_KER_BUF_REGION2_DECODE 2
++#define RX_KER_BUF_REGION3_DECODE 3
++#define RX_KER_BUF_ADR_LBN 0
++#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
++
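++/* Illustrative sketch, not part of the original source: the _LBN
++ * ("lowest bit number") and _WIDTH pairs above are consumed by the EFX
++ * bitfield macros (bitfield.h elsewhere in this driver).  Assuming an
++ * EFX_POPULATE_QWORD_5 macro taking (field, value) pairs and a
++ * hypothetical buffer with continuation/len/dma_addr members, a TX
++ * descriptor might be assembled as:
++ *
++ *	efx_qword_t txd;
++ *	EFX_POPULATE_QWORD_5(txd,
++ *			     TX_KER_PORT, 0,
++ *			     TX_KER_CONT, buffer->continuation,
++ *			     TX_KER_BYTE_CNT, buffer->len,
++ *			     TX_KER_BUF_REGION, 0,
++ *			     TX_KER_BUF_ADR, buffer->dma_addr);
++ */
++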
++/**************************************************************************
++ *
++ * Falcon events
++ *
++ **************************************************************************
++ */
++
++/* Event queue entries */
++#define EV_CODE_LBN 60
++#define EV_CODE_WIDTH 4
++#define RX_IP_EV_DECODE 0
++#define TX_IP_EV_DECODE 2
++#define DRIVER_EV_DECODE 5
++#define GLOBAL_EV_DECODE 6
++#define DRV_GEN_EV_DECODE 7
++#define WHOLE_EVENT_LBN 0
++#define WHOLE_EVENT_WIDTH 64
++
++/* Receive events */
++#define RX_EV_PKT_OK_LBN 56
++#define RX_EV_PKT_OK_WIDTH 1
++#define RX_EV_PAUSE_FRM_ERR_LBN 55
++#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
++#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
++#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++#define RX_EV_IF_FRAG_ERR_LBN 53
++#define RX_EV_IF_FRAG_ERR_WIDTH 1
++#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
++#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
++#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
++#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
++#define RX_EV_ETH_CRC_ERR_LBN 50
++#define RX_EV_ETH_CRC_ERR_WIDTH 1
++#define RX_EV_FRM_TRUNC_LBN 49
++#define RX_EV_FRM_TRUNC_WIDTH 1
++#define RX_EV_DRIB_NIB_LBN 48
++#define RX_EV_DRIB_NIB_WIDTH 1
++#define RX_EV_TOBE_DISC_LBN 47
++#define RX_EV_TOBE_DISC_WIDTH 1
++#define RX_EV_PKT_TYPE_LBN 44
++#define RX_EV_PKT_TYPE_WIDTH 3
++#define RX_EV_PKT_TYPE_ETH_DECODE 0
++#define RX_EV_PKT_TYPE_LLC_DECODE 1
++#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
++#define RX_EV_PKT_TYPE_VLAN_DECODE 3
++#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
++#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
++#define RX_EV_HDR_TYPE_LBN 42
++#define RX_EV_HDR_TYPE_WIDTH 2
++#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
++#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
++#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
++#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
++#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
++ ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
++#define RX_EV_DESC_Q_EMPTY_LBN 41
++#define RX_EV_DESC_Q_EMPTY_WIDTH 1
++#define RX_EV_MCAST_HASH_MATCH_LBN 40
++#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
++#define RX_EV_MCAST_PKT_LBN 39
++#define RX_EV_MCAST_PKT_WIDTH 1
++#define RX_EV_RECOVERY_FLAG_LBN 37
++#define RX_EV_RECOVERY_FLAG_WIDTH 1
++#define RX_EV_Q_LABEL_LBN 32
++#define RX_EV_Q_LABEL_WIDTH 5
++#define RX_EV_JUMBO_CONT_LBN 31
++#define RX_EV_JUMBO_CONT_WIDTH 1
++#define RX_EV_PORT_LBN 30
++#define RX_EV_PORT_WIDTH 1
++#define RX_EV_BYTE_CNT_LBN 16
++#define RX_EV_BYTE_CNT_WIDTH 14
++#define RX_EV_SOP_LBN 15
++#define RX_EV_SOP_WIDTH 1
++#define RX_ISCSI_DDIG_ERR_LBN 13
++#define RX_ISCSI_DDIG_ERR_WIDTH 1
++#define RX_ISCSI_HDIG_ERR_LBN 12
++#define RX_ISCSI_HDIG_ERR_WIDTH 1
++#define RX_EV_DESC_PTR_LBN 0
++#define RX_EV_DESC_PTR_WIDTH 12
++
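++/* Illustrative sketch, not part of the original source: assuming the
++ * EFX_QWORD_FIELD accessor from the driver's bitfield.h, a receive
++ * event might be decoded with the definitions above as:
++ *
++ *	if (EFX_QWORD_FIELD(*event, EV_CODE) == RX_IP_EV_DECODE) {
++ *		unsigned hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
++ *		int csummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type);
++ *		...
++ *	}
++ */
++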
++/* Transmit events */
++#define TX_EV_PKT_ERR_LBN 38
++#define TX_EV_PKT_ERR_WIDTH 1
++#define TX_EV_PKT_TOO_BIG_LBN 37
++#define TX_EV_PKT_TOO_BIG_WIDTH 1
++#define TX_EV_Q_LABEL_LBN 32
++#define TX_EV_Q_LABEL_WIDTH 5
++#define TX_EV_PORT_LBN 16
++#define TX_EV_PORT_WIDTH 1
++#define TX_EV_WQ_FF_FULL_LBN 15
++#define TX_EV_WQ_FF_FULL_WIDTH 1
++#define TX_EV_BUF_OWNER_ID_ERR_LBN 14
++#define TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++#define TX_EV_COMP_LBN 12
++#define TX_EV_COMP_WIDTH 1
++#define TX_EV_DESC_PTR_LBN 0
++#define TX_EV_DESC_PTR_WIDTH 12
++
++/* Driver events */
++#define DRIVER_EV_SUB_CODE_LBN 56
++#define DRIVER_EV_SUB_CODE_WIDTH 4
++#define DRIVER_EV_SUB_DATA_LBN 0
++#define DRIVER_EV_SUB_DATA_WIDTH 14
++#define TX_DESCQ_FLS_DONE_EV_DECODE 0
++#define RX_DESCQ_FLS_DONE_EV_DECODE 1
++#define EVQ_INIT_DONE_EV_DECODE 2
++#define EVQ_NOT_EN_EV_DECODE 3
++#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
++#define SRM_UPD_DONE_EV_DECODE 5
++#define WAKE_UP_EV_DECODE 6
++#define TX_PKT_NON_TCP_UDP_DECODE 9
++#define TIMER_EV_DECODE 10
++#define RX_RECOVERY_EV_DECODE 11
++#define RX_DSC_ERROR_EV_DECODE 14
++#define TX_DSC_ERROR_EV_DECODE 15
++#define DRIVER_EV_TX_DESCQ_ID_LBN 0
++#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
++#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
++#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
++#define DRIVER_EV_RX_DESCQ_ID_LBN 0
++#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
++#define DRIVER_EV_EVQ_ID_LBN 0
++#define DRIVER_EV_EVQ_ID_WIDTH 12
++#define DRIVER_EV_SRM_UPD_LBN 0
++#define DRIVER_EV_SRM_UPD_WIDTH 2
++#define SRM_CLR_EV_DECODE 0
++#define SRM_UPD_EV_DECODE 1
++#define SRM_ILLCLR_EV_DECODE 2
++
++/* Global events */
++#define RX_RECOVERY_B0_LBN 12
++#define RX_RECOVERY_B0_WIDTH 1
++#define XG_MNT_INTR_B0_LBN 11
++#define XG_MNT_INTR_B0_WIDTH 1
++
++#define RX_RECOVERY_A1_LBN 11
++#define RX_RECOVERY_A1_WIDTH 1
++
++#define XFP_PHY_INTR_LBN 10
++#define XFP_PHY_INTR_WIDTH 1
++#define XG_PHY_INTR_LBN 9
++#define XG_PHY_INTR_WIDTH 1
++#define G_PHY1_INTR_LBN 8
++#define G_PHY1_INTR_WIDTH 1
++#define G_PHY0_INTR_LBN 7
++#define G_PHY0_INTR_WIDTH 1
++
++/* Driver-generated test events */
++#define DRV_GEN_EV_CODE_LBN 60
++#define DRV_GEN_EV_CODE_WIDTH 4
++#define DRV_GEN_EV_DATA_LBN 0
++#define DRV_GEN_EV_DATA_WIDTH 60
++#define EVQ_MAGIC_LBN 0
++#define EVQ_MAGIC_WIDTH 32
++
++/**************************************************************************
++ *
++ * Falcon MAC stats
++ *
++ **************************************************************************
++ *
++ */
++
++#define GRxGoodOct_offset 0x0
++#define GRxGoodOct_WIDTH 48
++#define GRxBadOct_offset 0x8
++#define GRxBadOct_WIDTH 48
++#define GRxMissPkt_offset 0x10
++#define GRxMissPkt_WIDTH 32
++#define GRxFalseCRS_offset 0x14
++#define GRxFalseCRS_WIDTH 32
++#define GRxPausePkt_offset 0x18
++#define GRxPausePkt_WIDTH 32
++#define GRxBadPkt_offset 0x1C
++#define GRxBadPkt_WIDTH 32
++#define GRxUcastPkt_offset 0x20
++#define GRxUcastPkt_WIDTH 32
++#define GRxMcastPkt_offset 0x24
++#define GRxMcastPkt_WIDTH 32
++#define GRxBcastPkt_offset 0x28
++#define GRxBcastPkt_WIDTH 32
++#define GRxGoodLt64Pkt_offset 0x2C
++#define GRxGoodLt64Pkt_WIDTH 32
++#define GRxBadLt64Pkt_offset 0x30
++#define GRxBadLt64Pkt_WIDTH 32
++#define GRx64Pkt_offset 0x34
++#define GRx64Pkt_WIDTH 32
++#define GRx65to127Pkt_offset 0x38
++#define GRx65to127Pkt_WIDTH 32
++#define GRx128to255Pkt_offset 0x3C
++#define GRx128to255Pkt_WIDTH 32
++#define GRx256to511Pkt_offset 0x40
++#define GRx256to511Pkt_WIDTH 32
++#define GRx512to1023Pkt_offset 0x44
++#define GRx512to1023Pkt_WIDTH 32
++#define GRx1024to15xxPkt_offset 0x48
++#define GRx1024to15xxPkt_WIDTH 32
++#define GRx15xxtoJumboPkt_offset 0x4C
++#define GRx15xxtoJumboPkt_WIDTH 32
++#define GRxGtJumboPkt_offset 0x50
++#define GRxGtJumboPkt_WIDTH 32
++#define GRxFcsErr64to15xxPkt_offset 0x54
++#define GRxFcsErr64to15xxPkt_WIDTH 32
++#define GRxFcsErr15xxtoJumboPkt_offset 0x58
++#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
++#define GRxFcsErrGtJumboPkt_offset 0x5C
++#define GRxFcsErrGtJumboPkt_WIDTH 32
++#define GTxGoodBadOct_offset 0x80
++#define GTxGoodBadOct_WIDTH 48
++#define GTxGoodOct_offset 0x88
++#define GTxGoodOct_WIDTH 48
++#define GTxSglColPkt_offset 0x90
++#define GTxSglColPkt_WIDTH 32
++#define GTxMultColPkt_offset 0x94
++#define GTxMultColPkt_WIDTH 32
++#define GTxExColPkt_offset 0x98
++#define GTxExColPkt_WIDTH 32
++#define GTxDefPkt_offset 0x9C
++#define GTxDefPkt_WIDTH 32
++#define GTxLateCol_offset 0xA0
++#define GTxLateCol_WIDTH 32
++#define GTxExDefPkt_offset 0xA4
++#define GTxExDefPkt_WIDTH 32
++#define GTxPausePkt_offset 0xA8
++#define GTxPausePkt_WIDTH 32
++#define GTxBadPkt_offset 0xAC
++#define GTxBadPkt_WIDTH 32
++#define GTxUcastPkt_offset 0xB0
++#define GTxUcastPkt_WIDTH 32
++#define GTxMcastPkt_offset 0xB4
++#define GTxMcastPkt_WIDTH 32
++#define GTxBcastPkt_offset 0xB8
++#define GTxBcastPkt_WIDTH 32
++#define GTxLt64Pkt_offset 0xBC
++#define GTxLt64Pkt_WIDTH 32
++#define GTx64Pkt_offset 0xC0
++#define GTx64Pkt_WIDTH 32
++#define GTx65to127Pkt_offset 0xC4
++#define GTx65to127Pkt_WIDTH 32
++#define GTx128to255Pkt_offset 0xC8
++#define GTx128to255Pkt_WIDTH 32
++#define GTx256to511Pkt_offset 0xCC
++#define GTx256to511Pkt_WIDTH 32
++#define GTx512to1023Pkt_offset 0xD0
++#define GTx512to1023Pkt_WIDTH 32
++#define GTx1024to15xxPkt_offset 0xD4
++#define GTx1024to15xxPkt_WIDTH 32
++#define GTx15xxtoJumboPkt_offset 0xD8
++#define GTx15xxtoJumboPkt_WIDTH 32
++#define GTxGtJumboPkt_offset 0xDC
++#define GTxGtJumboPkt_WIDTH 32
++#define GTxNonTcpUdpPkt_offset 0xE0
++#define GTxNonTcpUdpPkt_WIDTH 16
++#define GTxMacSrcErrPkt_offset 0xE4
++#define GTxMacSrcErrPkt_WIDTH 16
++#define GTxIpSrcErrPkt_offset 0xE8
++#define GTxIpSrcErrPkt_WIDTH 16
++#define GDmaDone_offset 0xEC
++#define GDmaDone_WIDTH 32
++
++#define XgRxOctets_offset 0x0
++#define XgRxOctets_WIDTH 48
++#define XgRxOctetsOK_offset 0x8
++#define XgRxOctetsOK_WIDTH 48
++#define XgRxPkts_offset 0x10
++#define XgRxPkts_WIDTH 32
++#define XgRxPktsOK_offset 0x14
++#define XgRxPktsOK_WIDTH 32
++#define XgRxBroadcastPkts_offset 0x18
++#define XgRxBroadcastPkts_WIDTH 32
++#define XgRxMulticastPkts_offset 0x1C
++#define XgRxMulticastPkts_WIDTH 32
++#define XgRxUnicastPkts_offset 0x20
++#define XgRxUnicastPkts_WIDTH 32
++#define XgRxUndersizePkts_offset 0x24
++#define XgRxUndersizePkts_WIDTH 32
++#define XgRxOversizePkts_offset 0x28
++#define XgRxOversizePkts_WIDTH 32
++#define XgRxJabberPkts_offset 0x2C
++#define XgRxJabberPkts_WIDTH 32
++#define XgRxUndersizeFCSerrorPkts_offset 0x30
++#define XgRxUndersizeFCSerrorPkts_WIDTH 32
++#define XgRxDropEvents_offset 0x34
++#define XgRxDropEvents_WIDTH 32
++#define XgRxFCSerrorPkts_offset 0x38
++#define XgRxFCSerrorPkts_WIDTH 32
++#define XgRxAlignError_offset 0x3C
++#define XgRxAlignError_WIDTH 32
++#define XgRxSymbolError_offset 0x40
++#define XgRxSymbolError_WIDTH 32
++#define XgRxInternalMACError_offset 0x44
++#define XgRxInternalMACError_WIDTH 32
++#define XgRxControlPkts_offset 0x48
++#define XgRxControlPkts_WIDTH 32
++#define XgRxPausePkts_offset 0x4C
++#define XgRxPausePkts_WIDTH 32
++#define XgRxPkts64Octets_offset 0x50
++#define XgRxPkts64Octets_WIDTH 32
++#define XgRxPkts65to127Octets_offset 0x54
++#define XgRxPkts65to127Octets_WIDTH 32
++#define XgRxPkts128to255Octets_offset 0x58
++#define XgRxPkts128to255Octets_WIDTH 32
++#define XgRxPkts256to511Octets_offset 0x5C
++#define XgRxPkts256to511Octets_WIDTH 32
++#define XgRxPkts512to1023Octets_offset 0x60
++#define XgRxPkts512to1023Octets_WIDTH 32
++#define XgRxPkts1024to15xxOctets_offset 0x64
++#define XgRxPkts1024to15xxOctets_WIDTH 32
++#define XgRxPkts15xxtoMaxOctets_offset 0x68
++#define XgRxPkts15xxtoMaxOctets_WIDTH 32
++#define XgRxLengthError_offset 0x6C
++#define XgRxLengthError_WIDTH 32
++#define XgTxPkts_offset 0x80
++#define XgTxPkts_WIDTH 32
++#define XgTxOctets_offset 0x88
++#define XgTxOctets_WIDTH 48
++#define XgTxMulticastPkts_offset 0x90
++#define XgTxMulticastPkts_WIDTH 32
++#define XgTxBroadcastPkts_offset 0x94
++#define XgTxBroadcastPkts_WIDTH 32
++#define XgTxUnicastPkts_offset 0x98
++#define XgTxUnicastPkts_WIDTH 32
++#define XgTxControlPkts_offset 0x9C
++#define XgTxControlPkts_WIDTH 32
++#define XgTxPausePkts_offset 0xA0
++#define XgTxPausePkts_WIDTH 32
++#define XgTxPkts64Octets_offset 0xA4
++#define XgTxPkts64Octets_WIDTH 32
++#define XgTxPkts65to127Octets_offset 0xA8
++#define XgTxPkts65to127Octets_WIDTH 32
++#define XgTxPkts128to255Octets_offset 0xAC
++#define XgTxPkts128to255Octets_WIDTH 32
++#define XgTxPkts256to511Octets_offset 0xB0
++#define XgTxPkts256to511Octets_WIDTH 32
++#define XgTxPkts512to1023Octets_offset 0xB4
++#define XgTxPkts512to1023Octets_WIDTH 32
++#define XgTxPkts1024to15xxOctets_offset 0xB8
++#define XgTxPkts1024to15xxOctets_WIDTH 32
++#define XgTxPkts1519toMaxOctets_offset 0xBC
++#define XgTxPkts1519toMaxOctets_WIDTH 32
++#define XgTxUndersizePkts_offset 0xC0
++#define XgTxUndersizePkts_WIDTH 32
++#define XgTxOversizePkts_offset 0xC4
++#define XgTxOversizePkts_WIDTH 32
++#define XgTxNonTcpUdpPkt_offset 0xC8
++#define XgTxNonTcpUdpPkt_WIDTH 16
++#define XgTxMacSrcErrPkt_offset 0xCC
++#define XgTxMacSrcErrPkt_WIDTH 16
++#define XgTxIpSrcErrPkt_offset 0xD0
++#define XgTxIpSrcErrPkt_WIDTH 16
++#define XgDmaDone_offset 0xD4
++#define XgDmaDone_WIDTH 32
++
++#define FALCON_STATS_NOT_DONE 0x00000000
++#define FALCON_STATS_DONE 0xffffffff
++
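++/* Illustrative sketch, not part of the original source: judging by
++ * falcon_update_stats_xmac() later in this patch, stats DMA completion
++ * appears to be detected by seeding the generation word at
++ * XgDmaDone_offset with FALCON_STATS_NOT_DONE, triggering the DMA, and
++ * polling until the NIC rewrites it as FALCON_STATS_DONE.  Assuming a
++ * DMA-coherent stats buffer at a hypothetical efx->stats_buffer.addr:
++ *
++ *	__le32 *done = (__le32 *)(efx->stats_buffer.addr + XgDmaDone_offset);
++ *	*done = FALCON_STATS_NOT_DONE;
++ *	(trigger the MAC stats DMA here)
++ *	while (*done != FALCON_STATS_DONE)
++ *		udelay(10);
++ */
++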
++/* Interrupt status register bits */
++#define FATAL_INT_LBN 64
++#define FATAL_INT_WIDTH 1
++#define INT_EVQS_LBN 40
++#define INT_EVQS_WIDTH 4
++#define INT_FLAG_LBN 32
++#define INT_FLAG_WIDTH 1
++#define EVQ_FIFO_HF_LBN 1
++#define EVQ_FIFO_HF_WIDTH 1
++#define EVQ_FIFO_AF_LBN 0
++#define EVQ_FIFO_AF_WIDTH 1
++
++/**************************************************************************
++ *
++ * Falcon non-volatile configuration
++ *
++ **************************************************************************
++ */
++
++/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
++struct falcon_nvconfig_board_v2 {
++ __le16 nports;
++ u8 port0_phy_addr;
++ u8 port0_phy_type;
++ u8 port1_phy_addr;
++ u8 port1_phy_type;
++ __le16 asic_sub_revision;
++ __le16 board_revision;
++} __attribute__ ((packed));
++
++/* Board configuration v3 extra information */
++struct falcon_nvconfig_board_v3 {
++ __le32 spi_device_type[2];
++} __attribute__ ((packed));
++
++/* Bit numbers for spi_device_type */
++#define SPI_DEV_TYPE_SIZE_LBN 0
++#define SPI_DEV_TYPE_SIZE_WIDTH 5
++#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
++#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
++#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
++#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
++#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
++#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
++#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
++#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
++#define SPI_DEV_TYPE_FIELD(type, field) \
++ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(field))
++
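++/* Illustrative sketch, not part of the original source: assuming
++ * EFX_LOW_BIT() and EFX_MASK32() expand the _LBN/_WIDTH pairs above,
++ * the erase opcode could be pulled out of a struct falcon_nvconfig
++ * (defined below) roughly as:
++ *
++ *	u32 dev_type = le32_to_cpu(nvconfig->board_v3.spi_device_type[0]);
++ *	u8 erase_cmd = SPI_DEV_TYPE_FIELD(dev_type, SPI_DEV_TYPE_ERASE_CMD);
++ */
++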
++#define NVCONFIG_BASE 0x300
++#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
++struct falcon_nvconfig {
++ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
++ u8 mac_address[2][8]; /* 0x310 */
++ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
++ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
++ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
++ efx_oword_t hw_init_reg; /* 0x350 */
++ efx_oword_t nic_stat_reg; /* 0x360 */
++ efx_oword_t glb_ctl_reg; /* 0x370 */
++ efx_oword_t srm_cfg_reg; /* 0x380 */
++ efx_oword_t spare_reg; /* 0x390 */
++ __le16 board_magic_num; /* 0x3A0 */
++ __le16 board_struct_ver;
++ __le16 board_checksum;
++ struct falcon_nvconfig_board_v2 board_v2;
++ efx_oword_t ee_base_page_reg; /* 0x3B0 */
++ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
++} __attribute__ ((packed));
++
++#endif /* EFX_FALCON_HWDEFS_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/falcon_io.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_io.h
+--- linux-2.6.18.8/drivers/net/sfc/falcon_io.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_io.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,259 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_FALCON_IO_H
++#define EFX_FALCON_IO_H
++
++#include "net_driver.h"
++#include "falcon.h"
++
++/**************************************************************************
++ *
++ * Falcon hardware access
++ *
++ **************************************************************************
++ *
++ * Notes on locking strategy:
++ *
++ * Most Falcon registers require 16-byte (or 8-byte, for SRAM
++ * registers) atomic writes, which necessitates locking.
++ * Under normal operation few writes to the Falcon BAR are made and these
++ * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
++ * cased to allow 4-byte (hence lockless) accesses.
++ *
++ * It *is* safe to write to these 4-byte registers in the middle of an
++ * access to an 8-byte or 16-byte register. We therefore use a
++ * spinlock to protect accesses to the larger registers, but no locks
++ * for the 4-byte registers.
++ *
++ * A write barrier is needed to ensure that DW3 is written after DW0/1/2
++ * due to the way the 16-byte registers are "collected" in the Falcon BIU.
++ *
++ * We also lock when carrying out reads, to ensure consistency of the
++ * data (made possible since the BIU reads all 128 bits into a cache).
++ * Reads are very rare, so this isn't a significant performance
++ * impact. (Most data transferred from NIC to host is DMAed directly
++ * into host memory).
++ *
++ * I/O BAR access uses locks for both reads and writes (but is only provided
++ * for testing purposes).
++ */
++
++/* Special buffer descriptors (Falcon SRAM) */
++#define BUF_TBL_KER_A1 0x18000
++#define BUF_TBL_KER_B0 0x800000
++
++
++#if BITS_PER_LONG == 64
++#define FALCON_USE_QWORD_IO 1
++#endif
++
++#define _falcon_writeq(efx, value, reg) \
++ __raw_writeq((__force u64) (value), (efx)->membase + (reg))
++#define _falcon_writel(efx, value, reg) \
++ __raw_writel((__force u32) (value), (efx)->membase + (reg))
++#define _falcon_readq(efx, reg) \
++ ((__force __le64) __raw_readq((efx)->membase + (reg)))
++#define _falcon_readl(efx, reg) \
++ ((__force __le32) __raw_readl((efx)->membase + (reg)))
++
++/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
++static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
++ unsigned int reg)
++{
++ unsigned long flags __attribute__ ((unused));
++
++ EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
++ EFX_OWORD_VAL(*value));
++
++ spin_lock_irqsave(&efx->biu_lock, flags);
++#ifdef FALCON_USE_QWORD_IO
++ _falcon_writeq(efx, value->u64[0], reg + 0);
++ wmb();
++ _falcon_writeq(efx, value->u64[1], reg + 8);
++#else
++ _falcon_writel(efx, value->u32[0], reg + 0);
++ _falcon_writel(efx, value->u32[1], reg + 4);
++ _falcon_writel(efx, value->u32[2], reg + 8);
++ wmb();
++ _falcon_writel(efx, value->u32[3], reg + 12);
++#endif
++ mmiowb();
++ spin_unlock_irqrestore(&efx->biu_lock, flags);
++}
++
++/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
++static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
++ unsigned int index)
++{
++ unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
++ unsigned long flags __attribute__ ((unused));
++
++ EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
++ reg, EFX_QWORD_VAL(*value));
++
++ spin_lock_irqsave(&efx->biu_lock, flags);
++#ifdef FALCON_USE_QWORD_IO
++ _falcon_writeq(efx, value->u64[0], reg + 0);
++#else
++ _falcon_writel(efx, value->u32[0], reg + 0);
++ wmb();
++ _falcon_writel(efx, value->u32[1], reg + 4);
++#endif
++ mmiowb();
++ spin_unlock_irqrestore(&efx->biu_lock, flags);
++}
++
++/* Write dword to Falcon register that allows partial writes
++ *
++ * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
++ * TX_DESC_UPD_REG) can be written to as a single dword. This allows
++ * for lockless writes.
++ */
++static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
++ unsigned int reg)
++{
++ EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
++ reg, EFX_DWORD_VAL(*value));
++
++ /* No lock required */
++ _falcon_writel(efx, value->u32[0], reg);
++}
++
++/* Read from a Falcon register
++ *
++ * This reads an entire 16-byte Falcon register in one go, locking as
++ * appropriate. It is essential to read the first dword first, as this
++ * prompts Falcon to load the current value into the shadow register.
++ */
++static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
++ unsigned int reg)
++{
++ unsigned long flags __attribute__ ((unused));
++
++ spin_lock_irqsave(&efx->biu_lock, flags);
++ value->u32[0] = _falcon_readl(efx, reg + 0);
++ rmb();
++ value->u32[1] = _falcon_readl(efx, reg + 4);
++ value->u32[2] = _falcon_readl(efx, reg + 8);
++ value->u32[3] = _falcon_readl(efx, reg + 12);
++ spin_unlock_irqrestore(&efx->biu_lock, flags);
++
++ EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
++ EFX_OWORD_VAL(*value));
++}
++
++/* This reads an 8-byte Falcon SRAM entry in one go. */
++static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
++ unsigned int index)
++{
++ unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
++ unsigned long flags __attribute__ ((unused));
++
++ spin_lock_irqsave(&efx->biu_lock, flags);
++#ifdef FALCON_USE_QWORD_IO
++ value->u64[0] = _falcon_readq(efx, reg + 0);
++#else
++ value->u32[0] = _falcon_readl(efx, reg + 0);
++ rmb();
++ value->u32[1] = _falcon_readl(efx, reg + 4);
++#endif
++ spin_unlock_irqrestore(&efx->biu_lock, flags);
++
++ EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
++ reg, EFX_QWORD_VAL(*value));
++}
++
++/* Read dword from Falcon register that allows partial writes (sic) */
++static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
++ unsigned int reg)
++{
++ value->u32[0] = _falcon_readl(efx, reg);
++ EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
++ reg, EFX_DWORD_VAL(*value));
++}
++
++/* Write to a register forming part of a table */
++static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
++ unsigned int reg, unsigned int index)
++{
++ falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
++}
++
++/* Read from a register forming part of a table */
++static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
++ unsigned int reg, unsigned int index)
++{
++ falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
++}
++
++/* Write to a dword register forming part of a table */
++static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
++ unsigned int reg, unsigned int index)
++{
++ falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
++}
++
++/* Page-mapped register block size */
++#define FALCON_PAGE_BLOCK_SIZE 0x2000
++
++/* Calculate offset to page-mapped register block */
++#define FALCON_PAGED_REG(page, reg) \
++ ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
++
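++/* Illustrative note, not part of the original source: each page-mapped
++ * queue appears to own an 0x2000-byte block, so FALCON_PAGED_REG(2, 0x400)
++ * resolves to 2 * 0x2000 + 0x400 = 0x4400.  The _P0 offsets in
++ * falcon_hwdefs.h (e.g. RX_DESC_PTR_TBL_KER_P0) are presumably the
++ * page-0 offsets meant to be passed as "reg" here.
++ */
++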
++/* As for falcon_write(), but for a page-mapped register. */
++static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
++ unsigned int reg, unsigned int page)
++{
++ falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
++}
++
++/* As for falcon_writel(), but for a page-mapped register. */
++static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
++ unsigned int reg, unsigned int page)
++{
++ falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
++}
++
++/* Write dword to Falcon page-mapped register with an extra lock.
++ *
++ * As for falcon_writel_page(), but for a register that suffers from
++ * SFC bug 3181. Take out a lock so the BIU collector cannot be
++ * confused. */
++static inline void falcon_writel_page_locked(struct efx_nic *efx,
++ efx_dword_t *value,
++ unsigned int reg,
++ unsigned int page)
++{
++ unsigned long flags __attribute__ ((unused));
++
++ spin_lock_irqsave(&efx->biu_lock, flags);
++ falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
++ spin_unlock_irqrestore(&efx->biu_lock, flags);
++}
++
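++/* Illustrative sketch, not part of the original source, of the lockless
++ * dword path described in the notes above.  Assuming EFX_POPULATE_DWORD_1
++ * from bitfield.h and hypothetical read_ptr/evq_index locals, acking an
++ * event queue on a B0 part might look like:
++ *
++ *	efx_dword_t reg;
++ *	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, read_ptr);
++ *	falcon_writel_table(efx, &reg, EVQ_RPTR_REG_KER_B0, evq_index);
++ *
++ * No spinlock is needed because EVQ_RPTR_REG accepts 4-byte writes.
++ */
++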
++#endif /* EFX_FALCON_IO_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/falcon_xmac.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_xmac.c
+--- linux-2.6.18.8/drivers/net/sfc/falcon_xmac.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/falcon_xmac.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,691 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/delay.h>
++#include "net_driver.h"
++#include "efx.h"
++#include "falcon.h"
++#include "falcon_hwdefs.h"
++#include "falcon_io.h"
++#include "mac.h"
++#include "gmii.h"
++#include "mdio_10g.h"
++#include "phy.h"
++#include "boards.h"
++#include "workarounds.h"
++
++/**************************************************************************
++ *
++ * MAC register access
++ *
++ **************************************************************************/
++
++/* Offset of an XMAC register within Falcon */
++#define FALCON_XMAC_REG(mac_reg) \
++ (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
++
++static void falcon_xmac_writel(struct efx_nic *efx,
++ efx_dword_t *value, unsigned int mac_reg)
++{
++ efx_oword_t temp;
++
++ EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
++ falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
++}
++
++static void falcon_xmac_readl(struct efx_nic *efx,
++ efx_dword_t *value, unsigned int mac_reg)
++{
++ efx_oword_t temp;
++
++ falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
++ EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
++}
++
++/**************************************************************************
++ *
++ * MAC operations
++ *
++ *************************************************************************/
++static int falcon_reset_xmac(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++ int count;
++
++ EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
++ efx->mac_op->mac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
++
++ for (count = 0; count < 10000; count++) { /* wait up to 100ms */
++ efx->mac_op->mac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
++ if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
++ return 0;
++ udelay(10);
++ }
++
++ EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
++ return -ETIMEDOUT;
++}
++
++/* Configure the XAUI driver that is an output from Falcon */
++static void falcon_setup_xaui(struct efx_nic *efx)
++{
++ efx_dword_t sdctl, txdrv;
++
++ /* Move the XAUI into low power, unless there is no PHY, in
++ * which case the XAUI will have to drive a cable. */
++ if (efx->phy_type == PHY_TYPE_NONE)
++ return;
++
++ efx->mac_op->mac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
++ EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
++ EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
++ efx->mac_op->mac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
++
++ EFX_POPULATE_DWORD_8(txdrv,
++ XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
++ XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
++ XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
++ XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
++ XX_DTXD, XX_TXDRV_DTX_DEFAULT,
++ XX_DTXC, XX_TXDRV_DTX_DEFAULT,
++ XX_DTXB, XX_TXDRV_DTX_DEFAULT,
++ XX_DTXA, XX_TXDRV_DTX_DEFAULT);
++ efx->mac_op->mac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
++}
++
++static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++
++ EFX_ZERO_DWORD(reg);
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++}
++
++static int _falcon_reset_xaui_a(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++
++ if (!efx->is_asic)
++ return 0;
++
++ falcon_hold_xaui_in_rst(efx);
++ efx->mac_op->mac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
++
++ /* Follow the RAMBUS XAUI data reset sequencing
++ * Channels A and B first: power down, reset PLL, reset, clear
++ */
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
++ EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ /* Channels C and D: power down, reset PLL, reset, clear */
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
++ EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
++ EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ /* Setup XAUI */
++ falcon_setup_xaui(efx);
++ udelay(10);
++
++ /* Take XGXS out of reset */
++ EFX_ZERO_DWORD(reg);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ return 0;
++}
++
++static int _falcon_reset_xaui_b(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++ int count;
++
++ if (!efx->is_asic)
++ return 0;
++
++ EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++
++ /* Give some time for the link to establish */
++ for (count = 0; count < 1000; count++) { /* wait up to 10ms */
++ efx->mac_op->mac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
++ if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
++ falcon_setup_xaui(efx);
++ return 0;
++ }
++ udelay(10);
++ }
++ EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
++ return -ETIMEDOUT;
++}
++
++int falcon_reset_xaui(struct efx_nic *efx)
++{
++ int rc;
++
++ if (EFX_WORKAROUND_9388(efx)) {
++ falcon_hold_xaui_in_rst(efx);
++ efx->phy_op->reset_xaui(efx);
++ rc = _falcon_reset_xaui_a(efx);
++ } else {
++ rc = _falcon_reset_xaui_b(efx);
++ }
++ return rc;
++}
++
++static int falcon_init_xmac(struct efx_nic *efx)
++{
++ int rc;
++
++ /* Initialize the PHY first so the clock is around */
++ rc = efx->phy_op->init(efx);
++ if (rc)
++ goto fail1;
++
++ rc = falcon_reset_xaui(efx);
++ if (rc)
++ goto fail2;
++
++ /* Wait again. Give the PHY and MAC time to faff */
++ schedule_timeout_uninterruptible(HZ / 10);
++
++ /* Reset the MAC */
++ rc = falcon_reset_xmac(efx);
++ if (rc)
++ goto fail2;
++
++ return 0;
++
++ fail2:
++ efx->phy_op->fini(efx);
++ fail1:
++ return rc;
++}
++
++/* Get status of XAUI link */
++int falcon_xaui_link_ok(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++ int align_done;
++ int sync_status;
++ int link_ok = 0;
++
++ /* If we're in internal loopback, then the link is up.
++ * The A1 FPGA/4G has RX and TX XAUI wired together, so the link is up.
++ * The B0 FPGA has XAUI offchip, so it is always up.
++ */
++ if (!efx->is_asic || LOOPBACK_INTERNAL(efx))
++ return 1;
++
++ /* Read link status */
++ efx->mac_op->mac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
++
++ align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
++ sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
++ if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
++ link_ok = 1;
++
++ /* Clear link status ready for next read */
++ EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
++ EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
++ EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
++
++ efx->mac_op->mac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
++
++ return link_ok;
++}
++
++/* Do most of the heavy lifting of falcon_reconfigure_xmac */
++static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
++{
++ unsigned int max_frame_len;
++ efx_dword_t reg;
++ efx_oword_t mac_test_reg;
++ int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
++
++ if (FALCON_REV(efx) <= FALCON_REV_A1 && !efx->is_asic) {
++ /* 10G FPGAs have the XAUI TX and RX wired together. Fake
++ * the link status and configure the link options before
++ * the MAC wrapper is configured */
++ efx->link_options = GM_LPA_10000FULL;
++ efx->link_up = 1;
++ }
++
++ /* Configure MAC - cut-thru mode is hard-wired on */
++ EFX_POPULATE_DWORD_3(reg,
++ XM_RX_JUMBO_MODE, 1,
++ XM_TX_STAT_EN, 1,
++ XM_RX_STAT_EN, 1);
++ efx->mac_op->mac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
++
++ /* Configure TX */
++ EFX_POPULATE_DWORD_6(reg,
++ XM_TXEN, 1,
++ XM_TX_PRMBL, 1,
++ XM_AUTO_PAD, 1,
++ XM_TXCRC, 1,
++ XM_FCNTL, 1,
++ XM_IPG, 0x3);
++ efx->mac_op->mac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
++
++ /* Configure RX */
++ EFX_POPULATE_DWORD_5(reg,
++ XM_RXEN, 1,
++ XM_AUTO_DEPAD, 0,
++ XM_ACPT_ALL_MCAST, 1,
++ XM_ACPT_ALL_UCAST, efx->promiscuous,
++ XM_PASS_CRC_ERR, 1);
++ efx->mac_op->mac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
++
++ /* Set frame length */
++ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
++ EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
++ efx->mac_op->mac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
++ EFX_POPULATE_DWORD_2(reg,
++ XM_MAX_TX_FRM_SIZE, max_frame_len,
++ XM_TX_JUMBO_MODE, 1);
++ efx->mac_op->mac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
++
++ EFX_POPULATE_DWORD_2(reg,
++ XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
++ XM_DIS_FCNTL, rx_fc ? 0 : 1);
++ efx->mac_op->mac_writel(efx, &reg, XM_FC_REG_MAC);
++
++ /* Set MAC address */
++ EFX_POPULATE_DWORD_4(reg,
++ XM_ADR_0, efx->net_dev->dev_addr[0],
++ XM_ADR_1, efx->net_dev->dev_addr[1],
++ XM_ADR_2, efx->net_dev->dev_addr[2],
++ XM_ADR_3, efx->net_dev->dev_addr[3]);
++ efx->mac_op->mac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
++ EFX_POPULATE_DWORD_2(reg,
++ XM_ADR_4, efx->net_dev->dev_addr[4],
++ XM_ADR_5, efx->net_dev->dev_addr[5]);
++ efx->mac_op->mac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
++
++ /* Handle B0 FPGA loopback where the RAMBUS XGXS block is not present */
++ if (FALCON_REV(efx) == FALCON_REV_B0 && !efx->is_asic) {
++ int xgmii_loopback =
++ (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
++
++ /* Set the MAC loopback bit. */
++ EFX_POPULATE_OWORD_1(mac_test_reg,
++ MAC_PTLOOP_EN, xgmii_loopback);
++ falcon_write(efx, &mac_test_reg, MAC_TEST_REG_KER);
++ }
++}
++
++/* Do most of the heavy lifting of falcon_reconfigure_xmac */
++static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++ int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
++ int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
++ int xgmii_loopback =
++ (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
++
++ if (FALCON_REV(efx) == FALCON_REV_B0 && !efx->is_asic)
++ /* RAMBUS XGXS block is not present */
++ return;
++
++ /* XGXS block is flaky and will need to be reset if moving
++ * into or out of XGMII, XGXS or XAUI loopbacks. */
++ if (EFX_WORKAROUND_5147(efx)) {
++ int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
++ int reset_xgxs;
++
++ efx->mac_op->mac_readl(efx, &reg,
++ XX_CORE_STAT_REG_MAC);
++ old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
++ old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
++
++ efx->mac_op->mac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
++ old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
++
++ /* The PHY driver may have turned XAUI off */
++ reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
++ (xaui_loopback != old_xaui_loopback) ||
++ (xgmii_loopback != old_xgmii_loopback));
++ if (reset_xgxs) {
++ efx->mac_op->mac_readl(efx, &reg,
++ XX_PWR_RST_REG_MAC);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
++ efx->mac_op->mac_writel(efx, &reg,
++ XX_PWR_RST_REG_MAC);
++ udelay(1);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
++ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
++ efx->mac_op->mac_writel(efx, &reg,
++ XX_PWR_RST_REG_MAC);
++ udelay(1);
++ }
++ }
++
++ efx->mac_op->mac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
++ EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
++ (xgxs_loopback || xaui_loopback) ?
++ XX_FORCE_SIG_DECODE_FORCED : 0);
++ EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
++ EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
++ efx->mac_op->mac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
++
++ efx->mac_op->mac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
++ EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
++ EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
++ EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
++ EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
++ efx->mac_op->mac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
++}
++
++
++/* Sometimes the XAUI link between Falcon and XFP fails to come up. The state
++ * of the link is checked during phy_reconfigure(). After the XAUI is reset
++ * the MAC must be reconfigured.
++ */
++#define MAX_XAUI_TRIES (5) /* It's never been seen to take more than 2 */
++
++void falcon_check_xaui_link_up(struct efx_nic *efx)
++{
++ int max_tries, tries;
++ tries = EFX_WORKAROUND_5147(efx) ? MAX_XAUI_TRIES : 1;
++ max_tries = tries;
++
++ if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
++ (efx->phy_type == PHY_TYPE_NONE) ||
++ !efx->phy_powered)
++ return;
++
++ while (tries) {
++ if (falcon_xaui_link_ok(efx))
++ return;
++
++ EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
++ __func__, tries);
++ (void) falcon_reset_xaui(efx);
++ /* Cannot use full reconfigure. Need to avoid recursion */
++
++ /* Give the poor thing time to sort itself out: if we retry
++ * too fast it will never train. */
++ udelay(200);
++
++ falcon_reconfigure_xgxs_core(efx);
++
++ tries--;
++ }
++
++ EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
++ max_tries);
++}
++
++static void falcon_reconfigure_xmac(struct efx_nic *efx)
++{
++ falcon_deconfigure_mac_wrapper(efx);
++
++ /* In internal loopback modes disable transmit */
++ efx->tx_disabled = LOOPBACK_INTERNAL(efx);
++
++ efx->phy_op->reconfigure(efx);
++
++ falcon_reconfigure_xgxs_core(efx);
++ falcon_reconfigure_xmac_core(efx);
++
++ /* Reconfigure MAC wrapper */
++ falcon_reconfigure_mac_wrapper(efx);
++
++ /* Ensure XAUI link is up - might repeat reconfigure_xmac_core */
++ falcon_check_xaui_link_up(efx);
++}
++
++static void falcon_fini_xmac(struct efx_nic *efx)
++{
++ /* Isolate the MAC - PHY */
++ falcon_deconfigure_mac_wrapper(efx);
++
++ /* Potentially power down the PHY */
++ efx->phy_op->fini(efx);
++}
++
++static void falcon_update_stats_xmac(struct efx_nic *efx)
++{
++ struct efx_mac_stats *mac_stats = &efx->mac_stats;
++ int rc;
++
++ rc = falcon_dma_stats(efx, XgDmaDone_offset);
++ if (rc)
++ return;
++
++ /* Update MAC stats from DMAed values */
++ FALCON_STAT(efx, XgRxOctets, rx_bytes);
++ FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
++ FALCON_STAT(efx, XgRxPkts, rx_packets);
++ FALCON_STAT(efx, XgRxPktsOK, rx_good);
++ FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
++ FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
++ FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
++ FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
++ FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
++ FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
++ FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
++ FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
++ FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
++ FALCON_STAT(efx, XgRxAlignError, rx_align_error);
++ FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
++ FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
++ FALCON_STAT(efx, XgRxControlPkts, rx_control);
++ FALCON_STAT(efx, XgRxPausePkts, rx_pause);
++ FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
++ FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
++ FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
++ FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
++ FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
++ FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
++ FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
++ FALCON_STAT(efx, XgRxLengthError, rx_length_error);
++ FALCON_STAT(efx, XgTxPkts, tx_packets);
++ FALCON_STAT(efx, XgTxOctets, tx_bytes);
++ FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
++ FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
++ FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
++ FALCON_STAT(efx, XgTxControlPkts, tx_control);
++ FALCON_STAT(efx, XgTxPausePkts, tx_pause);
++ FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
++ FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
++ FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
++ FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
++ FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
++ FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
++ FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
++ FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
++ FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
++ FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
++ FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
++ FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
++
++ /* Update derived statistics */
++ mac_stats->tx_good_bytes =
++ (mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
++ mac_stats->rx_bad_bytes =
++ (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
++}
++
++#define EFX_XAUI_RETRAIN_MAX 8
++
++static int falcon_check_xmac(struct efx_nic *efx)
++{
++ unsigned link_ok, phyxs_ok = 1;
++ unsigned has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
++
++ /* Check the remote XAUI link status */
++ link_ok = falcon_xaui_link_ok(efx);
++
++ if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
++ !efx->phy_powered)
++ return 0;
++
++ if (link_ok && has_phyxs && !LOOPBACK_INTERNAL(efx)) {
++ /* Does the PHYXS think we have lane sync? */
++ phyxs_ok = mdio_clause45_phyxgxs_lane_sync(efx);
++ }
++
++ if (EFX_WORKAROUND_5147(efx) && (!link_ok || !phyxs_ok)) {
++ (void) falcon_reset_xaui(efx);
++ falcon_reconfigure_xgxs_core(efx);
++ }
++
++ /* Call the PHY check_hw routine */
++ efx->phy_op->check_hw(efx);
++ return 0;
++}
++
++/* Simulate a PHY event */
++static void falcon_xmac_sim_phy_event(struct efx_nic *efx)
++{
++ efx_qword_t phy_event;
++
++ EFX_POPULATE_QWORD_2(phy_event,
++ EV_CODE, GLOBAL_EV_DECODE,
++ XG_PHY_INTR, 1);
++ falcon_generate_event(&efx->channel[0], &phy_event);
++}
++
++static int falcon_xmac_get_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd)
++{
++ mdio_clause45_get_settings(efx, ecmd);
++ ecmd->transceiver = XCVR_INTERNAL;
++ ecmd->phy_address = efx->mii.phy_id;
++ ecmd->autoneg = AUTONEG_DISABLE;
++ ecmd->duplex = DUPLEX_FULL;
++ return 0;
++}
++
++static int falcon_xmac_set_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd)
++{
++ if (ecmd->transceiver != XCVR_INTERNAL)
++ return -EINVAL;
++ if (ecmd->autoneg != AUTONEG_DISABLE)
++ return -EINVAL;
++ if (ecmd->duplex != DUPLEX_FULL)
++ return -EINVAL;
++
++ return mdio_clause45_set_settings(efx, ecmd);
++}
++
++
++static int falcon_xmac_set_pause(struct efx_nic *efx,
++ enum efx_fc_type flow_control)
++{
++ int reset;
++
++ if (flow_control & EFX_FC_AUTO) {
++ EFX_LOG(efx, "10G does not support flow control "
++ "autonegotiation\n");
++ return -EINVAL;
++ }
++
++ if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
++ return -EINVAL;
++
++ /* TX flow control may automatically turn itself off if the
++ * link partner (intermittently) stops responding to pause
++ * frames. There isn't any indication that this has happened,
++ * so the best we can do is leave it up to the user to spot this
++ * and fix it by cycling transmit flow control on this end. */
++ reset = ((flow_control & EFX_FC_TX) &&
++ !(efx->flow_control & EFX_FC_TX));
++ if (EFX_WORKAROUND_11482(efx) && reset) {
++ if (FALCON_REV(efx) == FALCON_REV_B0) {
++ /* Recover by resetting the EM block */
++ mutex_lock(&efx->mac_lock);
++ if (efx->link_up)
++ falcon_drain_tx_fifo(efx);
++ mutex_unlock(&efx->mac_lock);
++ } else {
++ /* Schedule a reset to recover */
++ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
++ }
++ }
++
++ efx->flow_control = flow_control;
++
++ return 0;
++}
++
++struct efx_mac_operations falcon_xmac_operations = {
++ .mac_writel = falcon_xmac_writel,
++ .mac_readl = falcon_xmac_readl,
++ .init = falcon_init_xmac,
++ .reconfigure = falcon_reconfigure_xmac,
++ .update_stats = falcon_update_stats_xmac,
++ .fini = falcon_fini_xmac,
++ .check_hw = falcon_check_xmac,
++ .fake_phy_event = falcon_xmac_sim_phy_event,
++ .get_settings = falcon_xmac_get_settings,
++ .set_settings = falcon_xmac_set_settings,
++ .set_pause = falcon_xmac_set_pause,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/gmii.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/gmii.h
+--- linux-2.6.18.8/drivers/net/sfc/gmii.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/gmii.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,212 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_GMII_H
++#define EFX_GMII_H
++
++/*
++ * GMII interface
++ */
++
++#include <linux/mii.h>
++
++/* GMII registers, excluding registers already defined as MII
++ * registers in mii.h
++ */
++#define GMII_IER 0x12 /* Interrupt enable register */
++#define GMII_ISR 0x13 /* Interrupt status register */
++
++/* Interrupt enable register */
++#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
++#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
++#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
++#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
++#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
++#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
++#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
++#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
++#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
++#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
++#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
++#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
++#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
++#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
++#define IER_JABBER 0x0001 /* Bit 0 - jabber */
++
++/* Interrupt status register */
++#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
++#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
++#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
++#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
++#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
++#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
++#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
++#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
++#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
++#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
++#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
++#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
++#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
++#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
++#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
++
++/* Logically extended advertisement register */
++#define GM_ADVERTISE_SLCT ADVERTISE_SLCT
++#define GM_ADVERTISE_CSMA ADVERTISE_CSMA
++#define GM_ADVERTISE_10HALF ADVERTISE_10HALF
++#define GM_ADVERTISE_1000XFULL ADVERTISE_1000XFULL
++#define GM_ADVERTISE_10FULL ADVERTISE_10FULL
++#define GM_ADVERTISE_1000XHALF ADVERTISE_1000XHALF
++#define GM_ADVERTISE_100HALF ADVERTISE_100HALF
++#define GM_ADVERTISE_1000XPAUSE ADVERTISE_1000XPAUSE
++#define GM_ADVERTISE_100FULL ADVERTISE_100FULL
++#define GM_ADVERTISE_1000XPSE_ASYM ADVERTISE_1000XPSE_ASYM
++#define GM_ADVERTISE_100BASE4 ADVERTISE_100BASE4
++#define GM_ADVERTISE_PAUSE_CAP ADVERTISE_PAUSE_CAP
++#define GM_ADVERTISE_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
++#define GM_ADVERTISE_RESV ADVERTISE_RESV
++#define GM_ADVERTISE_RFAULT ADVERTISE_RFAULT
++#define GM_ADVERTISE_LPACK ADVERTISE_LPACK
++#define GM_ADVERTISE_NPAGE ADVERTISE_NPAGE
++#define GM_ADVERTISE_1000FULL (ADVERTISE_1000FULL << 8)
++#define GM_ADVERTISE_1000HALF (ADVERTISE_1000HALF << 8)
++#define GM_ADVERTISE_1000 (GM_ADVERTISE_1000FULL | \
++ GM_ADVERTISE_1000HALF)
++#define GM_ADVERTISE_FULL (GM_ADVERTISE_1000FULL | \
++ ADVERTISE_FULL)
++#define GM_ADVERTISE_ALL (GM_ADVERTISE_1000FULL | \
++ GM_ADVERTISE_1000HALF | \
++ ADVERTISE_ALL)
++
++/* Logically extended link partner ability register */
++#define GM_LPA_SLCT LPA_SLCT
++#define GM_LPA_10HALF LPA_10HALF
++#define GM_LPA_1000XFULL LPA_1000XFULL
++#define GM_LPA_10FULL LPA_10FULL
++#define GM_LPA_1000XHALF LPA_1000XHALF
++#define GM_LPA_100HALF LPA_100HALF
++#define GM_LPA_1000XPAUSE LPA_1000XPAUSE
++#define GM_LPA_100FULL LPA_100FULL
++#define GM_LPA_1000XPAUSE_ASYM LPA_1000XPAUSE_ASYM
++#define GM_LPA_100BASE4 LPA_100BASE4
++#define GM_LPA_PAUSE_CAP LPA_PAUSE_CAP
++#define GM_LPA_PAUSE_ASYM LPA_PAUSE_ASYM
++#define GM_LPA_RESV LPA_RESV
++#define GM_LPA_RFAULT LPA_RFAULT
++#define GM_LPA_LPACK LPA_LPACK
++#define GM_LPA_NPAGE LPA_NPAGE
++#define GM_LPA_1000FULL (LPA_1000FULL << 6)
++#define GM_LPA_1000HALF (LPA_1000HALF << 6)
++#define GM_LPA_10000FULL 0x00040000
++#define GM_LPA_10000HALF 0x00080000
++#define GM_LPA_DUPLEX (GM_LPA_1000FULL | GM_LPA_10000FULL \
++ | LPA_DUPLEX)
++#define GM_LPA_10 (LPA_10FULL | LPA_10HALF)
++#define GM_LPA_100 LPA_100
++#define GM_LPA_1000 (GM_LPA_1000FULL | GM_LPA_1000HALF)
++#define GM_LPA_10000 (GM_LPA_10000FULL | GM_LPA_10000HALF)
++
++/* Retrieve GMII autonegotiation advertised abilities
++ *
++ * The MII advertisement register (MII_ADVERTISE) is logically extended
++ * to include advertisement bits ADVERTISE_1000FULL and
++ * ADVERTISE_1000HALF from MII_CTRL1000. The result can be tested
++ * against the GM_ADVERTISE_xxx constants.
++ */
++static inline unsigned int gmii_advertised(struct mii_if_info *gmii)
++{
++ unsigned int advertise;
++ unsigned int ctrl1000;
++
++ advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
++ ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
++ return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise);
++}
++
++/* Retrieve GMII autonegotiation link partner abilities
++ *
++ * The MII link partner ability register (MII_LPA) is logically
++ * extended by adding bits LPA_1000HALF and LPA_1000FULL from
++ * MII_STAT1000. The result can be tested against the GM_LPA_xxx
++ * constants.
++ */
++static inline unsigned int gmii_lpa(struct mii_if_info *gmii)
++{
++ unsigned int lpa;
++ unsigned int stat1000;
++
++ lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA);
++ stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000);
++ return (((stat1000 << 6) & GM_LPA_1000) | lpa);
++}
++
++/* Calculate GMII autonegotiated link technology
++ *
++ * "negotiated" should be the result of gmii_advertised() logically
++ * ANDed with the result of gmii_lpa().
++ *
++ * "tech" will be negotiated with the unused bits masked out. For
++ * example, if both ends of the link are capable of both
++ * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked
++ * out.
++ */
++static inline unsigned int gmii_nway_result(unsigned int negotiated)
++{
++ unsigned int other_bits;
++
++ /* Mask out the speed and duplexity bits */
++ other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000);
++
++ if (negotiated & GM_LPA_1000FULL)
++ return (other_bits | GM_LPA_1000FULL);
++ else if (negotiated & GM_LPA_1000HALF)
++ return (other_bits | GM_LPA_1000HALF);
++ else
++ return (other_bits | mii_nway_result(negotiated));
++}
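++
++/* Example (illustrative): combining the helpers above to resolve
++ * the autonegotiated link, given a populated struct mii_if_info:
++ *
++ *   unsigned int negotiated = gmii_advertised(gmii) & gmii_lpa(gmii);
++ *   unsigned int tech = gmii_nway_result(negotiated);
++ *   int full_duplex = !!(tech & GM_LPA_DUPLEX);
++ */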
++
++/* Calculate GMII non-autonegotiated link technology
++ *
++ * This provides an equivalent to gmii_nway_result for the case when
++ * autonegotiation is disabled.
++ */
++static inline unsigned int gmii_forced_result(unsigned int bmcr)
++{
++ unsigned int result;
++ int full_duplex;
++
++ full_duplex = bmcr & BMCR_FULLDPLX;
++ if (bmcr & BMCR_SPEED1000)
++ result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF;
++ else if (bmcr & BMCR_SPEED100)
++ result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF;
++ else
++ result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF;
++ return result;
++}
++
++#endif /* EFX_GMII_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/i2c-direct.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/i2c-direct.c
+--- linux-2.6.18.8/drivers/net/sfc/i2c-direct.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/i2c-direct.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,398 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <asm/io.h>
++#include <linux/delay.h>
++#include "net_driver.h"
++#include "i2c-direct.h"
++
++/* EEPROM access via I2C
++ * data (SDA) and clock (SCL) line read/writes
++ */
++
++static inline void setsda(struct efx_i2c_interface *i2c, int state)
++{
++ udelay(i2c->op->udelay);
++ i2c->sda = state;
++ i2c->op->setsda(i2c);
++ udelay(i2c->op->udelay);
++}
++
++static inline void setscl(struct efx_i2c_interface *i2c, int state)
++{
++ udelay(i2c->op->udelay);
++ i2c->scl = state;
++ i2c->op->setscl(i2c);
++ udelay(i2c->op->udelay);
++}
++
++static inline int getsda(struct efx_i2c_interface *i2c)
++{
++ int sda;
++
++ udelay(i2c->op->udelay);
++ sda = i2c->op->getsda(i2c);
++ udelay(i2c->op->udelay);
++ return sda;
++}
++
++static inline int getscl(struct efx_i2c_interface *i2c)
++{
++ int scl;
++
++ udelay(i2c->op->udelay);
++ scl = i2c->op->getscl(i2c);
++ udelay(i2c->op->udelay);
++ return scl;
++}
++
++/*
++ * I2C low-level protocol operations
++ *
++ */
++
++static inline void i2c_release(struct efx_i2c_interface *i2c)
++{
++ EFX_WARN_ON_PARANOID(!i2c->scl);
++ EFX_WARN_ON_PARANOID(!i2c->sda);
++ /* Just in case */
++ setscl(i2c, 1);
++ setsda(i2c, 1);
++ EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
++ EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
++}
++
++static inline void i2c_start(struct efx_i2c_interface *i2c)
++{
++ /* We may be restarting immediately after a {send,recv}_bit,
++ * so SCL will not necessarily already be high.
++ */
++ EFX_WARN_ON_PARANOID(!i2c->sda);
++ setscl(i2c, 1);
++ setsda(i2c, 0);
++ setscl(i2c, 0);
++ setsda(i2c, 1);
++}
++
++static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
++{
++ EFX_WARN_ON_PARANOID(i2c->scl != 0);
++ setsda(i2c, bit);
++ setscl(i2c, 1);
++ setscl(i2c, 0);
++ setsda(i2c, 1);
++}
++
++static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
++{
++ int bit;
++
++ EFX_WARN_ON_PARANOID(i2c->scl != 0);
++ EFX_WARN_ON_PARANOID(!i2c->sda);
++ setscl(i2c, 1);
++ bit = getsda(i2c);
++ setscl(i2c, 0);
++ return bit;
++}
++
++static inline void i2c_stop(struct efx_i2c_interface *i2c)
++{
++ EFX_WARN_ON_PARANOID(i2c->scl != 0);
++ setsda(i2c, 0);
++ setscl(i2c, 1);
++ setsda(i2c, 1);
++}
++
++/*
++ * I2C mid-level protocol operations
++ *
++ */
++
++/* Sends a byte via the I2C bus and checks for an acknowledgement from
++ * the slave device.
++ */
++static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
++{
++ int i;
++
++ /* Send byte */
++ for (i = 0; i < 8; i++) {
++ i2c_send_bit(i2c, !!(byte & 0x80));
++ byte <<= 1;
++ }
++
++ /* Check for acknowledgement from slave */
++ return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
++}
++
++/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
++static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
++{
++ u8 value = 0;
++ int i;
++
++ /* Receive byte */
++ for (i = 0; i < 8; i++)
++ value = (value << 1) | i2c_recv_bit(i2c);
++
++ /* Send ACK/NACK */
++ i2c_send_bit(i2c, (ack ? 0 : 1));
++
++ return value;
++}
++
++/* Calculate command byte for a read operation */
++static inline u8 i2c_read_cmd(u8 device_id)
++{
++ return ((device_id << 1) | 1);
++}
++
++/* Calculate command byte for a write operation */
++static inline u8 i2c_write_cmd(u8 device_id)
++{
++ return ((device_id << 1) | 0);
++}
++
++int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
++{
++ int rc;
++
++ /* If someone is driving the bus low we just give up. */
++ if (getsda(i2c) == 0 || getscl(i2c) == 0) {
++ EFX_ERR(i2c->efx, "%s: someone is holding the I2C bus low."
++ " Giving up.\n", __func__);
++ return -EFAULT;
++ }
++
++ /* Pretend to initiate a device write */
++ i2c_start(i2c);
++ rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
++ if (rc)
++ goto out;
++
++ out:
++ i2c_stop(i2c);
++ i2c_release(i2c);
++
++ return rc;
++}
++
++/* This performs a fast read of one or more consecutive bytes from an
++ * I2C device. Not all devices support consecutive reads of more than
++ * one byte; for these devices use efx_i2c_read() instead.
++ */
++int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, u8 *data, unsigned int len)
++{
++ int i;
++ int rc;
++
++ EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
++ EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
++ EFX_WARN_ON_PARANOID(data == NULL);
++ EFX_WARN_ON_PARANOID(len < 1);
++
++ /* Select device and starting offset */
++ i2c_start(i2c);
++ rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
++ if (rc)
++ goto out;
++ rc = i2c_send_byte(i2c, offset);
++ if (rc)
++ goto out;
++
++ /* Read data from device */
++ i2c_start(i2c);
++ rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
++ if (rc)
++ goto out;
++ for (i = 0; i < (len - 1); i++)
++ /* Read and acknowledge all but the last byte */
++ data[i] = i2c_recv_byte(i2c, 1);
++ /* Read last byte with no acknowledgement */
++ data[i] = i2c_recv_byte(i2c, 0);
++
++ out:
++ i2c_stop(i2c);
++ i2c_release(i2c);
++
++ return rc;
++}
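++
++/* Example (illustrative; the 0x50 EEPROM address is hypothetical):
++ *
++ *   u8 buf[16];
++ *   rc = efx_i2c_fast_read(i2c, 0x50, 0, buf, sizeof(buf));
++ *   if (rc)
++ *           return rc;
++ */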
++
++/* This performs a fast write of one or more consecutive bytes to an
++ * I2C device. Not all devices support consecutive writes of more
++ * than one byte; for these devices use efx_i2c_write() instead.
++ */
++int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset,
++ const u8 *data, unsigned int len)
++{
++ int i;
++ int rc;
++
++ EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
++ EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
++ EFX_WARN_ON_PARANOID(len < 1);
++
++ /* Select device and starting offset */
++ i2c_start(i2c);
++ rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
++ if (rc)
++ goto out;
++ rc = i2c_send_byte(i2c, offset);
++ if (rc)
++ goto out;
++
++ /* Write data to device */
++ for (i = 0; i < len; i++) {
++ rc = i2c_send_byte(i2c, data[i]);
++ if (rc)
++ goto out;
++ }
++
++ out:
++ i2c_stop(i2c);
++ i2c_release(i2c);
++
++ return rc;
++}
++
++/* I2C byte-by-byte read */
++int efx_i2c_read(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, u8 *data, unsigned int len)
++{
++ int rc;
++
++ /* i2c_fast_read with length 1 is a single byte read */
++ for (; len > 0; offset++, data++, len--) {
++ rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
++ if (rc)
++ return rc;
++ }
++
++ return 0;
++}
++
++/* I2C byte-by-byte write */
++int efx_i2c_write(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, const u8 *data, unsigned int len)
++{
++ int rc;
++
++ /* i2c_fast_write with length 1 is a single byte write */
++ for (; len > 0; offset++, data++, len--) {
++ rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
++ if (rc)
++ return rc;
++ mdelay(i2c->op->mdelay);
++ }
++
++ return 0;
++}
++
++/* This is just a slightly neater wrapper around efx_i2c_fast_write
++ * for the case where the target doesn't take an offset: the first
++ * data byte is sent in place of the offset.
++ */
++int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
++ u8 device_id, const u8 *data, unsigned int len)
++{
++ return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
++}
++
++/* I2C receiving of bytes - does not send an offset byte */
++int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
++ u8 *bytes, unsigned int len)
++{
++ int i;
++ int rc;
++
++ EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
++ EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
++ EFX_WARN_ON_PARANOID(len < 1);
++
++ /* Select device */
++ i2c_start(i2c);
++
++ /* Read data from device */
++ rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
++ if (rc)
++ goto out;
++
++ for (i = 0; i < (len - 1); i++)
++ /* Read and acknowledge all but the last byte */
++ bytes[i] = i2c_recv_byte(i2c, 1);
++ /* Read last byte with no acknowledgement */
++ bytes[i] = i2c_recv_byte(i2c, 0);
++
++ out:
++ i2c_stop(i2c);
++ i2c_release(i2c);
++
++ return rc;
++}
++
++/* SMBus and some I2C devices will time out if the I2C clock is
++ * held low for too long. This is most likely to happen in virtualised
++ * systems (when the entire domain is descheduled) but could in
++ * principle happen due to preemption on any busy system (and given the
++ * potential length of an I2C operation turning preemption off is not
++ * a sensible option). The following functions deal with the failure by
++ * retrying up to a fixed number of times.
++ */
++
++#define I2C_MAX_RETRIES (10)
++
++/* The timeout problem will result in -EIO. If the wrapped function
++ * returns any other error, pass this up and do not retry. */
++#define RETRY_WRAPPER(_f) \
++ int retries = I2C_MAX_RETRIES; \
++ int rc; \
++ while (retries) { \
++ rc = _f; \
++ if (rc != -EIO) \
++ return rc; \
++ retries--; \
++ } \
++ return rc; \
++
++int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
++{
++ RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
++}
++
++int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, u8 *data, unsigned int len)
++{
++ RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
++}
++
++int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, const u8 *data, unsigned int len)
++{
++ RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
++}
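++
++/* Example (illustrative): callers on virtualised systems should
++ * prefer the retrying variants over the plain calls above, e.g.:
++ *
++ *   rc = efx_i2c_read_retry(i2c, device_id, offset, buf, len);
++ */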
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/i2c-direct.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/i2c-direct.h
+--- linux-2.6.18.8/drivers/net/sfc/i2c-direct.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/i2c-direct.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,108 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_I2C_DIRECT_H
++#define EFX_I2C_DIRECT_H
++
++#include "net_driver.h"
++
++/*
++ * Direct control of an I2C bus
++ */
++
++struct efx_i2c_interface;
++
++/**
++ * struct efx_i2c_bit_operations - I2C bus direct control methods
++ *
++ * @setsda: Set state of SDA line
++ * @setscl: Set state of SCL line
++ * @getsda: Get state of SDA line
++ * @getscl: Get state of SCL line
++ * @udelay: Delay between each bit operation
++ * @mdelay: Delay between each byte write
++ */
++struct efx_i2c_bit_operations {
++ void (*setsda) (struct efx_i2c_interface *i2c);
++ void (*setscl) (struct efx_i2c_interface *i2c);
++ int (*getsda) (struct efx_i2c_interface *i2c);
++ int (*getscl) (struct efx_i2c_interface *i2c);
++ unsigned int udelay;
++ unsigned int mdelay;
++};
++
++/**
++ * struct efx_i2c_interface - an I2C interface
++ *
++ * @efx: Attached Efx NIC
++ * @op: I2C bus control methods
++ * @sda: Current output state of SDA line
++ * @scl: Current output state of SCL line
++ */
++struct efx_i2c_interface {
++ struct efx_nic *efx;
++ struct efx_i2c_bit_operations *op;
++ unsigned int sda:1;
++ unsigned int scl:1;
++};
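++
++/* Example (sketch; the my_board_* callbacks and delay values are
++ * hypothetical):
++ *
++ *   static struct efx_i2c_bit_operations my_board_i2c_ops = {
++ *           .setsda = my_board_setsda,
++ *           .setscl = my_board_setscl,
++ *           .getsda = my_board_getsda,
++ *           .getscl = my_board_getscl,
++ *           .udelay = 20,
++ *           .mdelay = 10,
++ *   };
++ */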
++
++extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
++extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset,
++ u8 *data, unsigned int len);
++extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset,
++ const u8 *data, unsigned int len);
++extern int efx_i2c_read(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, u8 *data, unsigned int len);
++extern int efx_i2c_write(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset,
++ const u8 *data, unsigned int len);
++
++extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
++ const u8 *bytes, unsigned int len);
++
++extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
++ u8 *bytes, unsigned int len);
++
++/* Versions of the API that retry on failure. */
++extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
++ u8 device_id);
++
++extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset, u8 *data, unsigned int len);
++
++extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
++ u8 device_id, u8 offset,
++ const u8 *data, unsigned int len);
++
++#endif /* EFX_I2C_DIRECT_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/Kconfig linux-2.6.18-xen-3.3.0/drivers/net/sfc/Kconfig
+--- linux-2.6.18.8/drivers/net/sfc/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,35 @@
++config SFC
++ tristate "Solarflare Solarstorm SFC4000 support"
++ depends on PCI && INET
++ select MII
++ help
++ This driver supports 10-gigabit Ethernet cards based on
++ the Solarflare Communications Solarstorm SFC4000 controller.
++
++ To compile this driver as a module, choose M here. The module
++ will be called sfc.
++
++config SFC_DEBUGFS
++ bool "Solarflare Solarstorm SFC4000 debugging support"
++ depends on SFC && DEBUG_FS
++ default n
++ help
++ This option creates an "sfc" subdirectory of debugfs with
++ debugging information for the SFC4000 driver.
++
++ If unsure, say N.
++
++config SFC_MTD
++ depends on SFC && MTD && MTD_PARTITIONS
++ tristate "Solarflare Solarstorm SFC4000 flash/EEPROM support"
++ help
++ This module exposes the on-board flash and/or EEPROM memory as
++ MTD devices (e.g. /dev/mtd1). This makes it possible to upload a
++ new boot ROM to the NIC.
++
++config SFC_RESOURCE
++ depends on SFC && X86
++ tristate "Solarflare Solarstorm SFC4000 resource driver"
++ help
++ This module provides the SFC resource manager driver.
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/kernel_compat.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/kernel_compat.c
+--- linux-2.6.18.8/drivers/net/sfc/kernel_compat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/kernel_compat.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,654 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#define EFX_IN_KCOMPAT_C 1
++
++#include "net_driver.h"
++#include <linux/mii.h>
++#include <linux/ethtool.h>
++#include <linux/random.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/rtnetlink.h>
++#include <linux/bootmem.h>
++#include <asm/uaccess.h>
++#include "gmii.h"
++#include "ethtool.h"
++
++/*
++ * Kernel backwards compatibility
++ *
++ * This file provides functionality missing from earlier kernels.
++ */
++
++/**************************************************************************
++ *
++ * GMII-friendly versions of mii_ethtool_[gs]set
++ *
++ **************************************************************************
++ *
++ * Kernels prior to 2.6.12 don't support GMII PHYs via
++ * mii_ethtool_gset and mii_ethtool_sset. The versions below are
++ * taken from a 2.6.12 kernel tree, with the tests for
++ * mii->supports_gmii removed (since that field doesn't exist in older
++ * kernels).
++ *
++ */
++
++#ifdef EFX_NEED_MII_ETHTOOL_FIX
++int efx_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
++{
++ struct net_device *dev = mii->dev;
++ u32 advert, bmcr, lpa, nego;
++ u32 advert2 = 0, bmcr2 = 0, lpa2 = 0;
++
++ ecmd->supported =
++ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
++ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
++ ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
++
++ /* only supports twisted-pair */
++ ecmd->port = PORT_MII;
++
++ /* only supports internal transceiver */
++ ecmd->transceiver = XCVR_INTERNAL;
++
++ /* this isn't fully supported at higher layers */
++ ecmd->phy_address = mii->phy_id;
++
++ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
++ advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
++
++ if (advert & ADVERTISE_10HALF)
++ ecmd->advertising |= ADVERTISED_10baseT_Half;
++ if (advert & ADVERTISE_10FULL)
++ ecmd->advertising |= ADVERTISED_10baseT_Full;
++ if (advert & ADVERTISE_100HALF)
++ ecmd->advertising |= ADVERTISED_100baseT_Half;
++ if (advert & ADVERTISE_100FULL)
++ ecmd->advertising |= ADVERTISED_100baseT_Full;
++ if (advert2 & ADVERTISE_1000HALF)
++ ecmd->advertising |= ADVERTISED_1000baseT_Half;
++ if (advert2 & ADVERTISE_1000FULL)
++ ecmd->advertising |= ADVERTISED_1000baseT_Full;
++
++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
++ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
++ bmcr2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
++ lpa2 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
++ if (bmcr & BMCR_ANENABLE) {
++ ecmd->advertising |= ADVERTISED_Autoneg;
++ ecmd->autoneg = AUTONEG_ENABLE;
++
++ nego = mii_nway_result(advert & lpa);
++ if ((bmcr2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &
++ (lpa2 >> 2))
++ ecmd->speed = SPEED_1000;
++ else if (nego == LPA_100FULL || nego == LPA_100HALF)
++ ecmd->speed = SPEED_100;
++ else
++ ecmd->speed = SPEED_10;
++ if ((lpa2 & LPA_1000FULL) || nego == LPA_100FULL ||
++ nego == LPA_10FULL) {
++ ecmd->duplex = DUPLEX_FULL;
++ mii->full_duplex = 1;
++ } else {
++ ecmd->duplex = DUPLEX_HALF;
++ mii->full_duplex = 0;
++ }
++ } else {
++ ecmd->autoneg = AUTONEG_DISABLE;
++
++ ecmd->speed = ((bmcr & BMCR_SPEED1000 &&
++ (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 :
++ (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10);
++ ecmd->duplex =
++ (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
++ }
++
++ /* ignore maxtxpkt, maxrxpkt for now */
++
++ return 0;
++}
++
++int efx_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
++{
++ struct net_device *dev = mii->dev;
++
++ if (ecmd->speed != SPEED_10 &&
++ ecmd->speed != SPEED_100 &&
++ ecmd->speed != SPEED_1000)
++ return -EINVAL;
++ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
++ return -EINVAL;
++ if (ecmd->port != PORT_MII)
++ return -EINVAL;
++ if (ecmd->transceiver != XCVR_INTERNAL)
++ return -EINVAL;
++ if (ecmd->phy_address != mii->phy_id)
++ return -EINVAL;
++ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
++ return -EINVAL;
++
++ /* ignore supported, maxtxpkt, maxrxpkt */
++
++ if (ecmd->autoneg == AUTONEG_ENABLE) {
++ u32 bmcr, advert, tmp;
++ u32 advert2 = 0, tmp2 = 0;
++
++ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
++ ADVERTISED_10baseT_Full |
++ ADVERTISED_100baseT_Half |
++ ADVERTISED_100baseT_Full |
++ ADVERTISED_1000baseT_Half |
++ ADVERTISED_1000baseT_Full)) == 0)
++ return -EINVAL;
++
++ /* advertise only what has been requested */
++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
++ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
++ advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
++ tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
++ if (ecmd->advertising & ADVERTISED_10baseT_Half)
++ tmp |= ADVERTISE_10HALF;
++ if (ecmd->advertising & ADVERTISED_10baseT_Full)
++ tmp |= ADVERTISE_10FULL;
++ if (ecmd->advertising & ADVERTISED_100baseT_Half)
++ tmp |= ADVERTISE_100HALF;
++ if (ecmd->advertising & ADVERTISED_100baseT_Full)
++ tmp |= ADVERTISE_100FULL;
++ if (ecmd->advertising & ADVERTISED_1000baseT_Half)
++ tmp2 |= ADVERTISE_1000HALF;
++ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
++ tmp2 |= ADVERTISE_1000FULL;
++ if (advert != tmp) {
++ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
++ mii->advertising = tmp;
++ }
++ if (advert2 != tmp2)
++ mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2);
++
++ /* turn on autonegotiation, and force a renegotiate */
++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
++ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
++
++ mii->force_media = 0;
++ } else {
++ u32 bmcr, tmp;
++
++ /* turn off auto negotiation, set speed and duplexity */
++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
++ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
++ BMCR_SPEED1000 | BMCR_FULLDPLX);
++ if (ecmd->speed == SPEED_1000)
++ tmp |= BMCR_SPEED1000;
++ else if (ecmd->speed == SPEED_100)
++ tmp |= BMCR_SPEED100;
++ if (ecmd->duplex == DUPLEX_FULL) {
++ tmp |= BMCR_FULLDPLX;
++ mii->full_duplex = 1;
++ } else {
++ mii->full_duplex = 0;
++ }
++ if (bmcr != tmp)
++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
++
++ mii->force_media = 1;
++ }
++ return 0;
++}
++#endif /* EFX_NEED_MII_ETHTOOL_FIX */
++
++/**************************************************************************
++ *
++ * unregister_netdevice_notifier: has a race before 2.6.17
++ *
++ **************************************************************************
++ *
++ */
++
++#ifdef EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX
++/**
++ * efx_unregister_netdevice_notifier - fixed unregister_netdevice_notifier
++ * @nb: notifier to unregister
++ *
++ * unregister_netdevice_notifier() does not wait for the notifier
++ * to be unused before 2.6.17. This wrapper fixes that.
++ */
++int efx_unregister_netdevice_notifier(struct notifier_block *nb)
++{
++ int res;
++
++ res = unregister_netdevice_notifier(nb);
++ /* Ensure any outstanding calls complete. */
++ rtnl_lock();
++ rtnl_unlock();
++ return res;
++}
++#endif /* EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX */
++
++/**************************************************************************
++ *
++ * IOMMU-locking versions of pci_[un]map_single and
++ * pci_{alloc,free}_consistent. See SFC bug 4560.
++ *
++ **************************************************************************
++ *
++ */
++#ifdef EFX_NEED_IOMMU_LOCK
++
++/*
++ * efx_use_iommu_lock - IOMMU lock use control
++ *
++ * If set to 1, the driver will attempt to mitigate the race condition
++ * bug around IOMMU accesses in some 2.6 kernels. If set to 2, the
++ * driver will use the lock even if it thinks it doesn't need to.
++ * Note that this is only a best-effort attempt; in particular, we
++ * cannot do anything about other drivers touching the IOMMU.
++ */
++static unsigned int efx_use_iommu_lock = 1;
++EXPORT_SYMBOL(efx_use_iommu_lock);
++
++/*
++ * efx_iommu_lock - lock around IOMMU accesses
++ *
++ * This spinlock should be held while calling functions that access
++ * the IOMMU if efx_use_iommu_lock is >= 2. The efx_pci_*()
++ * functions do this where possible.
++ */
++static spinlock_t efx_iommu_lock = SPIN_LOCK_UNLOCKED;
++EXPORT_SYMBOL(efx_iommu_lock);
++
++/* Don't use the IOMMU lock if the device can access the whole of memory */
++#define EFX_DMA_CONSISTENT(_efx) \
++ (((_efx)->dma_mask >> PAGE_SHIFT) >= max_pfn)
++/**
++ * efx_pci_map_single - map buffer for DMA, under IOMMU lock
++ * @pci: PCI device
++ * @ptr: Buffer
++ * @size: Buffer length
++ * @direction: DMA direction
++ *
++ * Wrapper for pci_map_single that uses efx_iommu_lock if necessary.
++ */
++dma_addr_t efx_pci_map_single(struct pci_dev *pci, void *ptr, size_t size,
++ int direction)
++{
++ struct efx_nic *efx = pci_get_drvdata(pci);
++ unsigned long flags __attribute__ ((unused));
++ dma_addr_t dma_addr;
++
++ if (unlikely((efx_use_iommu_lock &&
++ (!EFX_NO_IOMMU) &&
++ (!EFX_DMA_CONSISTENT(efx))) ||
++ efx_use_iommu_lock >= 2)) {
++ spin_lock_irqsave(&efx_iommu_lock, flags);
++ dma_addr = pci_map_single(pci, ptr, size, direction);
++ spin_unlock_irqrestore(&efx_iommu_lock, flags);
++ } else {
++ dma_addr = pci_map_single(pci, ptr, size, direction);
++ }
++ return dma_addr;
++}
++
++/**
++ * efx_pci_unmap_single - unmap buffer for DMA, under IOMMU lock
++ * @pci: PCI device
++ * @dma_addr: DMA address
++ * @size: Buffer length
++ * @direction: DMA direction
++ *
++ * Wrapper for pci_unmap_single that uses efx_iommu_lock if necessary.
++ */
++void efx_pci_unmap_single(struct pci_dev *pci, dma_addr_t dma_addr,
++ size_t size, int direction)
++{
++ struct efx_nic *efx = pci_get_drvdata(pci);
++ unsigned long flags __attribute__ ((unused));
++
++ if (unlikely((efx_use_iommu_lock &&
++ (!EFX_NO_IOMMU) &&
++ (!EFX_DMA_CONSISTENT(efx))) ||
++ efx_use_iommu_lock >= 2)) {
++ spin_lock_irqsave(&efx_iommu_lock, flags);
++ pci_unmap_single(pci, dma_addr, size, direction);
++ spin_unlock_irqrestore(&efx_iommu_lock, flags);
++ } else {
++ pci_unmap_single(pci, dma_addr, size, direction);
++ }
++}
++
++/**
++ * efx_pci_alloc_consistent - allocate DMA-consistent buffer, under IOMMU lock
++ * @pci: PCI device
++ * @size: Buffer length
++ * @dma_addr: DMA address
++ *
++ * Wrapper for pci_alloc_consistent that uses efx_iommu_lock if necessary.
++ *
++ * Bugs: Currently this can't use the spinlock because
++ * pci_alloc_consistent may block.
++ */
++void *efx_pci_alloc_consistent(struct pci_dev *pci, size_t size,
++ dma_addr_t *dma_addr)
++{
++ return pci_alloc_consistent(pci, size, dma_addr);
++}
++
++/**
++ * efx_pci_free_consistent - free DMA-consistent buffer, under IOMMU lock
++ * @pci: PCI device
++ * @size: Buffer length
++ * @ptr: Buffer
++ * @dma_addr: DMA address
++ *
++ * Wrapper for pci_free_consistent that uses efx_iommu_lock if necessary.
++ */
++void efx_pci_free_consistent(struct pci_dev *pci, size_t size, void *ptr,
++ dma_addr_t dma_addr)
++{
++ struct efx_nic *efx = pci_get_drvdata(pci);
++ unsigned long flags __attribute__ ((unused));
++
++ if (unlikely((efx_use_iommu_lock &&
++ (!EFX_NO_IOMMU) &&
++ (!EFX_DMA_CONSISTENT(efx))) ||
++ efx_use_iommu_lock >= 2)) {
++ spin_lock_irqsave(&efx_iommu_lock, flags);
++ pci_free_consistent(pci, size, ptr, dma_addr);
++ spin_unlock_irqrestore(&efx_iommu_lock, flags);
++ } else {
++ pci_free_consistent(pci, size, ptr, dma_addr);
++ }
++}
++
++module_param(efx_use_iommu_lock, uint, 0644);
++MODULE_PARM_DESC(efx_use_iommu_lock, "Enable lock for bug in free_iommu");
++
++#endif /* EFX_NEED_IOMMU_LOCK */
++
++#ifdef EFX_NEED_COMPOUND_PAGE_FIX
++
++void efx_compound_page_destructor(struct page *page)
++{
++ /* Fake up page state to keep __free_pages happy */
++ set_page_count(page, 1);
++ page[1].mapping = NULL;
++
++ __free_pages(page, (unsigned long)page[1].index);
++}
++
++#endif /* EFX_NEED_COMPOUND_PAGE_FIX */
++
++/**************************************************************************
++ *
++ * print_hex_dump, taken from lib/hexdump.c.
++ *
++ **************************************************************************
++ *
++ */
++#ifdef EFX_NEED_HEX_DUMP
++
++#define hex_asc(x) "0123456789abcdef"[x]
++#define isascii(c) (((unsigned char)(c))<=0x7f)
++
++static void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
++ int groupsize, char *linebuf, size_t linebuflen,
++ int ascii)
++{
++ const u8 *ptr = buf;
++ u8 ch;
++ int j, lx = 0;
++ int ascii_column;
++
++ if (rowsize != 16 && rowsize != 32)
++ rowsize = 16;
++
++ if (!len)
++ goto nil;
++ if (len > rowsize) /* limit to one line at a time */
++ len = rowsize;
++ if ((len % groupsize) != 0) /* no mixed size output */
++ groupsize = 1;
++
++ switch (groupsize) {
++ case 8: {
++ const u64 *ptr8 = buf;
++ int ngroups = len / groupsize;
++
++ for (j = 0; j < ngroups; j++)
++ lx += scnprintf(linebuf + lx, linebuflen - lx,
++ "%16.16llx ", (unsigned long long)*(ptr8 + j));
++ ascii_column = 17 * ngroups + 2;
++ break;
++ }
++
++ case 4: {
++ const u32 *ptr4 = buf;
++ int ngroups = len / groupsize;
++
++ for (j = 0; j < ngroups; j++)
++ lx += scnprintf(linebuf + lx, linebuflen - lx,
++ "%8.8x ", *(ptr4 + j));
++ ascii_column = 9 * ngroups + 2;
++ break;
++ }
++
++ case 2: {
++ const u16 *ptr2 = buf;
++ int ngroups = len / groupsize;
++
++ for (j = 0; j < ngroups; j++)
++ lx += scnprintf(linebuf + lx, linebuflen - lx,
++ "%4.4x ", *(ptr2 + j));
++ ascii_column = 5 * ngroups + 2;
++ break;
++ }
++
++ default:
++ for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
++ j++) {
++ ch = ptr[j];
++ linebuf[lx++] = hex_asc(ch >> 4);
++ linebuf[lx++] = hex_asc(ch & 0x0f);
++ linebuf[lx++] = ' ';
++ }
++ ascii_column = 3 * rowsize + 2;
++ break;
++ }
++ if (!ascii)
++ goto nil;
++
++ while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
++ linebuf[lx++] = ' ';
++ /* Removed is_print() check */
++ for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
++ linebuf[lx++] = isascii(ptr[j]) ? ptr[j] : '.';
++nil:
++ linebuf[lx++] = '\0';
++}
++
++void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize,
++ const void *buf, size_t len, int ascii)
++{
++ const u8 *ptr = buf;
++ int i, linelen, remaining = len;
++ char linebuf[200];
++
++ if (rowsize != 16 && rowsize != 32)
++ rowsize = 16;
++
++ for (i = 0; i < len; i += rowsize) {
++ linelen = min(remaining, rowsize);
++ remaining -= rowsize;
++ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
++ linebuf, sizeof(linebuf), ascii);
++
++ switch (prefix_type) {
++ case DUMP_PREFIX_ADDRESS:
++ printk("%s%s%*p: %s\n", level, prefix_str,
++ (int)(2 * sizeof(void *)), ptr + i, linebuf);
++ break;
++ case DUMP_PREFIX_OFFSET:
++ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
++ break;
++ default:
++ printk("%s%s%s\n", level, prefix_str, linebuf);
++ break;
++ }
++ }
++}
++
++#endif /* EFX_NEED_HEX_DUMP */
++
++/**************************************************************************
++ *
++ * print_mac, from net/ethernet/eth.c in v2.6.24
++ *
++ **************************************************************************
++ *
++ */
++#ifdef EFX_NEED_PRINT_MAC
++char *print_mac(char *buf, const u8 *addr)
++{
++ sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
++ return buf;
++}
++#endif /* EFX_NEED_PRINT_MAC */
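++
++/* Example (illustrative): print_mac() needs an 18-byte buffer for
++ * "xx:xx:xx:xx:xx:xx" plus the terminating NUL:
++ *
++ *   char mac[18];
++ *   printk(KERN_INFO "MAC is %s\n", print_mac(mac, net_dev->dev_addr));
++ */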
++
++#ifdef EFX_NEED_CSUM_TCPUDP_NOFOLD
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++__wsum
++csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
++ unsigned short proto, __wsum sum)
++#else
++__wsum
++csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr,
++ unsigned short len, unsigned short proto, __wsum sum)
++#endif
++{
++ unsigned long result;
++
++ result = (__force u64)saddr + (__force u64)daddr +
++ (__force u64)sum + ((len + proto) << 8);
++
++ /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */
++ /* 64 to 33 */
++ result = (result & 0xffffffff) + (result >> 32);
++ /* 33 to 32 */
++ result = (result & 0xffffffff) + (result >> 32);
++ return (__force __wsum)result;
++}
++#endif /* EFX_NEED_CSUM_TCPUDP_NOFOLD */
++
++#ifdef EFX_NEED_RANDOM_ETHER_ADDR
++/* Generate random MAC address */
++void efx_random_ether_addr(uint8_t *addr)
++{
++ get_random_bytes(addr, ETH_ALEN);
++ addr[0] &= 0xfe; /* clear multicast bit */
++ addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
++}
++#endif /* EFX_NEED_RANDOM_ETHER_ADDR */
++
++#ifdef EFX_NEED_MSECS_TO_JIFFIES
++/*
++ * When we convert to jiffies then we interpret incoming values
++ * the following way:
++ *
++ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
++ *
++ * - 'too large' values [that would result in larger than
++ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
++ *
++ * - all other values are converted to jiffies by either multiplying
++ * the input value by a factor or dividing it with a factor
++ *
++ * We must also be careful about 32-bit overflows.
++ */
++#ifndef MSEC_PER_SEC
++#define MSEC_PER_SEC 1000L
++#endif
++unsigned long msecs_to_jiffies(const unsigned int m)
++{
++ /*
++ * Negative value, means infinite timeout:
++ */
++ if ((int)m < 0)
++ return MAX_JIFFY_OFFSET;
++
++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
++ /*
++ * HZ is equal to or smaller than 1000, and 1000 is a nice
++ * round multiple of HZ, divide with the factor between them,
++ * but round upwards:
++ */
++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
++ /*
++ * HZ is larger than 1000, and HZ is a nice round multiple of
++ * 1000 - simply multiply with the factor between them.
++ *
++ * But first make sure the multiplication result cannot
++ * overflow:
++ */
++ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
++ return MAX_JIFFY_OFFSET;
++
++ return m * (HZ / MSEC_PER_SEC);
++#else
++ /*
++ * Generic case - multiply, round and divide. But first
++ * check that, if we are doing a net multiplication, we
++ * wouldn't overflow:
++ */
++ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
++ return MAX_JIFFY_OFFSET;
++
++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
++#endif
++}
++#endif /* EFX_NEED_MSECS_TO_JIFFIES */
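++
++/* Example (illustrative): with HZ == 250 the divide-and-round-up
++ * branch above gives msecs_to_jiffies(1004) == (1004 + 3) / 4 == 251.
++ */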
++
++#ifdef EFX_NEED_MSLEEP
++/**
++ * msleep - sleep safely even with waitqueue interruptions
++ * @msecs: Time in milliseconds to sleep for
++ */
++void msleep(unsigned int msecs)
++{
++ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
++
++ while (timeout)
++ timeout = schedule_timeout_uninterruptible(timeout);
++}
++#endif /* EFX_NEED_MSLEEP */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/kernel_compat.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/kernel_compat.h
+--- linux-2.6.18.8/drivers/net/sfc/kernel_compat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/kernel_compat.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,925 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_KERNEL_COMPAT_H
++#define EFX_KERNEL_COMPAT_H
++
++#include <linux/version.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/pci.h>
++#include <linux/workqueue.h>
++#include <linux/moduleparam.h>
++#include <linux/interrupt.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++
++#include "extraversion.h"
++
++/*
++ * Kernel backwards compatibility
++ *
++ * This file provides macros that enable the driver to be compiled on
++ * any kernel from 2.6.9 onward (plus SLES 9 2.6.5), without requiring
++ * explicit version tests scattered throughout the code.
++ */
++
++/**************************************************************************
++ *
++ * Version/config/architecture tests to set feature flags
++ *
++ **************************************************************************
++ *
++ * NOTE: For simplicity, these initial version tests cover kernel.org
++ * releases only. Backported features in "enterprise" kernels are
++ * handled further down.
++ */
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) && \
++ !(defined(EFX_DIST_SUSE) && \
++ LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5) && \
++ EFX_DIST_KVER_LEVEL_1 == 7)
++ #error "This kernel version is now unsupported"
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
++ #define EFX_NEED_RANDOM_ETHER_ADDR yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
++ #define EFX_NEED_I2C_CLASS_HWMON yes
++ #define EFX_NEED_IF_MII yes
++ #define EFX_NEED_MSLEEP yes
++ #define EFX_NEED_MSECS_TO_JIFFIES yes
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,8)
++ #define EFX_USE_MTD_ERASE_FAIL_ADDR yes
++#else
++ #define EFX_NEED_MTD_ERASE_CALLBACK yes
++ #define EFX_NEED_DUMMY_PCI_DISABLE_MSI yes
++ #define EFX_NEED_DUMMY_MSIX yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
++ #define EFX_NEED_BYTEORDER_TYPES yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++ #define EFX_NEED_MMIOWB yes
++ #define EFX_NEED_PCI_SAVE_RESTORE_WRAPPERS yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
++ #define EFX_NEED_DUMMY_SUPPORTS_GMII yes
++ #define EFX_NEED_MII_CONSTANTS yes
++ #define EFX_NEED_MII_ETHTOOL_FIX yes
++ #define EFX_HAVE_MSIX_TABLE_RESERVED yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
++ #define EFX_NEED_SCHEDULE_TIMEOUT_INTERRUPTIBLE yes
++ #define EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE yes
++ #define EFX_NEED_GFP_T yes
++ #define EFX_NEED_KZALLOC yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
++ #define EFX_NEED_SETUP_TIMER yes
++ #ifdef CONFIG_HUGETLB_PAGE
++ #define EFX_USE_COMPOUND_PAGES yes
++ #endif
++#else
++ #define EFX_USE_COMPOUND_PAGES yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++ #define EFX_NEED_MUTEX yes
++ #define EFX_NEED_SAFE_LISTS yes
++ #ifdef EFX_USE_COMPOUND_PAGES
++ #define EFX_NEED_COMPOUND_PAGE_FIX yes
++ #endif
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ #define EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX yes
++ #define EFX_NEED_DEV_NOTICE yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++ #define EFX_NEED_IRQF_FLAGS yes
++ #define EFX_NEED_NETDEV_ALLOC_SKB yes
++ /* Fedora backported 2.6.18 netdevice.h changes */
++ #ifndef NETIF_F_GSO
++ #define EFX_NEED_NETIF_TX_LOCK yes
++ #endif
++#else
++ #define EFX_USE_MTD_WRITESIZE yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ #define EFX_NEED_IRQ_HANDLER_T yes
++ #define EFX_HAVE_IRQ_HANDLER_REGS yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++ #define EFX_NEED_WORK_API_WRAPPERS yes
++ #define EFX_USE_FASTCALL yes
++ #define EFX_NEED_CSUM_UNFOLDED yes
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ /*
++ * debugfs was introduced earlier, but only supports sym-links
++ * from 2.6.21
++ */
++ #ifdef CONFIG_DEBUG_FS
++ #define EFX_USE_DEBUGFS yes
++ #endif
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
++ #define EFX_NEED_SKB_HEADER_MACROS yes
++ #define EFX_NEED_HEX_DUMP yes
++#else
++ #define EFX_USE_CANCEL_WORK_SYNC yes
++#endif
++
++#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,22)
++ #define EFX_NEED_HEX_DUMP_CONST_FIX yes
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) && \
++ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
++ #define EFX_USE_ETHTOOL_GET_PERM_ADDR yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++ #ifdef __ia64__
++ /* csum_tcpudp_nofold() is extern but not exported */
++ #define EFX_NEED_CSUM_TCPUDP_NOFOLD yes
++ #endif
++#else
++ #define EFX_USE_PCI_DEV_REVISION yes
++ #define EFX_USE_CANCEL_DELAYED_WORK_SYNC yes
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++ #define EFX_HAVE_OLD_NAPI yes
++ #define EFX_NEED_GENERIC_LRO yes
++ #define EFX_NEED_PRINT_MAC yes
++#else
++ #define EFX_USE_ETHTOOL_FLAGS yes
++#endif
++
++/*
++ * SFC Bug 4560: Some kernels leak IOMMU entries under heavy load. Use a
++ * spinlock to serialise access where possible to alleviate the
++ * problem.
++ *
++ * NB. The following definition is duplicated in
++ * the char driver. Please keep in sync.
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && \
++ defined(__x86_64__) && defined(CONFIG_SMP))
++ #define EFX_NEED_IOMMU_LOCK yes
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
++ #if defined(CONFIG_GART_IOMMU)
++ #define EFX_NO_IOMMU no_iommu
++ #else
++ #define EFX_NO_IOMMU 1
++ #endif
++ #else
++ #define EFX_NO_IOMMU 0
++ #endif
++#endif
++
++#ifdef CONFIG_PPC64
++ /* __raw_writel and friends are broken on ppc64 */
++ #define EFX_NEED_RAW_READ_AND_WRITE_FIX yes
++#endif
++
++/**************************************************************************
++ *
++ * Exceptions for backported features
++ *
++ **************************************************************************
++ */
++
++/* RHEL4 */
++#if defined(EFX_DIST_RHEL) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)
++ #if EFX_DIST_KVER_LEVEL_1 >= 22
++ /* linux-2.6.9-mmiowb.patch */
++ #undef EFX_NEED_MMIOWB
++ #endif
++ #if EFX_DIST_KVER_LEVEL_1 >= 34
++ /* linux-2.6.9-net-mii-update.patch */
++ #undef EFX_NEED_DUMMY_SUPPORTS_GMII
++ #undef EFX_NEED_MII_CONSTANTS
++ #undef EFX_NEED_MII_ETHTOOL_FIX
++ /* linux-2.6.9-gfp_t-typedef.patch */
++ #undef EFX_NEED_GFP_T
++ /* linux-2.6.9-slab-update.patch */
++ #undef EFX_NEED_KZALLOC
++ #endif
++ #if EFX_DIST_KVER_LEVEL_1 >= 55
++ /* linux-2.6.18-sata-update.patch (ported from 2.6.18->2.6.9) */
++ #undef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
++ #undef EFX_NEED_IRQ_HANDLER_T
++ #endif
++#endif
++
++/* RHEL5 */
++#if defined(EFX_DIST_RHEL) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
++ #if EFX_DIST_KVER_LEVEL_1 >= 53
++ /* linux-2.6.18-sata-update.patch */
++ #undef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
++ #undef EFX_NEED_IRQ_HANDLER_T
++ #endif
++#endif
++
++#if defined(EFX_DIST_RHEL)
++ #if (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,9)) && \
++ (LINUX_VERSION_CODE != KERNEL_VERSION(2,6,18))
++ #error "Unknown Red Hat Enterprise kernel version"
++ #endif
++#endif
++
++/* SLES9 */
++#if defined(EFX_DIST_SUSE) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5) && \
++ EFX_DIST_KVER_LEVEL_1 == 7
++ #if EFX_DIST_KVER_LEVEL_2 >= 139
++ #undef EFX_NEED_MMIOWB
++ #endif
++ #if EFX_DIST_KVER_LEVEL_2 >= 191
++ #undef EFX_NEED_MSLEEP
++ #undef EFX_NEED_MSECS_TO_JIFFIES
++ #endif
++ #if EFX_DIST_KVER_LEVEL_2 >= 244
++ #undef EFX_NEED_BYTEORDER_TYPES
++ #endif
++ #if EFX_DIST_KVER_LEVEL_2 >= 252
++ #undef EFX_NEED_KZALLOC
++ #endif
++#endif
++
++/**************************************************************************
++ *
++ * Definitions of missing constants, types, functions and macros
++ *
++ **************************************************************************
++ *
++ */
++
++#ifndef DMA_40BIT_MASK
++ #define DMA_40BIT_MASK 0x000000ffffffffffULL
++#endif
++
++#ifndef spin_trylock_irqsave
++ #define spin_trylock_irqsave(lock, flags) \
++ ({ \
++ local_irq_save(flags); \
++ spin_trylock(lock) ? \
++ 1 : ({local_irq_restore(flags); 0;}); \
++ })
++#endif
++
++#ifndef raw_smp_processor_id
++ #define raw_smp_processor_id() (current_thread_info()->cpu)
++#endif
++
++#ifndef NETIF_F_LRO
++ #define NETIF_F_LRO 0
++#endif
++
++/* Cope with small changes in PCI constants between minor kernel revisions */
++#if PCI_X_STATUS != 4
++ #undef PCI_X_STATUS
++ #define PCI_X_STATUS 4
++ #undef PCI_X_STATUS_MAX_SPLIT
++ #define PCI_X_STATUS_MAX_SPLIT 0x03800000
++#endif
++
++#ifndef PCI_EXP_LNKSTA
++ #define PCI_EXP_LNKSTA 18 /* Link Status */
++#endif
++
++/* Used for struct pt_regs */
++#ifndef regs_return_value
++ #if defined(__x86_64__)
++ #define regs_return_value(regs) ((regs)->rax)
++ #elif defined(__i386__)
++ #define regs_return_value(regs) ((regs)->eax)
++ #elif defined(__ia64__)
++ #define regs_return_value(regs) ((regs)->r8)
++ #else
++ #error "Need definition for regs_return_value()"
++ #endif
++#endif
++
++#ifndef __GFP_COMP
++ #define __GFP_COMP 0
++#endif
++
++#ifndef __iomem
++ #define __iomem
++#endif
++
++#ifndef NET_IP_ALIGN
++ #define NET_IP_ALIGN 2
++#endif
++
++#ifndef PCI_CAP_ID_EXP
++#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
++#endif
++
++#ifndef PCI_EXP_FLAGS
++#define PCI_EXP_FLAGS 2 /* Capabilities register */
++#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Capability version */
++#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
++#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
++#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
++#endif
++
++#ifndef PCI_EXP_DEVCAP
++#define PCI_EXP_DEVCAP 4 /* Device capabilities */
++#define PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */
++#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
++#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
++#endif
++
++#ifndef PCI_EXP_DEVCTL
++#define PCI_EXP_DEVCTL 8 /* Device Control */
++#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
++#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
++#endif
++
++#ifndef NETDEV_TX_OK
++ #define NETDEV_TX_OK 0
++#endif
++
++#ifndef NETDEV_TX_BUSY
++ #define NETDEV_TX_BUSY 1
++#endif
++
++#ifndef __force
++ #define __force
++#endif
++
++#if ! defined(for_each_cpu_mask) && ! defined(CONFIG_SMP)
++ #define for_each_cpu_mask(cpu, mask) \
++ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
++#endif
++
++/**************************************************************************/
++
++#ifdef EFX_NEED_IRQ_HANDLER_T
++ typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
++#endif
++
++#ifdef EFX_NEED_I2C_CLASS_HWMON
++ #define I2C_CLASS_HWMON (1<<0)
++#endif
++
++#ifdef EFX_NEED_MII_CONSTANTS
++ #define MII_CTRL1000 0x09
++ #define MII_STAT1000 0x0a
++ #define BMCR_SPEED1000 0x0040
++ #define ADVERTISE_PAUSE_ASYM 0x0800
++ #define ADVERTISE_PAUSE_CAP 0x0400
++ #define LPA_PAUSE_ASYM 0x0800
++ #define LPA_PAUSE_CAP 0x0400
++ #define ADVERTISE_1000FULL 0x0200
++ #define ADVERTISE_1000HALF 0x0100
++ #define LPA_1000FULL 0x0800
++ #define LPA_1000HALF 0x0400
++#endif
++
++#ifdef EFX_NEED_DUMMY_SUPPORTS_GMII
++ #include <linux/mii.h>
++ /* Ugly; redirect nonexistent new field to an old unused field */
++ #undef supports_gmii
++ #define supports_gmii full_duplex
++#endif
++
++#ifdef EFX_NEED_SKB_HEADER_MACROS
++ #define skb_mac_header(skb) (skb)->mac.raw
++ #define skb_network_header(skb) (skb)->nh.raw
++ #define tcp_hdr(skb) (skb)->h.th
++ #define ip_hdr(skb) (skb)->nh.iph
++ #define skb_tail_pointer(skb) (skb)->tail
++#endif
++
++#ifdef EFX_NEED_RAW_READ_AND_WRITE_FIX
++ #include <asm/io.h>
++ static inline void
++ efx_raw_writeb(u8 value, volatile void __iomem *addr)
++ {
++ writeb(value, addr);
++ }
++ static inline void
++ efx_raw_writew(u16 value, volatile void __iomem *addr)
++ {
++ writew(le16_to_cpu(value), addr);
++ }
++ static inline void
++ efx_raw_writel(u32 value, volatile void __iomem *addr)
++ {
++ writel(le32_to_cpu(value), addr);
++ }
++ static inline void
++ efx_raw_writeq(u64 value, volatile void __iomem *addr)
++ {
++ writeq(le64_to_cpu(value), addr);
++ }
++ static inline u8
++ efx_raw_readb(const volatile void __iomem *addr)
++ {
++ return readb(addr);
++ }
++ static inline u16
++ efx_raw_readw(const volatile void __iomem *addr)
++ {
++ return cpu_to_le16(readw(addr));
++ }
++ static inline u32
++ efx_raw_readl(const volatile void __iomem *addr)
++ {
++ return cpu_to_le32(readl(addr));
++ }
++ static inline u64
++ efx_raw_readq(const volatile void __iomem *addr)
++ {
++ return cpu_to_le64(readq(addr));
++ }
++
++ #undef __raw_writeb
++ #undef __raw_writew
++ #undef __raw_writel
++ #undef __raw_writeq
++ #undef __raw_readb
++ #undef __raw_readw
++ #undef __raw_readl
++ #undef __raw_readq
++ #define __raw_writeb efx_raw_writeb
++ #define __raw_writew efx_raw_writew
++ #define __raw_writel efx_raw_writel
++ #define __raw_writeq efx_raw_writeq
++ #define __raw_readb efx_raw_readb
++ #define __raw_readw efx_raw_readw
++ #define __raw_readl efx_raw_readl
++ #define __raw_readq efx_raw_readq
++#endif
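++
++/* The wrappers above exploit the fact that writel() and friends convert
++ * to little-endian internally: passing le32_to_cpu(value) in first
++ * cancels that swap, so the net effect is a byte-preserving raw access
++ * on kernels whose native __raw_* accessors misbehave. */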
++
++#ifdef EFX_NEED_SCHEDULE_TIMEOUT_INTERRUPTIBLE
++ static inline signed long
++ schedule_timeout_interruptible(signed long timeout)
++ {
++ set_current_state(TASK_INTERRUPTIBLE);
++ return schedule_timeout(timeout);
++ }
++#endif
++
++#ifdef EFX_NEED_SCHEDULE_TIMEOUT_UNINTERRUPTIBLE
++ static inline signed long
++ schedule_timeout_uninterruptible(signed long timeout)
++ {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ return schedule_timeout(timeout);
++ }
++#endif
++
++#ifdef EFX_NEED_MMIOWB
++ #if defined(__i386__) || defined(__x86_64__)
++ #define mmiowb()
++ #elif defined(__ia64__)
++ #ifndef ia64_mfa
++ #define ia64_mfa() asm volatile ("mf.a" ::: "memory")
++ #endif
++ #define mmiowb ia64_mfa
++ #else
++ #error "Need definition for mmiowb()"
++ #endif
++#endif
++
++#ifdef EFX_NEED_KZALLOC
++ static inline void *kzalloc(size_t size, int flags)
++ {
++ void *buf = kmalloc(size, flags);
++ if (buf)
++ memset(buf, 0, size);
++ return buf;
++ }
++#endif
++
++#ifdef EFX_NEED_SETUP_TIMER
++ static inline void setup_timer(struct timer_list * timer,
++ void (*function)(unsigned long),
++ unsigned long data)
++ {
++ timer->function = function;
++ timer->data = data;
++ init_timer(timer);
++ }
++#endif
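++
++/* Illustrative use of the setup_timer() fallback (the handler name and
++ * context pointer are hypothetical):
++ *
++ *	struct timer_list my_timer;
++ *	setup_timer(&my_timer, my_timeout_fn, (unsigned long)efx);
++ *	mod_timer(&my_timer, jiffies + HZ);
++ */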
++
++#ifdef EFX_NEED_MUTEX
++ #define EFX_DEFINE_MUTEX(x) DECLARE_MUTEX(x)
++ #undef DEFINE_MUTEX
++ #define DEFINE_MUTEX EFX_DEFINE_MUTEX
++
++ #define efx_mutex semaphore
++ #undef mutex
++ #define mutex efx_mutex
++
++ #define efx_mutex_init(x) init_MUTEX(x)
++ #undef mutex_init
++ #define mutex_init efx_mutex_init
++
++ #define efx_mutex_destroy(x) do { } while(0)
++ #undef mutex_destroy
++ #define mutex_destroy efx_mutex_destroy
++
++ #define efx_mutex_lock(x) down(x)
++ #undef mutex_lock
++ #define mutex_lock efx_mutex_lock
++
++ #define efx_mutex_lock_interruptible(x) down_interruptible(x)
++ #undef mutex_lock_interruptible
++ #define mutex_lock_interruptible efx_mutex_lock_interruptible
++
++ #define efx_mutex_unlock(x) up(x)
++ #undef mutex_unlock
++ #define mutex_unlock efx_mutex_unlock
++
++ #define efx_mutex_trylock(x) (!down_trylock(x))
++ #undef mutex_trylock
++ #define mutex_trylock efx_mutex_trylock
++
++ static inline int efx_mutex_is_locked(struct efx_mutex *m)
++ {
++ /* NB. This is quite inefficient, but it's the best we
++ * can do with the semaphore API. */
++ if (down_trylock(m))
++ return 1;
++ /* Undo the effect of down_trylock. */
++ up(m);
++ return 0;
++ }
++ #undef mutex_is_locked
++ #define mutex_is_locked efx_mutex_is_locked
++#endif
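++
++/* With the wrappers above, code written against the 2.6.16+ mutex API
++ * compiles unchanged on semaphore-only kernels; a minimal sketch:
++ *
++ *	static DEFINE_MUTEX(example_lock);	becomes DECLARE_MUTEX()
++ *	mutex_lock(&example_lock);		becomes down()
++ *	mutex_unlock(&example_lock);		becomes up()
++ */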
++
++#ifndef NETIF_F_GSO
++ #define efx_gso_size tso_size
++ #undef gso_size
++ #define gso_size efx_gso_size
++ #define efx_gso_segs tso_segs
++ #undef gso_segs
++ #define gso_segs efx_gso_segs
++#endif
++
++#ifdef EFX_NEED_IRQF_FLAGS
++ #ifdef SA_PROBEIRQ
++ #define IRQF_PROBE_SHARED SA_PROBEIRQ
++ #else
++ #define IRQF_PROBE_SHARED 0
++ #endif
++ #define IRQF_SHARED SA_SHIRQ
++#endif
++
++#ifdef EFX_NEED_NETDEV_ALLOC_SKB
++ #ifndef NET_SKB_PAD
++ #define NET_SKB_PAD 16
++ #endif
++
++ static inline
++ struct sk_buff *netdev_alloc_skb(struct net_device *dev,
++ unsigned int length)
++ {
++ struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD,
++ GFP_ATOMIC | __GFP_COLD);
++ if (likely(skb)) {
++ skb_reserve(skb, NET_SKB_PAD);
++ skb->dev = dev;
++ }
++ return skb;
++ }
++#endif
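++
++/* As in mainline netdev_alloc_skb(), the NET_SKB_PAD bytes reserved
++ * above leave headroom so lower layers can push link-level headers
++ * without reallocating the skb data. */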
++
++#ifdef EFX_NEED_NETIF_TX_LOCK
++ static inline void netif_tx_lock(struct net_device *dev)
++ {
++ spin_lock(&dev->xmit_lock);
++ dev->xmit_lock_owner = smp_processor_id();
++ }
++ static inline void netif_tx_lock_bh(struct net_device *dev)
++ {
++ spin_lock_bh(&dev->xmit_lock);
++ dev->xmit_lock_owner = smp_processor_id();
++ }
++ static inline void netif_tx_unlock_bh(struct net_device *dev)
++ {
++ dev->xmit_lock_owner = -1;
++ spin_unlock_bh(&dev->xmit_lock);
++ }
++ static inline void netif_tx_unlock(struct net_device *dev)
++ {
++ dev->xmit_lock_owner = -1;
++ spin_unlock(&dev->xmit_lock);
++ }
++#endif
++
++#ifdef EFX_NEED_CSUM_UNFOLDED
++ typedef u32 __wsum;
++ #define csum_unfold(x) ((__force __wsum) x)
++#endif
++
++#ifdef EFX_NEED_HEX_DUMP
++ enum {
++ DUMP_PREFIX_NONE,
++ DUMP_PREFIX_ADDRESS,
++ DUMP_PREFIX_OFFSET
++ };
++#endif
++
++#ifdef EFX_NEED_PRINT_MAC
++ #define DECLARE_MAC_BUF(var) char var[18] __attribute__((unused))
++#endif
++
++#ifdef EFX_NEED_GFP_T
++ typedef unsigned int gfp_t;
++#endif
++
++#ifdef EFX_NEED_SAFE_LISTS
++ #define list_for_each_entry_safe_reverse(pos, n, head, member) \
++ for (pos = list_entry((head)->prev, typeof(*pos), member), \
++ n = list_entry(pos->member.prev, typeof(*pos), member); \
++ &pos->member != (head); \
++ pos = n, \
++ n = list_entry(n->member.prev, typeof(*n), member))
++#endif
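++
++/* Illustrative use of the iterator above (list and member names are
++ * hypothetical); "safe" means the current entry may be unlinked or
++ * freed inside the loop body:
++ *
++ *	struct efx_item *item, *tmp;
++ *	list_for_each_entry_safe_reverse(item, tmp, &some_list, link)
++ *		list_del(&item->link);
++ */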
++
++#ifdef EFX_NEED_DEV_NOTICE
++ #define dev_notice dev_warn
++#endif
++
++#ifdef EFX_NEED_IF_MII
++ #include <linux/mii.h>
++ static inline struct mii_ioctl_data *efx_if_mii(struct ifreq *rq)
++ {
++ return (struct mii_ioctl_data *)&rq->ifr_ifru;
++ }
++ #undef if_mii
++ #define if_mii efx_if_mii
++#endif
++
++#ifdef EFX_NEED_MTD_ERASE_CALLBACK
++ #include <linux/mtd/mtd.h>
++ static inline void efx_mtd_erase_callback(struct erase_info *instr)
++ {
++ if (instr->callback)
++ instr->callback(instr);
++ }
++ #undef mtd_erase_callback
++ #define mtd_erase_callback efx_mtd_erase_callback
++#endif
++
++#ifdef EFX_NEED_DUMMY_PCI_DISABLE_MSI
++ #include <linux/pci.h>
++ static inline void dummy_pci_disable_msi(struct pci_dev *dev)
++ {
++ /* Do nothing */
++ }
++ #undef pci_disable_msi
++ #define pci_disable_msi dummy_pci_disable_msi
++#endif
++
++#ifdef EFX_NEED_DUMMY_MSIX
++ struct msix_entry {
++ u16 vector; /* kernel uses to write allocated vector */
++ u16 entry; /* driver uses to specify entry, OS writes */
++ };
++ static inline int pci_enable_msix(struct pci_dev *dev,
++ struct msix_entry *entries, int nvec)
++ {
++ return -1;
++ }
++ static inline void pci_disable_msix(struct pci_dev *dev) { /* Do nothing */ }
++#endif
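++
++/* Because the pci_enable_msix() stub above always fails, callers simply
++ * take their existing non-MSI-X fallback path; a hedged sketch:
++ *
++ *	struct msix_entry xentries[2] = { {0, 0}, {0, 1} };
++ *	if (pci_enable_msix(pci_dev, xentries, 2) == 0)
++ *		use the allocated MSI-X vectors;
++ *	else
++ *		fall back to MSI or a legacy interrupt;
++ */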
++
++#ifdef EFX_NEED_BYTEORDER_TYPES
++ typedef __u16 __be16;
++ typedef __u32 __be32;
++ typedef __u64 __be64;
++ typedef __u16 __le16;
++ typedef __u32 __le32;
++ typedef __u64 __le64;
++#endif
++
++/**************************************************************************
++ *
++ * Missing functions provided by kernel_compat.c
++ *
++ **************************************************************************
++ *
++ */
++#ifdef EFX_NEED_RANDOM_ETHER_ADDR
++ extern void efx_random_ether_addr(uint8_t *addr);
++ #ifndef EFX_IN_KCOMPAT_C
++ #undef random_ether_addr
++ #define random_ether_addr efx_random_ether_addr
++ #endif
++#endif
++
++#ifdef EFX_NEED_MII_ETHTOOL_FIX
++ extern int efx_mii_ethtool_gset(struct mii_if_info *mii,
++ struct ethtool_cmd *ecmd);
++ extern int efx_mii_ethtool_sset(struct mii_if_info *mii,
++ struct ethtool_cmd *ecmd);
++ #ifndef EFX_IN_KCOMPAT_C
++ #undef mii_ethtool_gset
++ #define mii_ethtool_gset efx_mii_ethtool_gset
++ #undef mii_ethtool_sset
++ #define mii_ethtool_sset efx_mii_ethtool_sset
++ #endif
++#endif
++
++#ifdef EFX_NEED_UNREGISTER_NETDEVICE_NOTIFIER_FIX
++ extern int efx_unregister_netdevice_notifier(struct notifier_block *nb);
++ #ifndef EFX_IN_KCOMPAT_C
++ #undef unregister_netdevice_notifier
++ #define unregister_netdevice_notifier \
++ efx_unregister_netdevice_notifier
++ #endif
++#endif
++
++#ifdef EFX_NEED_IOMMU_LOCK
++ extern dma_addr_t efx_pci_map_single(struct pci_dev *pci, void *ptr,
++ size_t size, int direction);
++ extern void efx_pci_unmap_single(struct pci_dev *pci,
++ dma_addr_t dma_addr, size_t size,
++ int direction);
++ extern void * efx_pci_alloc_consistent(struct pci_dev *pci,
++ size_t size,
++ dma_addr_t *dma_addr);
++ extern void efx_pci_free_consistent(struct pci_dev *pci,
++ size_t size, void *ptr,
++ dma_addr_t dma_addr);
++ #ifndef EFX_IN_KCOMPAT_C
++ #undef pci_map_single
++ #undef pci_unmap_single
++ #undef pci_alloc_consistent
++ #undef pci_free_consistent
++ #define pci_map_single efx_pci_map_single
++ #define pci_unmap_single efx_pci_unmap_single
++ #define pci_alloc_consistent efx_pci_alloc_consistent
++ #define pci_free_consistent efx_pci_free_consistent
++ #endif
++#endif
++
++#ifdef EFX_NEED_PRINT_MAC
++ extern char *print_mac(char *buf, const u8 *addr);
++#endif
++
++#ifdef EFX_NEED_COMPOUND_PAGE_FIX
++ extern void efx_compound_page_destructor(struct page *page);
++#endif
++
++#ifdef EFX_NEED_HEX_DUMP
++ extern void
++ print_hex_dump(const char *level, const char *prefix_str,
++ int prefix_type, int rowsize, int groupsize,
++ const void *buf, size_t len, int ascii);
++#endif
++
++#ifdef EFX_NEED_MSECS_TO_JIFFIES
++ extern unsigned long msecs_to_jiffies(const unsigned int m);
++#endif
++
++#ifdef EFX_NEED_MSLEEP
++ extern void msleep(unsigned int msecs);
++#endif
++
++/**************************************************************************
++ *
++ * Wrappers to fix bugs and parameter changes
++ *
++ **************************************************************************
++ *
++ */
++
++#ifdef EFX_NEED_PCI_SAVE_RESTORE_WRAPPERS
++ #define pci_save_state(_dev) \
++ pci_save_state(_dev, (_dev)->saved_config_space)
++
++ #define pci_restore_state(_dev) \
++ pci_restore_state(_dev, (_dev)->saved_config_space)
++#endif
++
++#ifdef EFX_NEED_WORK_API_WRAPPERS
++ /*
++ * queue_delayed_work() in kernels before 2.6.20 cannot re-arm a
++ * work item from inside its own work function, so instead we
++ * emulate the delay with a rather hacky synchronous sleep.
++ */
++ #define delayed_work work_struct
++ #define INIT_DELAYED_WORK INIT_WORK
++
++ static inline int efx_queue_delayed_work(struct workqueue_struct *wq,
++ struct work_struct *work,
++ unsigned long delay)
++ {
++ if (unlikely(delay > 0))
++ schedule_timeout_uninterruptible(delay);
++ return queue_work(wq, work);
++ }
++ #define queue_delayed_work efx_queue_delayed_work
++
++ /*
++ * The old and new work-function prototypes differ only in the
++ * type of their pointer argument (void * versus struct
++ * work_struct *), so it is safe to cast between them.
++ */
++ typedef void (*efx_old_work_func_t)(void *p);
++
++ #undef INIT_WORK
++ #define INIT_WORK(_work, _func) \
++ do { \
++ INIT_LIST_HEAD(&(_work)->entry); \
++ (_work)->pending = 0; \
++ PREPARE_WORK((_work), \
++ (efx_old_work_func_t) (_func), \
++ (_work)); \
++ } while (0)
++#endif
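++
++/* Net effect of the wrappers above, as a sketch (names illustrative):
++ * on pre-2.6.20 kernels,
++ *
++ *	INIT_DELAYED_WORK(&state->work, handler);
++ *	queue_delayed_work(wq, &state->work, HZ);
++ *
++ * degenerates to a plain work_struct plus a synchronous one-second
++ * sleep before queueing, via the efx_queue_delayed_work() shim. */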
++
++#ifdef EFX_HAVE_OLD_NAPI
++ #define napi_str napi_dev[0]
++
++ static inline void netif_napi_add(struct net_device *dev,
++ struct net_device *dummy,
++ int (*poll) (struct net_device *,
++ int *),
++ int weight)
++ {
++ dev->weight = weight;
++ dev->poll = poll;
++ }
++
++ #define napi_enable netif_poll_enable
++ #define napi_disable netif_poll_disable
++
++ #define netif_rx_complete(dev, dummy) netif_rx_complete(dev)
++#endif
++
++#ifdef EFX_NEED_COMPOUND_PAGE_FIX
++ static inline
++ struct page *efx_alloc_pages(gfp_t flags, unsigned int order)
++ {
++ struct page *p = alloc_pages(flags, order);
++ if ((flags & __GFP_COMP) && (p != NULL) && (order > 0))
++ p[1].mapping = (void *)efx_compound_page_destructor;
++ return p;
++ }
++ #undef alloc_pages
++ #define alloc_pages efx_alloc_pages
++
++ static inline
++ void efx_free_pages(struct page *p, unsigned int order)
++ {
++ if ((order > 0) && (page_count(p) == 1))
++ p[1].mapping = NULL;
++ __free_pages(p, order);
++ }
++ #define __free_pages efx_free_pages
++#endif
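++
++/* The compound-page workaround above stashes the driver's destructor in
++ * p[1].mapping (otherwise unused on the affected kernels) at allocation
++ * time and clears it again just before the final free. */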
++
++#ifdef EFX_NEED_HEX_DUMP_CONST_FIX
++ #define print_hex_dump(v,s,t,r,g,b,l,a) \
++ print_hex_dump((v),(s),(t),(r),(g),(void*)(b),(l),(a))
++#endif
++
++#endif /* EFX_KERNEL_COMPAT_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/lm87_support.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/lm87_support.c
+--- linux-2.6.18.8/drivers/net/sfc/lm87_support.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/lm87_support.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,295 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************/
++
++#include "net_driver.h"
++#include "lm87_support.h"
++#include "workarounds.h"
++
++/* Setting this to 1 will cause efx_check_lm87 to dump the status when it
++ * detects an alarm. This will result in the canonical name (i.e. that in
++ * the LM87 data book) being printed for each set status bit, along with
++ * the reading for that sensor value, if applicable. If set to 0 only the
++ * raw status1 and status2 register values are printed. */
++#define LM87_VERBOSE_ALARMS 1
++
++/**************************************************************************
++ *
++ * Onboard LM87 temperature and voltage monitor
++ *
++ **************************************************************************
++ */
++
++/* LM87 channel mode: all current boards either leave the AIN1/FAN1 and
++ * AIN2/FAN2 pins unused or use them as AIN inputs. */
++#define LM87_CHANNEL_MODE 0x16
++#define LM87_CHANNEL_AIN1 1
++#define LM87_CHANNEL_AIN2 2
++#define LM87_CHANNEL_INIT (LM87_CHANNEL_AIN2 | LM87_CHANNEL_AIN1)
++
++/* LM87 configuration register 1 */
++#define LM87_CONFIG_1 0x40
++#define LM87_START 0x01
++#define LM87_INTEN 0x02
++#define LM87_INITIALIZATION 0x80
++
++/* LM87 interrupt status register 1 */
++#define LM87_INT_STATUS_1 0x41
++
++/* LM87 interrupt status register 2 */
++#define LM87_INT_STATUS_2 0x42
++
++/* LM87 interrupt mask register 1 */
++#define LM87_INT_MASK_1 0x43
++
++/* LM87 interrupt mask register 2 */
++#define LM87_INT_MASK_2 0x44
++
++/* LM87 monitoring limits */
++#define LM87_LIMITS 0x2b
++
++
++int efx_probe_lm87(struct efx_nic *efx, int addr,
++ const u8 *limits, int nlimits, const u16 irqmask)
++{
++ struct efx_i2c_interface *i2c = &efx->i2c;
++ u8 byte;
++ int rc;
++
++ /* Check for onboard LM87 */
++ rc = efx_i2c_check_presence_retry(i2c, addr);
++ if (rc) {
++ /* Not an error to lack an LM87, but failure to probe the
++ * bus is worrying. */
++ if (rc == -EFAULT) {
++ EFX_ERR(efx, "Failed to probe I2C bus for LM87!\n");
++ return rc;
++ } else {
++ EFX_LOG(efx, "has no onboard LM87 chip\n");
++ return 0;
++ }
++ }
++ efx->board_info.lm87_addr = addr;
++ EFX_LOG(efx, "detected onboard LM87 chip at 0x%02x\n", addr);
++
++ /* Reset chip */
++ byte = LM87_INITIALIZATION;
++ rc = efx_i2c_write_retry(i2c, addr, LM87_CONFIG_1, &byte, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not reset LM87\n");
++ return rc;
++ }
++
++ /* Configure channel mode: currently hardwire to make pins 5 and 6
++ * AIN1 and AIN2 rather than FAN1, FAN2. */
++ byte = LM87_CHANNEL_INIT;
++ rc = efx_i2c_write_retry(i2c, addr, LM87_CHANNEL_MODE, &byte, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not program LM87 chan. mode\n");
++ return rc;
++ }
++
++ /* Configure limits */
++ rc = efx_i2c_write_retry(i2c, addr, LM87_LIMITS, limits, nlimits);
++ if (rc) {
++ EFX_ERR(efx, "could not program LM87 limits\n");
++ return rc;
++ }
++
++ /* Mask off unwanted interrupts */
++ byte = (irqmask & 0xff);
++ rc = efx_i2c_write_retry(i2c, addr, LM87_INT_MASK_1, &byte, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not mask LM87 interrupts\n");
++ return rc;
++ }
++
++ byte = (irqmask >> 8);
++ rc = efx_i2c_write_retry(i2c, addr, LM87_INT_MASK_2, &byte, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not mask LM87 interrupts\n");
++ return rc;
++ }
++
++ /* Start monitoring */
++ byte = LM87_START;
++ if (irqmask != EFX_LM87_NO_INTS)
++ byte |= LM87_INTEN;
++
++ rc = efx_i2c_write_retry(i2c, addr, LM87_CONFIG_1, &byte, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not start LM87\n");
++ return rc;
++ }
++
++ return rc;
++}
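++
++/* A hedged caller sketch (the I2C address and limits table below are
++ * illustrative, not values from any real board):
++ *
++ *	static const u8 limits[] = { 0x5a, 0x3c, 0x5a, 0x3c };
++ *	rc = efx_probe_lm87(efx, 0x2e, limits,
++ *			    sizeof(limits), EFX_LM87_NO_INTS);
++ */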
++
++void efx_remove_lm87(struct efx_nic *efx)
++{
++ struct efx_i2c_interface *i2c = &efx->i2c;
++ u8 byte;
++
++ if (!efx->board_info.lm87_addr)
++ return;
++
++ /* Reset chip */
++ byte = LM87_INITIALIZATION;
++ if (efx_i2c_write_retry(i2c, efx->board_info.lm87_addr,
++ LM87_CONFIG_1, &byte, 1) != 0)
++ EFX_ERR(efx, "could not reset LM87 on exit\n");
++}
++
++#if LM87_VERBOSE_ALARMS
++/* Bit number to name mapping for status1 */
++static const char *lm_stat_names[] = {
++/* Status 1 contents */
++ "+2.5Vin",
++ "Vccp1",
++ "Vcc",
++ "+5Vin",
++ "Int. Temp.",
++ "Ext. Temp.",
++ "FAN1/AIN1",
++ "FAN2/AIN2",
++/* Status 2 contents */
++ "+12Vin",
++ "Vccp2",
++ "Reserved",
++ "Reserved",
++ "CI",
++ "THERM#",
++ "D1 Fault",
++ "D2 Fault"
++};
++
++/* Where to read the value corresponding to an alarm bit. */
++static const int lm_stat_regs[] = {
++ 0x20, 0x21, 0x22, 0x23, 0x27, 0x26, 0x28, 0x29,
++ 0x24, 0x25, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* The positions of the alarm bits do not correspond exactly to the
++ * order of the limit values. Convert so the user only needs to maintain
++ * one array */
++static const int lm_bit_to_lim[] = {
++ 0, /* 2.5V */
++ 1, /* Vccp1 */
++ 2, /* Vcc */
++ 3, /* 5V */
++ 7, /* Int temp. */
++ 6, /* Ext temp. */
++ 8, /* AIN1 */
++ 9, /* AIN2 */
++ 4, /* 12V */
++ 5 /* Vccp2 */
++};
++
++/* These are bit numbers. I feel justified in hardwiring the max. */
++static const int lm_stat_max = 16;
++
++static void lm87_show_alarm(struct efx_nic *efx, int bit)
++{
++ char valbuf[8];
++ u8 val;
++
++ if (lm_stat_regs[bit] != 0xff) {
++ efx_i2c_read_retry(&efx->i2c, efx->board_info.lm87_addr,
++ lm_stat_regs[bit], &val, 1);
++ sprintf(valbuf, "0x%02x ", val);
++ } else {
++ strcpy(valbuf, "---- ");
++ }
++ /* If the board code knows what this sensor is wired to, let it tell
++ * us, else just print the LM87 datasheet name of the input, and the
++ * value. */
++ if (efx->board_info.interpret_sensor == NULL ||
++ (bit < ARRAY_SIZE(lm_bit_to_lim) &&
++ efx->board_info.interpret_sensor(efx, lm_bit_to_lim[bit], val)
++ == 0))
++ EFX_ERR(efx, ": %10s %4s\n",
++ STRING_TABLE_LOOKUP(bit, lm_stat), valbuf);
++}
++
++static void lm87_dump_alarms(struct efx_nic *efx, int stat1, int stat2)
++{
++ int i;
++ EFX_ERR(efx, " NAME value\n");
++ for (i = 0; i < 8; i++) {
++ if (stat1 & (1 << i))
++ lm87_show_alarm(efx, i);
++ if (stat2 & (1 << i))
++ lm87_show_alarm(efx, i + 8);
++ }
++}
++
++#else
++#define lm87_dump_alarms(_efx, _stat1, _stat2) do {} while (0)
++#endif
++
++/* Read onboard LM87 (if present)
++ * Return error code if lm87 could not be read (-EIO)
++ * _or_ is raising an alarm (-ERANGE). 0 if AOK.
++ */
++int efx_check_lm87(struct efx_nic *efx, unsigned mask)
++{
++ struct efx_i2c_interface *i2c = &efx->i2c;
++ u8 int_status_1, int_status_2;
++ unsigned ints;
++ int rc = 0;
++
++ /* If link is up then do not monitor temperature */
++ if (EFX_WORKAROUND_7884(efx) && efx->link_up)
++ return 0;
++
++ if (!efx->board_info.lm87_addr)
++ return 0;
++
++ /* Read interrupt status registers */
++ rc = efx_i2c_read_retry(i2c, efx->board_info.lm87_addr,
++ LM87_INT_STATUS_1, &int_status_1, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not read LM87 INT status 1\n");
++ return rc;
++ }
++ rc = efx_i2c_read_retry(i2c, efx->board_info.lm87_addr,
++ LM87_INT_STATUS_2, &int_status_2, 1);
++ if (rc) {
++ EFX_ERR(efx, "could not read LM87 INT status 2\n");
++ return rc;
++ }
++
++ int_status_1 &= mask;
++ int_status_2 &= (mask >> 8);
++ ints = ((int_status_2 << 8) | int_status_1);
++
++ /* Check interrupt status */
++ if (ints == 0)
++ return 0;
++
++ EFX_ERR(efx, "LM87 detected a hardware failure (status %02x:%02x)\n",
++ int_status_1, int_status_2);
++ lm87_dump_alarms(efx, int_status_1, int_status_2);
++
++ return -ERANGE;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/lm87_support.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/lm87_support.h
+--- linux-2.6.18.8/drivers/net/sfc/lm87_support.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/lm87_support.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************/
++
++#ifndef EFX_LM87_SUPPORT_H
++#define EFX_LM87_SUPPORT_H
++
++/* The interrupt bit masks. These are the same in the interrupt status and
++ * interrupt mask registers. */
++/* Register 1 bits */
++#define EFX_LM87_2_5V_INT (1)
++#define EFX_LM87_VCCP1_INT (2)
++#define EFX_LM87_VCC_INT (4)
++#define EFX_LM87_5_V_INT (8)
++#define EFX_LM87_ITMP_INT (0x10)
++#define EFX_LM87_ETMP_INT (0x20)
++#define EFX_LM87_FAN1_INT (0x40)
++#define EFX_LM87_FAN2_INT (0x80)
++/* Register 2 bits */
++#define EFX_LM87_12V_INT (0x100)
++#define EFX_LM87_VCCP2_INT (0x200)
++/* Bits 2 and 3 are reserved. */
++#define EFX_LM87_CI_INT (0x1000)
++#define EFX_LM87_THERM_INT (0x2000)
++#define EFX_LM87_D1_INT (0x4000)
++#define EFX_LM87_D2_INT (0x8000)
++
++#define EFX_LM87_NO_INTS ((u16)-1)
++
++extern
++int efx_probe_lm87(struct efx_nic *efx, int addr, const u8 *limits,
++ int nlimits, const u16 irqmask);
++
++extern void efx_remove_lm87(struct efx_nic *efx);
++
++extern int efx_check_lm87(struct efx_nic *efx, unsigned mask);
++
++#endif /* EFX_LM87_SUPPORT_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/mac.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/mac.h
+--- linux-2.6.18.8/drivers/net/sfc/mac.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/mac.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,38 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_MAC_H
++#define EFX_MAC_H
++
++#include "net_driver.h"
++
++extern void mentormac_reset(struct efx_nic *efx);
++extern void mentormac_reconfigure(struct efx_nic *efx);
++extern struct efx_mac_operations falcon_gmac_operations;
++extern struct efx_mac_operations falcon_xmac_operations;
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/Makefile linux-2.6.18-xen-3.3.0/drivers/net/sfc/Makefile
+--- linux-2.6.18.8/drivers/net/sfc/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,43 @@
++
++# Final objects
++sfc_o = sfc.o
++sfc_mtd_o = sfc_mtd.o
++
++# Constituent objects
++sfc_elements_o :=
++sfc_elements_o += efx.o
++sfc_elements_o += falcon.o
++sfc_elements_o += tx.o
++sfc_elements_o += rx.o
++sfc_elements_o += mentormac.o
++sfc_elements_o += falcon_gmac.o
++sfc_elements_o += falcon_xmac.o
++sfc_elements_o += alaska.o
++sfc_elements_o += i2c-direct.o
++sfc_elements_o += selftest.o
++sfc_elements_o += driverlink.o
++ifeq ($(CONFIG_SFC_DEBUGFS),y)
++sfc_elements_o += debugfs.o
++endif
++sfc_elements_o += ethtool.o
++sfc_elements_o += xfp_phy.o
++sfc_elements_o += mdio_10g.o
++sfc_elements_o += txc43128_phy.o
++sfc_elements_o += tenxpress.o
++sfc_elements_o += lm87_support.o
++sfc_elements_o += boards.o
++sfc_elements_o += sfe4001.o
++sfc_elements_o += pm8358_phy.o
++sfc_elements_o += null_phy.o
++sfc_elements_o += phy.o
++sfc_elements_o += kernel_compat.o
++
++sfc_mtd_elements_o := mtd.o
++
++obj-$(CONFIG_SFC) += $(sfc_o)
++obj-$(CONFIG_SFC_MTD) += $(sfc_mtd_o)
++
++sfc-objs = $(sfc_elements_o)
++sfc_mtd-objs = $(sfc_mtd_elements_o)
++
++obj-$(CONFIG_SFC_RESOURCE) += sfc_resource/
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/mdio_10g.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/mdio_10g.c
+--- linux-2.6.18.8/drivers/net/sfc/mdio_10g.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/mdio_10g.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,441 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++/*
++ * Useful functions for working with MDIO clause 45 PHYs
++ */
++#include <linux/types.h>
++#include <linux/ethtool.h>
++#include <linux/delay.h>
++#include "net_driver.h"
++#include "mdio_10g.h"
++#include "boards.h"
++
++static const char *mmd_block_names[] = {
++ [0] = "(illegal)",
++ [MDIO_MMD_PMAPMD] = "PMA/PMD",
++ [MDIO_MMD_WIS] = "WIS",
++ [MDIO_MMD_PCS] = "PCS",
++ [MDIO_MMD_PHYXS] = "PHY XS",
++ [MDIO_MMD_DTEXS] = "DTE XS",
++ [MDIO_MMD_TC] = "TC",
++ [MDIO_MMD_AN] = "AN",
++};
++static const int mmd_block_max = ARRAY_SIZE(mmd_block_names);
++
++const char *mdio_clause45_mmd_name(int mmd)
++{
++ return STRING_TABLE_LOOKUP(mmd, mmd_block);
++}
++
++int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
++ int spins, int spintime)
++{
++ u32 ctrl;
++ int phy_id = port->mii.phy_id;
++
++ /* Catch callers passing values in the wrong units (or just silly) */
++ EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
++
++ mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1,
++ (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
++ /* Wait for the reset bit to clear. */
++ do {
++ msleep(spintime);
++ ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1);
++ spins--;
++
++ } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)));
++
++ return spins ? spins : -ETIMEDOUT;
++}
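++
++/* Example call (illustrative): reset the PHY XS, polling up to 100
++ * times at 5 ms intervals; 100 * 5 = 500 stays well inside the
++ * EFX_BUG_ON_PARANOID(spins * spintime >= 5000) sanity check above:
++ *
++ *	rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS, 100, 5);
++ */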
++
++static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
++ int fault_fatal)
++{
++ int status;
++ int phy_id = efx->mii.phy_id;
++ const char *mmdname = STRING_TABLE_LOOKUP(mmd, mmd_block);
++
++ if (LOOPBACK_INTERNAL(efx))
++ return 0;
++
++ /* Read MMD STATUS2 to check it is responding. */
++ status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
++ if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
++ ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
++ MDIO_MMDREG_STAT2_PRESENT_VAL) {
++ EFX_ERR(efx, "PHY MMD %s not responding.\n", mmdname);
++ return -EIO;
++ }
++
++ /* Read MMD STATUS 1 to check for fault. */
++ status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1);
++ if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) {
++ if (fault_fatal) {
++ EFX_ERR(efx, "PHY MMD %s reporting fatal"
++ " fault: status %x\n", mmdname, status);
++ return -EIO;
++ } else {
++ EFX_LOG(efx, "PHY MMD %s reporting status"
++ " %x (expected)\n", mmdname, status);
++ }
++ }
++ return 0;
++}
++
++/* This ought to be ridiculous overkill. We expect it to fail rarely */
++#define MDIO45_RESET_TIME HZ
++#define MDIO45_RESET_ITERS (100)
++
++int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
++ unsigned int mmd_mask)
++{
++ const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
++ int tries = MDIO45_RESET_ITERS;
++ int rc = 0;
++ int in_reset;
++
++ while (tries) {
++ int mask = mmd_mask;
++ int mmd = 0;
++ int stat;
++ in_reset = 0;
++ while (mask) {
++ if (mask & 1) {
++ stat = mdio_clause45_read(efx,
++ efx->mii.phy_id,
++ mmd,
++ MDIO_MMDREG_CTRL1);
++ if (stat < 0) {
++ EFX_ERR(efx, "failed to read"
++ " status of MMD %s\n",
++ STRING_TABLE_LOOKUP(mmd,
++ mmd_block));
++ return -EIO;
++ }
++ if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))
++ in_reset |= (1 << mmd);
++ }
++ mask = mask >> 1;
++ mmd++;
++ }
++ if (!in_reset)
++ break;
++ tries--;
++ msleep(spintime);
++ }
++ if (in_reset != 0) {
++ EFX_ERR(efx, "not all MMDs came out of reset in time."
++ " MMDs still in reset: %x\n", in_reset);
++ rc = -ETIMEDOUT;
++ }
++ return rc;
++}
++
++int mdio_clause45_check_mmds(struct efx_nic *efx,
++ unsigned int mmd_mask, unsigned int fatal_mask)
++{
++ int devices, mmd = 0;
++ int probe_mmd;
++
++ /* Historically we have probed the PHYXS to find out what devices are
++ * present, but that doesn't work so well if the PHYXS isn't expected
++ * to exist; in that case just probe the first MMD in the supplied mask. */
++ probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS :
++ __ffs(mmd_mask);
++ devices = mdio_clause45_read(efx, efx->mii.phy_id,
++ probe_mmd, MDIO_MMDREG_DEVS0);
++
++ /* Check all the expected MMDs are present */
++ if (devices < 0) {
++ EFX_ERR(efx, "failed to read devices present\n");
++ return -EIO;
++ }
++ if ((devices & mmd_mask) != mmd_mask) {
++ EFX_ERR(efx, "required MMDs not present: got %x, "
++ "wanted %x\n", devices, mmd_mask);
++ return -ENODEV;
++ }
++ EFX_TRACE(efx, "Devices present: %x\n", devices);
++
++ /* Check all required MMDs are responding and happy. */
++ while (mmd_mask) {
++ if (mmd_mask & 1) {
++ int fault_fatal = fatal_mask & 1;
++ if (mdio_clause45_check_mmd(efx, mmd, fault_fatal))
++ return -EIO;
++ }
++ mmd_mask = mmd_mask >> 1;
++ fatal_mask = fatal_mask >> 1;
++ mmd++;
++ }
++
++ return 0;
++}
++
++int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
++{
++ int phy_id = efx->mii.phy_id;
++ int status;
++ int ok = 1;
++ int mmd = 0;
++ int good;
++
++ /* If the port is in loopback, then we should only consider a subset
++ * of MMDs */
++ if (LOOPBACK_INTERNAL(efx))
++ return 1;
++ else if (efx->loopback_mode == LOOPBACK_NETWORK)
++ return 0;
++ else if (!efx->phy_powered)
++ return 0;
++ else if (efx->loopback_mode == LOOPBACK_PHYXS)
++ mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
++ MDIO_MMDREG_DEVS0_PCS |
++ MDIO_MMDREG_DEVS0_PMAPMD);
++ else if (efx->loopback_mode == LOOPBACK_PCS)
++ mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS |
++ MDIO_MMDREG_DEVS0_PMAPMD);
++ else if (efx->loopback_mode == LOOPBACK_PMAPMD)
++ mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD;
++
++ while (mmd_mask) {
++ if (mmd_mask & 1) {
++ /* Double reads because link state is latched, and a
++ * read moves the current state into the register */
++ status = mdio_clause45_read(efx, phy_id,
++ mmd, MDIO_MMDREG_STAT1);
++ status = mdio_clause45_read(efx, phy_id,
++ mmd, MDIO_MMDREG_STAT1);
++
++ good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
++ ok = ok && good;
++ }
++ mmd_mask = (mmd_mask >> 1);
++ mmd++;
++ }
++ return ok;
++}
++
++void mdio_clause45_transmit_disable(struct efx_nic *efx, int disable)
++{
++ int phy_id = efx->mii.phy_id;
++ int ctrl1, ctrl2;
++
++ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
++ MDIO_MMDREG_TXDIS);
++ if (disable)
++ ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
++ else
++ ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
++ if (ctrl1 != ctrl2)
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
++ MDIO_MMDREG_TXDIS, ctrl2);
++}
++
++void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
++{
++ int phy_id = efx->mii.phy_id;
++ int ctrl1, ctrl2;
++
++ /* Handle (with debouncing) PMA/PMD loopback */
++ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
++ MDIO_MMDREG_CTRL1);
++
++ if (efx->loopback_mode == LOOPBACK_PMAPMD)
++ ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
++ else
++ ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
++
++ if (ctrl1 != ctrl2)
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
++ MDIO_MMDREG_CTRL1, ctrl2);
++
++ /* Handle (with debouncing) PCS loopback */
++ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
++ MDIO_MMDREG_CTRL1);
++ if (efx->loopback_mode == LOOPBACK_PCS)
++ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
++ else
++ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
++
++ if (ctrl1 != ctrl2)
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS,
++ MDIO_MMDREG_CTRL1, ctrl2);
++
++ /* Handle (with debouncing) PHYXS network loopback */
++ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
++ MDIO_MMDREG_CTRL1);
++ if (efx->loopback_mode == LOOPBACK_NETWORK)
++ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
++ else
++ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
++
++ if (ctrl1 != ctrl2)
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
++ MDIO_MMDREG_CTRL1, ctrl2);
++}
++
++static void mdio_clause45_set_mmd_lpower(struct efx_nic *efx,
++ int lpower, int mmd)
++{
++ const char *mmdname = STRING_TABLE_LOOKUP(mmd, mmd_block);
++ int phy = efx->mii.phy_id;
++ int stat = mdio_clause45_read(efx, phy, mmd, MDIO_MMDREG_STAT1);
++ int ctrl1, ctrl2;
++
++ EFX_TRACE(efx, "Setting low power mode for MMD %s to %d\n",
++ mmdname, lpower);
++
++ if (stat & (1 << MDIO_MMDREG_STAT1_LPABLE_LBN)) {
++ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy,
++ mmd, MDIO_MMDREG_CTRL1);
++ if (lpower)
++ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LPOWER_LBN);
++ else
++ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LPOWER_LBN);
++ if (ctrl1 != ctrl2)
++ mdio_clause45_write(efx, phy, mmd,
++ MDIO_MMDREG_CTRL1, ctrl2);
++ } else {
++ /* If we ever want a completely generic PHY driver
++ * that just does clause 45, we may consider not
++ * complaining, but for now expect the driver to know
++ * which MMDs to apply this to. */
++ EFX_ERR(efx, "Attempt to change power setting of MMD %s"
++ " which doesn't support it.\n", mmdname);
++ }
++}
++
++void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
++ int low_power, unsigned int mmd_mask)
++{
++ int mmd = 0;
++ while (mmd_mask) {
++ if (mmd_mask & 1)
++ mdio_clause45_set_mmd_lpower(efx, low_power, mmd);
++ mmd_mask = (mmd_mask >> 1);
++ mmd++;
++ }
++}
++
++/**
++ * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
++ * @efx: Efx NIC
++ * @ecmd: Buffer for settings
++ *
++ * On return the 'port', 'speed', 'supported' and 'advertising' fields of
++ * ecmd have been filled out based on the PMA type.
++ */
++void mdio_clause45_get_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd)
++{
++ int pma_type;
++
++ /* If no PMA is present we are presumably talking something XAUI-ish
++ * like CX4, which we report as FIBRE (see below). */
++ if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) {
++ ecmd->speed = SPEED_10000;
++ ecmd->port = PORT_FIBRE;
++ ecmd->supported = SUPPORTED_FIBRE;
++ ecmd->advertising = ADVERTISED_FIBRE;
++ return;
++ }
++
++ pma_type = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2);
++ pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK;
++
++ switch (pma_type) {
++ /* We represent CX4 as fibre in the absence of anything
++ * better. */
++ case MDIO_PMAPMD_CTRL2_10G_CX4:
++ ecmd->speed = SPEED_10000;
++ ecmd->port = PORT_FIBRE;
++ ecmd->supported = SUPPORTED_FIBRE;
++ ecmd->advertising = ADVERTISED_FIBRE;
++ break;
++ /* 10G Base-T */
++ case MDIO_PMAPMD_CTRL2_10G_BT:
++ ecmd->speed = SPEED_10000;
++ ecmd->port = PORT_TP;
++ ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full;
++ ecmd->advertising = (ADVERTISED_FIBRE
++ | ADVERTISED_10000baseT_Full);
++ break;
++ case MDIO_PMAPMD_CTRL2_1G_BT:
++ ecmd->speed = SPEED_1000;
++ ecmd->port = PORT_TP;
++ ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full;
++ ecmd->advertising = (ADVERTISED_FIBRE
++ | ADVERTISED_1000baseT_Full);
++ break;
++ case MDIO_PMAPMD_CTRL2_100_BT:
++ ecmd->speed = SPEED_100;
++ ecmd->port = PORT_TP;
++ ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full;
++ ecmd->advertising = (ADVERTISED_FIBRE
++ | ADVERTISED_100baseT_Full);
++ break;
++ case MDIO_PMAPMD_CTRL2_10_BT:
++ ecmd->speed = SPEED_10;
++ ecmd->port = PORT_TP;
++ ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full;
++ ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full;
++ break;
++ /* All the other defined modes are flavours of
++ * 10G optical */
++ default:
++ ecmd->speed = SPEED_10000;
++ ecmd->port = PORT_FIBRE;
++ ecmd->supported = SUPPORTED_FIBRE;
++ ecmd->advertising = ADVERTISED_FIBRE;
++ break;
++ }
++}
++
++/**
++ * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
++ * @efx: Efx NIC
++ * @ecmd: New settings
++ *
++ * Currently this just enforces that we are _not_ changing the
++ * 'port', 'speed', 'supported' or 'advertising' settings as these
++ * cannot be changed on any currently supported PHY.
++ */
++int mdio_clause45_set_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd)
++{
++ struct ethtool_cmd tmpcmd;
++ mdio_clause45_get_settings(efx, &tmpcmd);
++ /* None of the current PHYs support more than one mode
++ * of operation (and only 10GBT ever will), so keep things
++ * simple for now */
++ if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) &&
++ (ecmd->supported == tmpcmd.supported) &&
++ (ecmd->advertising == tmpcmd.advertising))
++ return 0;
++ return -EOPNOTSUPP;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/mdio_10g.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/mdio_10g.h
+--- linux-2.6.18.8/drivers/net/sfc/mdio_10g.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/mdio_10g.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,295 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_MDIO_10G_H
++#define EFX_MDIO_10G_H
++
++/*
++ * Definitions needed for doing 10G MDIO as specified in clause 45
++ * MDIO, which do not appear in Linux yet. Also some helper functions.
++ */
++
++#include "efx.h"
++#include "boards.h"
++
++/* Numbering of the MDIO Manageable Devices (MMDs) */
++/* Physical Medium Attachment/ Physical Medium Dependent sublayer */
++#define MDIO_MMD_PMAPMD (1)
++/* WAN Interface Sublayer */
++#define MDIO_MMD_WIS (2)
++/* Physical Coding Sublayer */
++#define MDIO_MMD_PCS (3)
++/* PHY Extender Sublayer */
++#define MDIO_MMD_PHYXS (4)
++/* Extender Sublayer */
++#define MDIO_MMD_DTEXS (5)
++/* Transmission convergence */
++#define MDIO_MMD_TC (6)
++/* Auto negotiation */
++#define MDIO_MMD_AN (7)
++
++/* Generic register locations */
++#define MDIO_MMDREG_CTRL1 (0)
++#define MDIO_MMDREG_STAT1 (1)
++#define MDIO_MMDREG_IDHI (2)
++#define MDIO_MMDREG_IDLOW (3)
++#define MDIO_MMDREG_SPEED (4)
++#define MDIO_MMDREG_DEVS0 (5)
++#define MDIO_MMDREG_DEVS1 (6)
++#define MDIO_MMDREG_CTRL2 (7)
++#define MDIO_MMDREG_STAT2 (8)
++#define MDIO_MMDREG_TXDIS (9)
++
++/* Bits in MMDREG_CTRL1 */
++/* Reset */
++#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
++#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
++/* Loopback */
++/* Note that while WIS, PCS, PHYXS and DTEXS have the loopback bit here,
++ * the PMA/PMD does not. */
++#define MDIO_MMDREG_CTRL1_LBACK_LBN (14)
++#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1)
++/* Low power */
++#define MDIO_MMDREG_CTRL1_LPOWER_LBN (11)
++#define MDIO_MMDREG_CTRL1_LPOWER_WIDTH (1)
++
++/* Bits in MMDREG_STAT1 */
++#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
++#define MDIO_MMDREG_STAT1_FAULT_WIDTH (1)
++/* Link state */
++#define MDIO_MMDREG_STAT1_LINK_LBN (2)
++#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
++/* Low power ability */
++#define MDIO_MMDREG_STAT1_LPABLE_LBN (1)
++#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1)
++
++/* Bits in ID reg */
++#define MDIO_ID_REV(_id32) (_id32 & 0xf)
++#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f)
++#define MDIO_ID_OUI(_id32) (_id32 >> 10)
++
++/* Bits in MMDREG_DEVS0. Someone thoughtfully laid things out
++ * so the 'device present' bit number for an MMD is the number
++ * of that MMD */
++#define DEV_PRESENT_BIT(_b) (1 << _b)
++
++#define MDIO_MMDREG_DEVS0_DTEXS DEV_PRESENT_BIT(MDIO_MMD_DTEXS)
++#define MDIO_MMDREG_DEVS0_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
++#define MDIO_MMDREG_DEVS0_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS)
++#define MDIO_MMDREG_DEVS0_WIS DEV_PRESENT_BIT(MDIO_MMD_WIS)
++#define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
++#define MDIO_MMDREG_DEVS0_AN DEV_PRESENT_BIT(MDIO_MMD_AN)
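++
++/* Worked example: a PHY exposing PMA/PMD, PCS and PHY XS reports
++ * DEVS0 = DEV_PRESENT_BIT(1) | DEV_PRESENT_BIT(3) | DEV_PRESENT_BIT(4)
++ * = 0x1a. */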
++
++
++/* Bits in MMDREG_STAT2 */
++#define MDIO_MMDREG_STAT2_PRESENT_VAL (2)
++#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
++#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
++
++/* Bits in MMDREG_TXDIS */
++#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0)
++#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1)
++
++/* MMD-specific bits, ordered by MMD, then register */
++#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0)
++#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1)
++
++/* PMA type (4 bits) */
++#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
++#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
++#define MDIO_PMAPMD_CTRL2_10G_LW (0x2)
++#define MDIO_PMAPMD_CTRL2_10G_SW (0x3)
++#define MDIO_PMAPMD_CTRL2_10G_LX4 (0x4)
++#define MDIO_PMAPMD_CTRL2_10G_ER (0x5)
++#define MDIO_PMAPMD_CTRL2_10G_LR (0x6)
++#define MDIO_PMAPMD_CTRL2_10G_SR (0x7)
++/* Reserved */
++#define MDIO_PMAPMD_CTRL2_10G_BT (0x9)
++/* Reserved */
++/* Reserved */
++#define MDIO_PMAPMD_CTRL2_1G_BT (0xc)
++/* Reserved */
++#define MDIO_PMAPMD_CTRL2_100_BT (0xe)
++#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
++#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
++
++/* PCS 10GBT registers */
++#define MDIO_PCS_10GBT_STATUS (32)
++#define MDIO_PCS_10GBT_STATUS2 (33)
++#define MDIO_PCS_10GBT_STATUS2_BER_LBN (8)
++#define MDIO_PCS_10GBT_STATUS2_BER_WIDTH (6)
++#define MDIO_PCS_10GBT_STATUS2_ERR_LBN (0)
++#define MDIO_PCS_10GBT_STATUS2_ERR_WIDTH (8)
++
++/* PHY XGXS lane state */
++#define MDIO_PHYXS_LANE_STATE (0x18)
++#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
++#define MDIO_PHYXS_LANE_SYNC0_LBN (0)
++#define MDIO_PHYXS_LANE_SYNC1_LBN (1)
++#define MDIO_PHYXS_LANE_SYNC2_LBN (2)
++#define MDIO_PHYXS_LANE_SYNC3_LBN (3)
++
++/* AN registers */
++#define MDIO_AN_STATUS (1)
++#define MDIO_AN_STATUS_XNP_LBN (7)
++#define MDIO_AN_STATUS_PAGE_LBN (6)
++#define MDIO_AN_STATUS_AN_DONE_LBN (5)
++#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0)
++
++#define MDIO_AN_10GBT_STATUS (33)
++#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
++#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
++#define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */
++#define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */
++#define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */
++#define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */
++#define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9) /* LP Training Reset Request */
++
++
++/* Packing of the prt and dev arguments of clause 45 style MDIO into a
++ * single int so they can be passed into the mdio_read/write functions
++ * that currently exist. Note that as Falcon is the only current user,
++ * the packed form is chosen to match what Falcon needs to write into
++ * a register. This is checked at compile-time so do not change it. If
++ * your target chip needs things laid out differently you will need
++ * to unpack the arguments in your chip-specific mdio functions.
++ */
++ /* These are defined by the standard. */
++#define MDIO45_PRT_ID_WIDTH (5)
++#define MDIO45_DEV_ID_WIDTH (5)
++
++/* The prt ID is just packed in immediately to the left of the dev ID */
++#define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH)
++
++#define MDIO45_PRT_ID_MASK ((1 << MDIO45_PRT_DEV_WIDTH) - 1)
++/* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */
++#define MDIO45_XPRT_ID_WIDTH (MDIO45_PRT_DEV_WIDTH + 1)
++#define MDIO45_XPRT_ID_MASK ((1 << MDIO45_XPRT_ID_WIDTH) - 1)
++#define MDIO45_XPRT_ID_IS10G (1 << (MDIO45_XPRT_ID_WIDTH - 1))
++
++
++#define MDIO45_PRT_ID_COMP_LBN MDIO45_DEV_ID_WIDTH
++#define MDIO45_PRT_ID_COMP_WIDTH MDIO45_PRT_ID_WIDTH
++#define MDIO45_DEV_ID_COMP_LBN 0
++#define MDIO45_DEV_ID_COMP_WIDTH MDIO45_DEV_ID_WIDTH
++
++/* Compose port and device into a phy_id */
++static inline int mdio_clause45_pack(u8 prt, u8 dev)
++{
++ efx_dword_t phy_id;
++ EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt,
++ MDIO45_DEV_ID_COMP, dev);
++ return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id);
++}
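++
++/* Worked example: mdio_clause45_pack(1, MDIO_MMD_PCS) packs port 1 and
++ * device 3 as (1 << 5) | 3 = 0x23, then ORs in MDIO45_XPRT_ID_IS10G
++ * (1 << 10 = 0x400), giving 0x423. */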
++
++static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev)
++{
++ efx_dword_t phy_id;
++ EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val);
++ *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP);
++ *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP);
++}
++
++static inline int mdio_clause45_read(struct efx_nic *efx,
++ u8 prt, u8 dev, u16 addr)
++{
++ return efx->mii.mdio_read(efx->net_dev,
++ mdio_clause45_pack(prt, dev), addr);
++}
++
++static inline void mdio_clause45_write(struct efx_nic *efx,
++ u8 prt, u8 dev, u16 addr, int value)
++{
++ efx->mii.mdio_write(efx->net_dev,
++ mdio_clause45_pack(prt, dev), addr, value);
++}
++
++
++static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
++{
++ int phy_id = efx->mii.phy_id;
++ u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW);
++ u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI);
++ return (id_hi << 16) | (id_low);
++}
++
++static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
++{
++ int i, sync, lane_status;
++
++ for (i = 0; i < 2; ++i)
++ lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS,
++ MDIO_PHYXS_LANE_STATE);
++
++ sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
++ if (!sync)
++ EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
++ return sync;
++}
++
++extern const char *mdio_clause45_mmd_name(int mmd);
++
++/*
++ * Reset a specific MMD and wait for reset to clear.
++ * Return number of spins left (>0) on success, -ETIMEDOUT on failure.
++ *
++ * This function will sleep.
++ */
++extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd,
++ int spins, int spintime);
++
++/* As mdio_clause45_check_mmd but for multiple MMDs */
++int mdio_clause45_check_mmds(struct efx_nic *efx,
++ unsigned int mmd_mask, unsigned int fatal_mask);
++
++/* Check the link status of specified mmds in bit mask */
++extern int mdio_clause45_links_ok(struct efx_nic *efx,
++ unsigned int mmd_mask);
++
++/* Generic transmit disable support through PMAPMD */
++extern void mdio_clause45_transmit_disable(struct efx_nic *efx,
++ int disable);
++
++/* Generic part of reconfigure: set/clear loopback bits */
++extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
++
++/* Set the power state of the specified MMDs */
++extern void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
++ int low_power, unsigned int mmd_mask);
++
++/* Read (some of) the PHY settings over MDIO */
++extern void mdio_clause45_get_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd);
++
++/* Set (some of) the PHY settings over MDIO */
++extern int mdio_clause45_set_settings(struct efx_nic *efx,
++ struct ethtool_cmd *ecmd);
++
++/* Wait for specified MMDs to exit reset within a timeout */
++extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
++ unsigned int mmd_mask);
++
++#endif /* EFX_MDIO_10G_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/mentormac.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/mentormac.c
+--- linux-2.6.18.8/drivers/net/sfc/mentormac.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/mentormac.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,506 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/delay.h>
++#include "net_driver.h"
++#include "gmii.h"
++#include "mac.h"
++
++/*
++ * Mentor MAC control
++ */
++
++/**************************************************************************
++ *
++ * Mentor MAC registers
++ *
++ **************************************************************************
++ *
++ * Register addresses are Mentor MAC register numbers. Falcon maps these
++ * registers in at 16-byte intervals. The mac_writel() and mac_readl()
++ * methods take care of abstracting away this difference.
++ */
++
++/* GMAC configuration register 1 */
++#define GM_CFG1_REG_MAC 0x00
++#define GM_SW_RST_LBN 31
++#define GM_SW_RST_WIDTH 1
++#define GM_SIM_RST_LBN 30
++#define GM_SIM_RST_WIDTH 1
++#define GM_RST_RX_MAC_CTL_LBN 19
++#define GM_RST_RX_MAC_CTL_WIDTH 1
++#define GM_RST_TX_MAC_CTL_LBN 18
++#define GM_RST_TX_MAC_CTL_WIDTH 1
++#define GM_RST_RX_FUNC_LBN 17
++#define GM_RST_RX_FUNC_WIDTH 1
++#define GM_RST_TX_FUNC_LBN 16
++#define GM_RST_TX_FUNC_WIDTH 1
++#define GM_LOOP_LBN 8
++#define GM_LOOP_WIDTH 1
++#define GM_RX_FC_EN_LBN 5
++#define GM_RX_FC_EN_WIDTH 1
++#define GM_TX_FC_EN_LBN 4
++#define GM_TX_FC_EN_WIDTH 1
++#define GM_SYNC_RXEN_LBN 3
++#define GM_SYNC_RXEN_WIDTH 1
++#define GM_RX_EN_LBN 2
++#define GM_RX_EN_WIDTH 1
++#define GM_SYNC_TXEN_LBN 1
++#define GM_SYNC_TXEN_WIDTH 1
++#define GM_TX_EN_LBN 0
++#define GM_TX_EN_WIDTH 1
++
++/* GMAC configuration register 2 */
++#define GM_CFG2_REG_MAC 0x01
++#define GM_PAMBL_LEN_LBN 12
++#define GM_PAMBL_LEN_WIDTH 4
++#define GM_IF_MODE_LBN 8
++#define GM_IF_MODE_WIDTH 2
++#define GM_HUGE_FRM_EN_LBN 5
++#define GM_HUGE_FRM_EN_WIDTH 1
++#define GM_LEN_CHK_LBN 4
++#define GM_LEN_CHK_WIDTH 1
++#define GM_PAD_CRC_EN_LBN 2
++#define GM_PAD_CRC_EN_WIDTH 1
++#define GM_CRC_EN_LBN 1
++#define GM_CRC_EN_WIDTH 1
++#define GM_FD_LBN 0
++#define GM_FD_WIDTH 1
++
++/* GMAC maximum frame length register */
++#define GM_MAX_FLEN_REG_MAC 0x04
++#define GM_MAX_FLEN_LBN 0
++#define GM_MAX_FLEN_WIDTH 16
++
++/* GMAC MII management configuration register */
++#define GM_MII_MGMT_CFG_REG_MAC 0x08
++#define GM_RST_MII_MGMT_LBN 31
++#define GM_RST_MII_MGMT_WIDTH 1
++#define GM_MGMT_SCAN_AUTO_INC_LBN 5
++#define GM_MGMT_SCAN_AUTO_INC_WIDTH 1
++#define GM_MGMT_PREM_SUPR_LBN 4
++#define GM_MGMT_PREM_SUPR_WIDTH 1
++#define GM_MGMT_CLK_SEL_LBN 0
++#define GM_MGMT_CLK_SEL_WIDTH 3
++
++/* GMAC MII management command register */
++#define GM_MII_MGMT_CMD_REG_MAC 0x09
++#define GM_MGMT_SCAN_CYC_LBN 1
++#define GM_MGMT_SCAN_CYC_WIDTH 1
++#define GM_MGMT_RD_CYC_LBN 0
++#define GM_MGMT_RD_CYC_WIDTH 1
++
++/* GMAC MII management address register */
++#define GM_MII_MGMT_ADR_REG_MAC 0x0a
++#define GM_MGMT_PHY_ADDR_LBN 8
++#define GM_MGMT_PHY_ADDR_WIDTH 5
++#define GM_MGMT_REG_ADDR_LBN 0
++#define GM_MGMT_REG_ADDR_WIDTH 5
++
++/* GMAC MII management control register */
++#define GM_MII_MGMT_CTL_REG_MAC 0x0b
++#define GM_MGMT_CTL_LBN 0
++#define GM_MGMT_CTL_WIDTH 16
++
++/* GMAC MII management status register */
++#define GM_MII_MGMT_STAT_REG_MAC 0x0c
++#define GM_MGMT_STAT_LBN 0
++#define GM_MGMT_STAT_WIDTH 16
++
++/* GMAC MII management indicators register */
++#define GM_MII_MGMT_IND_REG_MAC 0x0d
++#define GM_MGMT_NOT_VLD_LBN 2
++#define GM_MGMT_NOT_VLD_WIDTH 1
++#define GM_MGMT_SCANNING_LBN 1
++#define GM_MGMT_SCANNING_WIDTH 1
++#define GM_MGMT_BUSY_LBN 0
++#define GM_MGMT_BUSY_WIDTH 1
++
++/* GMAC station address register 1 */
++#define GM_ADR1_REG_MAC 0x10
++#define GM_HWADDR_5_LBN 24
++#define GM_HWADDR_5_WIDTH 8
++#define GM_HWADDR_4_LBN 16
++#define GM_HWADDR_4_WIDTH 8
++#define GM_HWADDR_3_LBN 8
++#define GM_HWADDR_3_WIDTH 8
++#define GM_HWADDR_2_LBN 0
++#define GM_HWADDR_2_WIDTH 8
++
++/* GMAC station address register 2 */
++#define GM_ADR2_REG_MAC 0x11
++#define GM_HWADDR_1_LBN 24
++#define GM_HWADDR_1_WIDTH 8
++#define GM_HWADDR_0_LBN 16
++#define GM_HWADDR_0_WIDTH 8
++
++/* GMAC FIFO configuration register 0 */
++#define GMF_CFG0_REG_MAC 0x12
++#define GMF_FTFENRPLY_LBN 20
++#define GMF_FTFENRPLY_WIDTH 1
++#define GMF_STFENRPLY_LBN 19
++#define GMF_STFENRPLY_WIDTH 1
++#define GMF_FRFENRPLY_LBN 18
++#define GMF_FRFENRPLY_WIDTH 1
++#define GMF_SRFENRPLY_LBN 17
++#define GMF_SRFENRPLY_WIDTH 1
++#define GMF_WTMENRPLY_LBN 16
++#define GMF_WTMENRPLY_WIDTH 1
++#define GMF_FTFENREQ_LBN 12
++#define GMF_FTFENREQ_WIDTH 1
++#define GMF_STFENREQ_LBN 11
++#define GMF_STFENREQ_WIDTH 1
++#define GMF_FRFENREQ_LBN 10
++#define GMF_FRFENREQ_WIDTH 1
++#define GMF_SRFENREQ_LBN 9
++#define GMF_SRFENREQ_WIDTH 1
++#define GMF_WTMENREQ_LBN 8
++#define GMF_WTMENREQ_WIDTH 1
++#define GMF_HSTRSTFT_LBN 4
++#define GMF_HSTRSTFT_WIDTH 1
++#define GMF_HSTRSTST_LBN 3
++#define GMF_HSTRSTST_WIDTH 1
++#define GMF_HSTRSTFR_LBN 2
++#define GMF_HSTRSTFR_WIDTH 1
++#define GMF_HSTRSTSR_LBN 1
++#define GMF_HSTRSTSR_WIDTH 1
++#define GMF_HSTRSTWT_LBN 0
++#define GMF_HSTRSTWT_WIDTH 1
++
++/* GMAC FIFO configuration register 1 */
++#define GMF_CFG1_REG_MAC 0x13
++#define GMF_CFGFRTH_LBN 16
++#define GMF_CFGFRTH_WIDTH 5
++#define GMF_CFGXOFFRTX_LBN 0
++#define GMF_CFGXOFFRTX_WIDTH 16
++
++/* GMAC FIFO configuration register 2 */
++#define GMF_CFG2_REG_MAC 0x14
++#define GMF_CFGHWM_LBN 16
++#define GMF_CFGHWM_WIDTH 6
++#define GMF_CFGLWM_LBN 0
++#define GMF_CFGLWM_WIDTH 6
++
++/* GMAC FIFO configuration register 3 */
++#define GMF_CFG3_REG_MAC 0x15
++#define GMF_CFGHWMFT_LBN 16
++#define GMF_CFGHWMFT_WIDTH 6
++#define GMF_CFGFTTH_LBN 0
++#define GMF_CFGFTTH_WIDTH 6
++
++/* GMAC FIFO configuration register 4 */
++#define GMF_CFG4_REG_MAC 0x16
++#define GMF_HSTFLTRFRM_LBN 0
++#define GMF_HSTFLTRFRM_WIDTH 18
++#define GMF_HSTFLTRFRM_PAUSE_LBN 12
++#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
++
++/* GMAC FIFO configuration register 5 */
++#define GMF_CFG5_REG_MAC 0x17
++#define GMF_CFGHDPLX_LBN 22
++#define GMF_CFGHDPLX_WIDTH 1
++#define GMF_SRFULL_LBN 21
++#define GMF_SRFULL_WIDTH 1
++#define GMF_HSTSRFULLCLR_LBN 20
++#define GMF_HSTSRFULLCLR_WIDTH 1
++#define GMF_CFGBYTMODE_LBN 19
++#define GMF_CFGBYTMODE_WIDTH 1
++#define GMF_HSTDRPLT64_LBN 18
++#define GMF_HSTDRPLT64_WIDTH 1
++#define GMF_HSTFLTRFRMDC_LBN 0
++#define GMF_HSTFLTRFRMDC_WIDTH 18
++#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
++#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
++
++/* TX total octet count */
++#define GM_TX_OCT_CNT_REG_MAC 0x40
++#define GM_STAT_LBN 0
++#define GM_STAT_WIDTH 32
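++
++/* The statistics registers below all share the 32-bit GM_STAT field
++ * layout defined above; only the register numbers differ.
++ */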
++
++/* TX good octet count */
++#define GM_TX_GOOD_OCT_CNT_REG_MAC 0x41
++
++/* TX single collision packet count */
++#define GM_TX_SGLCOL_PKT_CNT_REG_MAC 0x42
++
++/* TX multiple collision packet count */
++#define GM_TX_MULTCOL_PKT_CNT_REG_MAC 0x43
++
++/* TX excessive collision packet count */
++#define GM_TX_EXCOL_PKT_CNT_REG_MAC 0x44
++
++/* TX deferred packet count */
++#define GM_TX_DEF_PKT_CNT_REG_MAC 0x45
++
++/* TX late packet count */
++#define GM_TX_LATECOL_PKT_CNT_REG_MAC 0x46
++
++/* TX excessive deferral packet count */
++#define GM_TX_EXDEF_PKT_CNT_REG_MAC 0x47
++
++/* TX pause packet count */
++#define GM_TX_PAUSE_PKT_CNT_REG_MAC 0x48
++
++/* TX bad packet count */
++#define GM_TX_BAD_PKT_CNT_REG_MAC 0x49
++
++/* TX unicast packet count */
++#define GM_TX_UCAST_PKT_CNT_REG_MAC 0x4a
++
++/* TX multicast packet count */
++#define GM_TX_MCAST_PKT_CNT_REG_MAC 0x4b
++
++/* TX broadcast packet count */
++#define GM_TX_BCAST_PKT_CNT_REG_MAC 0x4c
++
++/* TX <64-byte packet count */
++#define GM_TX_LT64_PKT_CNT_REG_MAC 0x4d
++
++/* TX 64-byte packet count */
++#define GM_TX_64_PKT_CNT_REG_MAC 0x4e
++
++/* TX 65-byte to 127-byte packet count */
++#define GM_TX_65_TO_127_PKT_CNT_REG_MAC 0x4f
++
++/* TX 128-byte to 255-byte packet count */
++#define GM_TX_128_TO_255_PKT_CNT_REG_MAC 0x50
++
++/* TX 256-byte to 511-byte packet count */
++#define GM_TX_256_TO_511_PKT_CNT_REG_MAC 0x51
++
++/* TX 512-byte to 1023-byte packet count */
++#define GM_TX_512_TO_1023_PKT_CNT_REG_MAC 0x52
++
++/* TX 1024-byte to 15xx-byte packet count */
++#define GM_TX_1024_TO_15XX_PKT_CNT_REG_MAC 0x53
++
++/* TX 15xx-byte to jumbo packet count */
++#define GM_TX_15XX_TO_JUMBO_PKT_CNT_REG_MAC 0x54
++
++/* TX >jumbo packet count */
++#define GM_TX_GTJUMBO_PKT_CNT_REG_MAC 0x55
++
++/* RX good octet count */
++#define GM_RX_GOOD_OCT_CNT_REG_MAC 0x60
++
++/* RX bad octet count */
++#define GM_RX_BAD_OCT_CNT_REG_MAC 0x61
++
++/* RX missed packet count */
++#define GM_RX_MISS_PKT_CNT_REG_MAC 0x62
++
++/* RX false carrier count */
++#define GM_RX_FALSE_CRS_CNT_REG_MAC 0x63
++
++/* RX pause packet count */
++#define GM_RX_PAUSE_PKT_CNT_REG_MAC 0x64
++
++/* RX bad packet count */
++#define GM_RX_BAD_PKT_CNT_REG_MAC 0x65
++
++/* RX unicast packet count */
++#define GM_RX_UCAST_PKT_CNT_REG_MAC 0x66
++
++/* RX multicast packet count */
++#define GM_RX_MCAST_PKT_CNT_REG_MAC 0x67
++
++/* RX broadcast packet count */
++#define GM_RX_BCAST_PKT_CNT_REG_MAC 0x68
++
++/* RX <64-byte good packet count */
++#define GM_RX_GOOD_LT64_PKT_CNT_REG_MAC 0x69
++
++/* RX <64-byte bad packet count */
++#define GM_RX_BAD_LT64_PKT_CNT_REG_MAC 0x6a
++
++/* RX 64-byte packet count */
++#define GM_RX_64_PKT_CNT_REG_MAC 0x6b
++
++/* RX 65-byte to 127-byte packet count */
++#define GM_RX_65_TO_127_PKT_CNT_REG_MAC 0x6c
++
++/* RX 128-byte to 255-byte packet count*/
++#define GM_RX_128_TO_255_PKT_CNT_REG_MAC 0x6d
++
++/* RX 256-byte to 511-byte packet count */
++#define GM_RX_256_TO_511_PKT_CNT_REG_MAC 0x6e
++
++/* RX 512-byte to 1023-byte packet count */
++#define GM_RX_512_TO_1023_PKT_CNT_REG_MAC 0x6f
++
++/* RX 1024-byte to 15xx-byte packet count */
++#define GM_RX_1024_TO_15XX_PKT_CNT_REG_MAC 0x70
++
++/* RX 15xx-byte to jumbo packet count */
++#define GM_RX_15XX_TO_JUMBO_PKT_CNT_REG_MAC 0x71
++
++/* RX >jumbo packet count */
++#define GM_RX_GTJUMBO_PKT_CNT_REG_MAC 0x72
++
++/* RX 64-byte to 15xx-byte bad crc packet count */
++#define GM_RX_BAD_64_TO_15XX_PKT_CNT_REG_MAC 0x73
++
++/* RX 15xx-byte to jumbo bad crc packet count */
++#define GM_RX_BAD_15XX_TO_JUMBO_PKT_CNT_REG_MAC 0x74
++
++/* RX >jumbo bad crc packet count */
++#define GM_RX_BAD_GTJUMBO_PKT_CNT_REG_MAC 0x75
++
++/**************************************************************************
++ *
++ * GMII access to PHY
++ *
++ **************************************************************************
++ */
++
++/* This does not reset the PHY, only the MAC. However, TX and RX will
++ * both be disabled on the MAC after this, so the state of the PHY is
++ * somewhat irrelevant until the MAC is reinitialised.
++ */
++void mentormac_reset(struct efx_nic *efx)
++{
++ efx_dword_t reg;
++
++ EFX_POPULATE_DWORD_1(reg, GM_SW_RST, 1);
++ efx->mac_op->mac_writel(efx, &reg, GM_CFG1_REG_MAC);
++ udelay(1000);
++
++ EFX_POPULATE_DWORD_1(reg, GM_SW_RST, 0);
++ efx->mac_op->mac_writel(efx, &reg, GM_CFG1_REG_MAC);
++ udelay(1000);
++
++ /* Configure GMII interface so PHY is accessible */
++ EFX_POPULATE_DWORD_1(reg, GM_MGMT_CLK_SEL, 0x4);
++ efx->mac_op->mac_writel(efx, &reg,
++ GM_MII_MGMT_CFG_REG_MAC);
++ udelay(10);
++}
++
++void mentormac_reconfigure(struct efx_nic *efx)
++{
++ int loopback, tx_fc, rx_fc, if_mode, full_duplex, bytemode, half_duplex;
++ unsigned int max_frame_len;
++ efx_dword_t reg;
++
++ /* Configuration register 1 */
++ tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
++ rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
++ loopback = (efx->loopback_mode == LOOPBACK_MAC) ? 1 : 0;
++ bytemode = (efx->link_options & GM_LPA_1000) ? 1 : 0;
++
++ if (efx->loopback_mode != LOOPBACK_NONE)
++ bytemode = 1;
++ if (!(efx->link_options & GM_LPA_DUPLEX))
++ /* Half-duplex operation requires TX flow control */
++ tx_fc = 1;
++ EFX_POPULATE_DWORD_5(reg,
++ GM_LOOP, loopback,
++ GM_TX_EN, 1,
++ GM_TX_FC_EN, tx_fc,
++ GM_RX_EN, 1,
++ GM_RX_FC_EN, rx_fc);
++ efx->mac_op->mac_writel(efx, &reg, GM_CFG1_REG_MAC);
++ udelay(10);
++
++ /* Configuration register 2 */
++ if_mode = (bytemode) ? 2 : 1;
++ full_duplex = (efx->link_options & GM_LPA_DUPLEX) ? 1 : 0;
++ EFX_POPULATE_DWORD_4(reg,
++ GM_IF_MODE, if_mode,
++ GM_PAD_CRC_EN, 1,
++ GM_FD, full_duplex,
++ GM_PAMBL_LEN, 0x7 /* datasheet recommended */);
++ efx->mac_op->mac_writel(efx, &reg, GM_CFG2_REG_MAC);
++ udelay(10);
++
++ /* Max frame len register */
++ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
++ EFX_POPULATE_DWORD_1(reg, GM_MAX_FLEN, max_frame_len);
++ efx->mac_op->mac_writel(efx, &reg, GM_MAX_FLEN_REG_MAC);
++ udelay(10);
++
++ /* FIFO configuration register 0 */
++ EFX_POPULATE_DWORD_5(reg,
++ GMF_FTFENREQ, 1,
++ GMF_STFENREQ, 1,
++ GMF_FRFENREQ, 1,
++ GMF_SRFENREQ, 1,
++ GMF_WTMENREQ, 1);
++ efx->mac_op->mac_writel(efx, &reg, GMF_CFG0_REG_MAC);
++ udelay(10);
++
++ /* FIFO configuration register 1 */
++ EFX_POPULATE_DWORD_2(reg,
++ GMF_CFGFRTH, 0x12,
++ GMF_CFGXOFFRTX, 0xffff);
++ efx->mac_op->mac_writel(efx, &reg, GMF_CFG1_REG_MAC);
++ udelay(10);
++
++ /* FIFO configuration register 2 */
++ EFX_POPULATE_DWORD_2(reg,
++ GMF_CFGHWM, 0x3f,
++ GMF_CFGLWM, 0xa);
++ efx->mac_op->mac_writel(efx, &reg, GMF_CFG2_REG_MAC);
++ udelay(10);
++
++ /* FIFO configuration register 3 */
++ EFX_POPULATE_DWORD_2(reg,
++ GMF_CFGHWMFT, 0x1c,
++ GMF_CFGFTTH, 0x08);
++ efx->mac_op->mac_writel(efx, &reg, GMF_CFG3_REG_MAC);
++ udelay(10);
++
++ /* FIFO configuration register 4 */
++ EFX_POPULATE_DWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1);
++ efx->mac_op->mac_writel(efx, &reg, GMF_CFG4_REG_MAC);
++ udelay(10);
++
++ /* FIFO configuration register 5 */
++ half_duplex = (efx->link_options & GM_LPA_DUPLEX) ? 0 : 1;
++ efx->mac_op->mac_readl(efx, &reg, GMF_CFG5_REG_MAC);
++ EFX_SET_DWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode);
++ EFX_SET_DWORD_FIELD(reg, GMF_CFGHDPLX, half_duplex);
++ EFX_SET_DWORD_FIELD(reg, GMF_HSTDRPLT64, half_duplex);
++ EFX_SET_DWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0);
++ efx->mac_op->mac_writel(efx, &reg, GMF_CFG5_REG_MAC);
++ udelay(10);
++
++ /* MAC address */
++ EFX_POPULATE_DWORD_4(reg,
++ GM_HWADDR_5, efx->net_dev->dev_addr[5],
++ GM_HWADDR_4, efx->net_dev->dev_addr[4],
++ GM_HWADDR_3, efx->net_dev->dev_addr[3],
++ GM_HWADDR_2, efx->net_dev->dev_addr[2]);
++ efx->mac_op->mac_writel(efx, &reg, GM_ADR1_REG_MAC);
++ udelay(10);
++ EFX_POPULATE_DWORD_2(reg,
++ GM_HWADDR_1, efx->net_dev->dev_addr[1],
++ GM_HWADDR_0, efx->net_dev->dev_addr[0]);
++ efx->mac_op->mac_writel(efx, &reg, GM_ADR2_REG_MAC);
++ udelay(10);
++}
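++
++/* Illustrative usage sketch (hypothetical caller, not in this file):
++ * per the comment above mentormac_reset(), the MAC is left with TX and
++ * RX disabled after a reset, so bringing the MAC up amounts to:
++ *
++ *	mentormac_reset(efx);		 re-quiesce MAC, PHY untouched
++ *	mentormac_reconfigure(efx);	 re-apply duplex/flow control/MTU
++ */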
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/mtd.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/mtd.c
+--- linux-2.6.18.8/drivers/net/sfc/mtd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/mtd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,602 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/module.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/delay.h>
++
++#define EFX_DRIVER_NAME "sfc_mtd"
++#include "driverlink_api.h"
++#include "net_driver.h"
++#include "spi.h"
++
++/*
++ * Flash and EEPROM (MTD) device driver
++ *
++ * This file provides a separate kernel module (sfc_mtd) which
++ * exposes the flash and EEPROM devices present on Solarflare NICs as
++ * MTD devices, enabling you to reflash the boot ROM code (or use the
++ * remaining space on the flash as a jffs2 filesystem, should you want
++ * to do so).
++ */
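++
++/* Resulting flash layout (see efx_flash_probe() below): the first
++ * EFX_FLASH_BOOTROM_OFFSET bytes (32KB) are exposed as the normally
++ * read-only "sfc_flash_config" partition and, if the flash is larger
++ * than that, the remainder becomes the "sfc_flash_bootrom" partition.
++ */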
++
++#define EFX_MTD_VERIFY_BUF_LEN 16
++#define EFX_MAX_PARTITIONS 2
++#define EFX_FLASH_BOOTROM_OFFSET 0x8000U
++
++/* Write enable for EEPROM/flash configuration area
++ *
++ * Normally, writes to parts of non-volatile storage which contain
++ * critical configuration are disabled to prevent accidents. This
++ * parameter allows enabling of such writes.
++ */
++static unsigned int efx_allow_nvconfig_writes;
++
++struct efx_mtd {
++ struct mtd_info mtd;
++ struct mtd_partition part[EFX_MAX_PARTITIONS];
++ char part_name[EFX_MAX_PARTITIONS][32];
++ char name[32];
++ struct efx_dl_device *efx_dev;
++ struct efx_nic *efx;
++ /* This must be held when using *spi; it guards against races
++ * with device reset and between sequences of dependent
++ * commands. */
++ struct semaphore access_lock;
++ struct efx_spi_device *spi;
++};
++
++/* SPI utilities */
++
++static int efx_spi_fast_wait(struct efx_mtd *efx_mtd)
++{
++ struct efx_spi_device *spi = efx_mtd->spi;
++ u8 status;
++ int i, rc;
++
++ /* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
++ for (i = 0; i < 50; i++) {
++ udelay(20);
++
++ rc = spi->read(spi, efx_mtd->efx, SPI_RDSR, -1,
++ &status, sizeof(status));
++ if (rc)
++ return rc;
++ if (!(status & SPI_STATUS_NRDY))
++ return 0;
++ }
++ EFX_ERR(efx_mtd->efx, "timed out waiting for %s last status=0x%02x\n",
++ efx_mtd->name, status);
++ return -ETIMEDOUT;
++}
++
++static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, int uninterruptible)
++{
++ struct efx_spi_device *spi = efx_mtd->spi;
++ u8 status;
++ int rc, i;
++
++ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
++ for (i = 0; i < 40; i++) {
++ __set_current_state(uninterruptible ?
++ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
++ schedule_timeout(HZ / 10);
++ rc = spi->read(spi, efx_mtd->efx, SPI_RDSR, -1,
++ &status, sizeof(status));
++ if (rc)
++ return rc;
++ if (!(status & SPI_STATUS_NRDY))
++ return 0;
++ if (signal_pending(current))
++ return -EINTR;
++ }
++ EFX_ERR(efx_mtd->efx, "timed out waiting for %s\n", efx_mtd->name);
++ return -ETIMEDOUT;
++}
++
++static int
++efx_spi_write_enable(struct efx_mtd *efx_mtd)
++{
++ struct efx_spi_device *spi = efx_mtd->spi;
++
++ return spi->write(spi, efx_mtd->efx, SPI_WREN, -1, NULL, 0);
++}
++
++static int efx_spi_unlock(struct efx_mtd *efx_mtd)
++{
++ struct efx_spi_device *spi = efx_mtd->spi;
++ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
++ SPI_STATUS_BP0);
++ u8 status;
++ int rc;
++
++ rc = spi->read(spi, efx_mtd->efx, SPI_RDSR, -1, &status,
++ sizeof(status));
++ if (rc)
++ return rc;
++
++ if (!(status & unlock_mask))
++ return 0; /* already unlocked */
++
++ rc = efx_spi_write_enable(efx_mtd);
++ if (rc)
++ return rc;
++ rc = spi->write(spi, efx_mtd->efx, SPI_SST_EWSR, -1, NULL, 0);
++ if (rc)
++ return rc;
++
++ status &= ~unlock_mask;
++ rc = spi->write(spi, efx_mtd->efx, SPI_WRSR, -1, &status,
++ sizeof(status));
++ if (rc)
++ return rc;
++ rc = efx_spi_fast_wait(efx_mtd);
++ if (rc)
++ return rc;
++
++ return 0;
++}
++
++/* Dummy device used in case of a failed reset */
++
++static int efx_spi_dummy_read(const struct efx_spi_device *spi,
++ struct efx_nic *efx, unsigned int command,
++ int address, void *data, unsigned int len)
++{
++ return -EIO;
++}
++
++static int efx_spi_dummy_write(const struct efx_spi_device *spi,
++ struct efx_nic *efx, unsigned int command,
++ int address, const void *data, unsigned int len)
++{
++ return -EIO;
++}
++
++static struct efx_spi_device efx_spi_dummy_device = {
++ .block_size = 1,
++ .erase_command = 0xff,
++ .read = efx_spi_dummy_read,
++ .write = efx_spi_dummy_write,
++};
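++
++/* efx_mtd_reset_resume() below swaps the real SPI device for this dummy
++ * if a device reset fails, so subsequent MTD operations fail cleanly
++ * with -EIO instead of touching broken hardware.
++ */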
++
++/* MTD interface */
++
++static int efx_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
++ size_t *retlen, u8 *buffer)
++{
++ struct efx_mtd *efx_mtd = mtd->priv;
++ struct efx_spi_device *spi;
++ unsigned int command;
++ unsigned int block_len;
++ unsigned int pos = 0;
++ int rc;
++
++ rc = down_interruptible(&efx_mtd->access_lock);
++ if (rc)
++ goto out;
++ spi = efx_mtd->spi;
++
++ while (pos < len) {
++ block_len = min((unsigned int)len - pos,
++ efx_spi_read_limit(spi, start + pos));
++ command = efx_spi_munge_command(spi, SPI_READ, start + pos);
++ rc = spi->read(spi, efx_mtd->efx, command, start + pos,
++ buffer + pos, block_len);
++ if (rc)
++ break;
++ pos += block_len;
++
++ /* Avoid locking up the system */
++ cond_resched();
++ if (signal_pending(current)) {
++ rc = -EINTR;
++ break;
++ }
++ }
++
++ up(&efx_mtd->access_lock);
++out:
++ *retlen = pos;
++ return rc;
++}
++
++/* Check that device contents match buffer. If repeat is true, buffer
++ * contains a pattern of length EFX_MTD_VERIFY_BUF_LEN which the
++ * device contents should match repeatedly.
++ */
++static int efx_mtd_verify(struct mtd_info *mtd, loff_t start,
++ size_t len, const u8 *buffer, int repeat)
++{
++ u8 verify_buffer[EFX_MTD_VERIFY_BUF_LEN];
++ unsigned int block_len;
++ size_t read_len;
++ unsigned int pos = 0;
++ int rc = 0;
++
++ while (pos < len) {
++ block_len = min(len - pos, sizeof(verify_buffer));
++ rc = efx_mtd_read(mtd, start + pos, block_len, &read_len,
++ verify_buffer);
++ if (rc)
++ return rc;
++ if (memcmp(repeat ? buffer : buffer + pos, verify_buffer,
++ block_len))
++ return -EIO;
++ pos += block_len;
++ }
++
++ return 0;
++}
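++
++/* Both callers below use this: efx_mtd_erase() verifies against a
++ * repeated 0xff pattern (repeat=1), while efx_mtd_write() verifies the
++ * freshly written data in place (repeat=0).
++ */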
++
++static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
++{
++ struct efx_mtd *efx_mtd = mtd->priv;
++ struct efx_spi_device *spi;
++ u8 empty[EFX_MTD_VERIFY_BUF_LEN];
++ int rc;
++
++ if (erase->len != mtd->erasesize) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ rc = down_interruptible(&efx_mtd->access_lock);
++ if (rc)
++ goto out;
++ spi = efx_mtd->spi;
++ if (spi->erase_command == 0) {
++ rc = -EOPNOTSUPP;
++ goto out_up;
++ }
++
++ rc = efx_spi_unlock(efx_mtd);
++ if (rc)
++ goto out_up;
++ rc = efx_spi_write_enable(efx_mtd);
++ if (rc)
++ goto out_up;
++ rc = spi->write(spi, efx_mtd->efx, spi->erase_command, erase->addr,
++ NULL, 0);
++ if (rc)
++ goto out_up;
++ rc = efx_spi_slow_wait(efx_mtd, 0);
++
++out_up:
++ up(&efx_mtd->access_lock);
++ if (rc)
++ goto out;
++
++ memset(empty, 0xff, sizeof(empty));
++ rc = efx_mtd_verify(mtd, erase->addr, erase->len, empty, 1);
++
++out:
++ if (rc == 0) {
++ erase->state = MTD_ERASE_DONE;
++ } else {
++ erase->state = MTD_ERASE_FAILED;
++#if defined(EFX_USE_MTD_ERASE_FAIL_ADDR)
++ erase->fail_addr = 0xffffffff;
++#endif
++ }
++ mtd_erase_callback(erase);
++ return rc;
++}
++
++static int efx_mtd_write(struct mtd_info *mtd, loff_t start,
++ size_t len, size_t *retlen, const u8 *buffer)
++{
++ struct efx_mtd *efx_mtd = mtd->priv;
++ struct efx_spi_device *spi;
++ unsigned int command;
++ unsigned int block_len;
++ unsigned int pos = 0;
++ int rc;
++
++ rc = down_interruptible(&efx_mtd->access_lock);
++ if (rc)
++ goto out;
++ spi = efx_mtd->spi;
++
++ rc = efx_spi_unlock(efx_mtd);
++ if (rc)
++ goto out_up;
++
++ while (pos < len) {
++ rc = efx_spi_write_enable(efx_mtd);
++ if (rc)
++ break;
++
++ block_len = min((unsigned int)len - pos,
++ efx_spi_write_limit(spi, start + pos));
++ command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
++ rc = spi->write(spi, efx_mtd->efx, command, start + pos,
++ buffer + pos, block_len);
++ if (rc)
++ break;
++ pos += block_len;
++
++ rc = efx_spi_fast_wait(efx_mtd);
++ if (rc)
++ break;
++
++ /* Avoid locking up the system */
++ cond_resched();
++ if (signal_pending(current)) {
++ rc = -EINTR;
++ break;
++ }
++ }
++
++out_up:
++ up(&efx_mtd->access_lock);
++ if (rc == 0)
++ rc = efx_mtd_verify(mtd, start, len, buffer, 0);
++out:
++ *retlen = pos;
++ return rc;
++}
++
++static void efx_mtd_sync(struct mtd_info *mtd)
++{
++ struct efx_mtd *efx_mtd = mtd->priv;
++ int rc;
++
++ down(&efx_mtd->access_lock);
++ rc = efx_spi_slow_wait(efx_mtd, 1);
++ if (rc)
++ EFX_ERR(efx_mtd->efx, "%s sync failed (%d)\n",
++ efx_mtd->name, rc);
++ up(&efx_mtd->access_lock);
++}
++
++/* Driverlink interface */
++
++static void efx_mtd_reset_suspend(struct efx_dl_device *efx_dev)
++{
++ struct efx_mtd *efx_mtd = efx_dev->priv;
++
++ if (!efx_mtd)
++ return;
++
++ /* Acquire lock to ensure that any in-progress operations have
++ * completed, and no new ones can start.
++ */
++ down(&efx_mtd->access_lock);
++}
++
++static void efx_mtd_reset_resume(struct efx_dl_device *efx_dev, int ok)
++{
++ struct efx_mtd *efx_mtd = efx_dev->priv;
++
++ if (!efx_mtd)
++ return;
++
++ /* If device reset failed already, or SPI device doesn't
++ * become ready, disable device.
++ */
++ if (!ok || efx_spi_slow_wait(efx_mtd, 1) != 0) {
++ efx_mtd->spi = &efx_spi_dummy_device;
++ EFX_ERR(efx_mtd->efx, "%s disabled after failed reset\n",
++ efx_mtd->name);
++ }
++
++ up(&efx_mtd->access_lock);
++}
++
++static void efx_mtd_remove(struct efx_dl_device *efx_dev)
++{
++ struct efx_mtd *efx_mtd = efx_dev->priv;
++
++ del_mtd_partitions(&efx_mtd->mtd);
++ kfree(efx_mtd);
++ efx_dev->priv = NULL;
++}
++
++static __devinit int efx_mtd_register(struct efx_mtd *efx_mtd,
++ struct efx_dl_device *efx_dev,
++ struct efx_nic *efx,
++ struct efx_spi_device *spi,
++ const char *type_name,
++ const char **part_type_name,
++ unsigned int num_parts)
++{
++ int i;
++
++ efx_dev->priv = efx_mtd;
++
++ efx_mtd->efx_dev = efx_dev;
++ efx_mtd->efx = efx;
++ efx_mtd->spi = spi;
++ sema_init(&efx_mtd->access_lock, 1);
++
++ efx_mtd->mtd.size = spi->size;
++ efx_mtd->mtd.erasesize = spi->erase_size;
++#if defined(EFX_USE_MTD_WRITESIZE)
++ efx_mtd->mtd.writesize = 1;
++#endif
++ if (snprintf(efx_mtd->name, sizeof(efx_mtd->name),
++ "%s %s", efx->name, type_name) >=
++ sizeof(efx_mtd->name))
++ return -ENAMETOOLONG;
++
++ efx_mtd->mtd.priv = efx_mtd;
++ efx_mtd->mtd.name = efx_mtd->name;
++ efx_mtd->mtd.erase = efx_mtd_erase;
++ efx_mtd->mtd.read = efx_mtd_read;
++ efx_mtd->mtd.write = efx_mtd_write;
++ efx_mtd->mtd.sync = efx_mtd_sync;
++
++ for (i = 0; i < num_parts; i++) {
++ efx_mtd->part[i].name = efx_mtd->part_name[i];
++ if (snprintf(efx_mtd->part_name[i],
++ sizeof(efx_mtd->part_name[i]),
++ "%s %s", efx->name, part_type_name[i]) >=
++ sizeof(efx_mtd->part_name[i]))
++ return -ENAMETOOLONG;
++
++ if (efx_allow_nvconfig_writes)
++ efx_mtd->part[i].mask_flags &= ~MTD_WRITEABLE;
++ }
++
++ return add_mtd_partitions(&efx_mtd->mtd, efx_mtd->part, num_parts);
++}
++
++static int __devinit
++efx_flash_probe(struct efx_dl_device *efx_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char *silicon_rev)
++{
++ struct efx_nic *efx = efx_dl_get_nic(efx_dev);
++ struct efx_mtd *efx_mtd;
++ const char *part_type_name[2];
++ unsigned int num_parts;
++ int rc;
++
++ if (!efx->spi_flash)
++ return -ENODEV;
++
++ efx_mtd = kzalloc(sizeof(*efx_mtd), GFP_KERNEL);
++ if (!efx_mtd)
++ return -ENOMEM;
++
++ efx_mtd->mtd.type = MTD_NORFLASH;
++ efx_mtd->mtd.flags = MTD_CAP_NORFLASH;
++
++ part_type_name[0] = "sfc_flash_config";
++ efx_mtd->part[0].offset = 0;
++ efx_mtd->part[0].size = min(efx->spi_flash->size,
++ EFX_FLASH_BOOTROM_OFFSET);
++ efx_mtd->part[0].mask_flags = MTD_WRITEABLE;
++
++ if (efx->spi_flash->size <= EFX_FLASH_BOOTROM_OFFSET) {
++ num_parts = 1;
++ } else {
++ part_type_name[1] = "sfc_flash_bootrom";
++ efx_mtd->part[1].offset = EFX_FLASH_BOOTROM_OFFSET;
++ efx_mtd->part[1].size = (efx->spi_flash->size
++ - EFX_FLASH_BOOTROM_OFFSET);
++ num_parts = 2;
++ }
++
++ rc = efx_mtd_register(efx_mtd, efx_dev, efx, efx->spi_flash,
++ "sfc_flash", part_type_name, num_parts);
++ if (rc)
++ kfree(efx_mtd);
++ return rc;
++}
++
++static struct efx_dl_driver efx_flash_driver = {
++ .name = "sfc_flash",
++ .probe = efx_flash_probe,
++ .remove = efx_mtd_remove,
++ .reset_suspend = efx_mtd_reset_suspend,
++ .reset_resume = efx_mtd_reset_resume,
++};
++
++static int __devinit
++efx_eeprom_probe(struct efx_dl_device *efx_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char *silicon_rev)
++{
++ struct efx_nic *efx = efx_dl_get_nic(efx_dev);
++ struct efx_mtd *efx_mtd;
++ const char *type_name;
++ const char *part_type_name[1];
++ int rc;
++
++ if (!efx->spi_eeprom)
++ return -ENODEV;
++
++ efx_mtd = kzalloc(sizeof(*efx_mtd), GFP_KERNEL);
++ if (!efx_mtd)
++ return -ENOMEM;
++
++ efx_mtd->mtd.type = MTD_RAM;
++ efx_mtd->mtd.flags = MTD_CAP_RAM;
++
++ efx_mtd->part[0].offset = 0;
++ efx_mtd->part[0].size = efx->spi_eeprom->size;
++ efx_mtd->part[0].mask_flags = MTD_WRITEABLE;
++
++ if (efx->spi_eeprom->size <= 0x200) {
++ type_name = "sfc_small_eeprom";
++ part_type_name[0] = "sfc_small_config";
++ } else {
++ type_name = "sfc_large_eeprom";
++ part_type_name[0] = "sfc_large_config";
++ }
++
++ rc = efx_mtd_register(efx_mtd, efx_dev, efx, efx->spi_eeprom,
++ type_name, part_type_name, 1);
++ if (rc)
++ kfree(efx_mtd);
++ return rc;
++}
++
++static struct efx_dl_driver efx_eeprom_driver = {
++ .name = "sfc_eeprom",
++ .probe = efx_eeprom_probe,
++ .remove = efx_mtd_remove,
++ .reset_suspend = efx_mtd_reset_suspend,
++ .reset_resume = efx_mtd_reset_resume,
++};
++
++/* Kernel module interface */
++
++static int __init efx_mtd_init_module(void)
++{
++ int rc;
++
++ rc = efx_dl_register_driver(&efx_flash_driver);
++ if (rc)
++ return rc;
++ rc = efx_dl_register_driver(&efx_eeprom_driver);
++ if (rc) {
++ efx_dl_unregister_driver(&efx_flash_driver);
++ return rc;
++ }
++
++ return 0;
++}
++
++static void __exit efx_mtd_exit_module(void)
++{
++ efx_dl_unregister_driver(&efx_eeprom_driver);
++ efx_dl_unregister_driver(&efx_flash_driver);
++}
++
++module_init(efx_mtd_init_module);
++module_exit(efx_mtd_exit_module);
++
++MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
++ "Solarflare Communications");
++MODULE_DESCRIPTION("SFC MTD driver");
++MODULE_LICENSE("GPL");
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/net_driver.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/net_driver.h
+--- linux-2.6.18.8/drivers/net/sfc/net_driver.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/net_driver.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1096 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/* Common definitions for all Efx net driver code */
++
++#ifndef EFX_NET_DRIVER_H
++#define EFX_NET_DRIVER_H
++
++#include <linux/version.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/if_vlan.h>
++#include <linux/timer.h>
++#include <linux/mii.h>
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/device.h>
++
++/* Must come before other headers */
++#include "kernel_compat.h"
++
++#include "enum.h"
++#include "bitfield.h"
++#include "driverlink_api.h"
++#include "driverlink.h"
++#include "i2c-direct.h"
++
++#ifndef EFX_USE_DEBUGFS
++/* Sick, but we have no other use for dentry */
++#define dentry proc_dir_entry
++#endif
++
++#define EFX_MAX_LRO_DESCRIPTORS 8
++#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
++
++/**************************************************************************
++ *
++ * Build definitions
++ *
++ **************************************************************************/
++#ifndef EFX_DRIVER_NAME
++#define EFX_DRIVER_NAME "sfc"
++#endif
++#define EFX_DRIVER_VERSION "2.2.0101"
++
++#ifdef EFX_ENABLE_DEBUG
++#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
++#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
++#else
++#define EFX_BUG_ON_PARANOID(x) do {} while (0)
++#define EFX_WARN_ON_PARANOID(x) do {} while (0)
++#endif
++
++/* Include net device name in log messages if it has been registered.
++ * Use efx->name not efx->net_dev->name so that races with (un)registration
++ * are harmless.
++ */
++#define NET_DEV_NAME(efx) ((efx)->net_dev_registered ? (efx)->name : "")
++
++/* Un-rate-limited logging */
++#define EFX_ERR(efx, fmt, args...) \
++dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
++
++#define EFX_INFO(efx, fmt, args...) \
++dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
++
++#ifdef EFX_ENABLE_DEBUG
++#define EFX_LOG(efx, fmt, args...) \
++dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
++#else
++#define EFX_LOG(efx, fmt, args...) \
++dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
++#endif
++
++#define EFX_TRACE(efx, fmt, args...) do {} while (0)
++
++#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
++
++/* Rate-limited logging */
++#define EFX_ERR_RL(efx, fmt, args...) \
++do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
++
++#define EFX_INFO_RL(efx, fmt, args...) \
++do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
++
++#define EFX_LOG_RL(efx, fmt, args...) \
++do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
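++
++/* Example (hypothetical message): EFX_INFO(efx, "link up\n") expands to
++ * dev_info(&efx->pci_dev->dev, "INFO: %s link up\n", NET_DEV_NAME(efx)),
++ * and the _RL variants wrap the same call in net_ratelimit().
++ */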
++
++/* Kernel headers may redefine inline anyway */
++#ifndef inline
++#define inline inline __attribute__ ((always_inline))
++#endif
++
++/**************************************************************************
++ *
++ * Efx data structures
++ *
++ **************************************************************************/
++
++#define EFX_MAX_CHANNELS 32
++#define EFX_MAX_TX_QUEUES 1
++#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
++
++/**
++ * struct efx_special_buffer - An Efx special buffer
++ * @addr: CPU base address of the buffer
++ * @dma_addr: DMA base address of the buffer
++ * @len: Buffer length, in bytes
++ * @index: Buffer index within the controller's buffer table
++ * @entries: Number of buffer table entries
++ *
++ * Special buffers are used for the event queues and the TX and RX
++ * descriptor queues for each channel. They are *not* used for the
++ * actual transmit and receive buffers.
++ *
++ * Note that for Falcon, TX and RX descriptor queues live in host memory.
++ * Allocation and freeing procedures must take this into account.
++ */
++struct efx_special_buffer {
++ void *addr;
++ dma_addr_t dma_addr;
++ unsigned int len;
++ int index;
++ int entries;
++};
++
++/**
++ * struct efx_tx_buffer - An Efx TX buffer
++ * @skb: The associated socket buffer.
++ * Set only on the final fragment of a packet; %NULL for all other
++ * fragments. When this fragment completes, then we can free this
++ * skb.
++ * @dma_addr: DMA address of the fragment.
++ * @len: Length of this fragment.
++ * This field is zero when the queue slot is empty.
++ * @continuation: True if this fragment is not the end of a packet.
++ * @unmap_single: True if pci_unmap_single should be used.
++ * @unmap_addr: DMA address to unmap
++ * @unmap_len: Length of this fragment to unmap
++ */
++struct efx_tx_buffer {
++ const struct sk_buff *skb;
++ dma_addr_t dma_addr;
++ unsigned short len;
++ unsigned char continuation;
++ unsigned char unmap_single;
++ dma_addr_t unmap_addr;
++ unsigned short unmap_len;
++};
++
++/**
++ * struct efx_tx_queue - An Efx TX queue
++ *
++ * This is a ring buffer of TX fragments.
++ * Since the TX completion path always executes on the same
++ * CPU and the xmit path can operate on different CPUs,
++ * performance is increased by ensuring that the completion
++ * path and the xmit path operate on different cache lines.
++ * This is particularly important if the xmit path is always
++ * executing on one CPU which is different from the completion
++ * path. There is also a cache line for members which are
++ * read but not written on the fast path.
++ *
++ * @efx: The associated Efx NIC
++ * @queue: DMA queue number
++ * @used: Queue is used by net driver
++ * @channel: The associated channel
++ * @buffer: The software buffer ring
++ * @txd: The hardware descriptor ring
++ * @debug_dir: debugfs directory
++ * @read_count: Current read pointer.
++ * This is the number of buffers that have been removed from both rings.
++ * @stopped: Stopped flag.
++ * Set if this TX queue is currently stopping its port.
++ * @insert_count: Current insert pointer
++ * This is the number of buffers that have been added to the
++ * software ring.
++ * @write_count: Current write pointer
++ * This is the number of buffers that have been added to the
++ * hardware ring.
++ * @old_read_count: The value of read_count when last checked.
++ * This is here for performance reasons. The xmit path will
++ * only get the up-to-date value of read_count if this
++ * variable indicates that the queue is full. This is to
++ * avoid cache-line ping-pong between the xmit path and the
++ * completion path.
++ */
++struct efx_tx_queue {
++ /* Members which don't change on the fast path */
++ struct efx_nic *efx ____cacheline_aligned_in_smp;
++ int queue;
++ int used;
++ struct efx_channel *channel;
++ struct efx_nic *nic;
++ struct efx_tx_buffer *buffer;
++ struct efx_special_buffer txd;
++#ifdef CONFIG_SFC_DEBUGFS
++ struct dentry *debug_dir;
++#endif
++
++ /* Members used mainly on the completion path */
++ unsigned int read_count ____cacheline_aligned_in_smp;
++ int stopped;
++
++ /* Members used only on the xmit path */
++ unsigned int insert_count ____cacheline_aligned_in_smp;
++ unsigned int write_count;
++ unsigned int old_read_count;
++};
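++
++/* Illustrative sketch (not part of the original driver): because these
++ * counters are free-running, the xmit path can compute the ring fill
++ * level without touching the completion path's cache line:
++ *
++ *	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
++ *
++ * and only refreshes old_read_count from read_count when fill_level
++ * indicates the ring may be full, as described above.
++ */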
++
++/**
++ * struct efx_rx_buffer - An Efx RX data buffer
++ * @dma_addr: DMA base address of the buffer
++ * @skb: The associated socket buffer, if any.
++ * If both this and page are %NULL, the buffer slot is currently free.
++ * @page: The associated page buffer, if any.
++ * If both this and skb are %NULL, the buffer slot is currently free.
++ * @data: Pointer to ethernet header
++ * @len: Buffer length, in bytes.
++ * @unmap_addr: DMA address to unmap
++ */
++struct efx_rx_buffer {
++ dma_addr_t dma_addr;
++ struct sk_buff *skb;
++ struct page *page;
++ char *data;
++ unsigned int len;
++ dma_addr_t unmap_addr;
++};
++
++/**
++ * struct efx_rx_queue - An Efx RX queue
++ * @efx: The associated Efx NIC
++ * @queue: DMA queue number
++ * @used: Queue is used by net driver
++ * @channel: The associated channel
++ * @buffer: The software buffer ring
++ * @rxd: The hardware descriptor ring
++ * @added_count: Number of buffers added to the receive queue.
++ * @notified_count: Number of buffers given to NIC (<= @added_count).
++ * @removed_count: Number of buffers removed from the receive queue.
++ * @add_lock: Receive queue descriptor add spin lock.
++ * This lock must be held in order to add buffers to the RX
++ * descriptor ring (rxd and buffer) and to update added_count (but
++ * not removed_count).
++ * @max_fill: RX descriptor maximum fill level (<= ring size)
++ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
++ * (<= @max_fill)
++ * @fast_fill_limit: The level to which a fast fill will fill
++ * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
++ * @min_fill: RX descriptor minimum non-zero fill level.
++ * This records the minimum fill level observed when a ring
++ * refill was triggered.
++ * @min_overfill: RX descriptor minimum overflow fill level.
++ * This records the minimum fill level at which RX queue
++ * overflow was observed. It should never be set.
++ * @alloc_page_count: RX allocation strategy counter.
++ * @alloc_skb_count: RX allocation strategy counter.
++ * @work: Descriptor push work thread
++ * @buf_page: Page for next RX buffer.
++ * We can use a single page for multiple RX buffers. This tracks
++ * the remaining space in the allocation.
++ * @buf_dma_addr: Page's DMA address.
++ * @buf_data: Page's host address.
++ * @debug_dir: debugfs directory
++ */
++struct efx_rx_queue {
++ struct efx_nic *efx;
++ int queue;
++ int used;
++ struct efx_channel *channel;
++ struct efx_rx_buffer *buffer;
++ struct efx_special_buffer rxd;
++
++ int added_count;
++ int notified_count;
++ int removed_count;
++ spinlock_t add_lock;
++ unsigned int max_fill;
++ unsigned int fast_fill_trigger;
++ unsigned int fast_fill_limit;
++ unsigned int min_fill;
++ unsigned int min_overfill;
++ unsigned int alloc_page_count;
++ unsigned int alloc_skb_count;
++ struct delayed_work work;
++ unsigned int slow_fill_count;
++
++ struct page *buf_page;
++ dma_addr_t buf_dma_addr;
++ char *buf_data;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ struct dentry *debug_dir;
++#endif
++};
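++
++/* A note on the watermarks above (inferred from the field descriptions):
++ * the current fill level is added_count - removed_count, and
++ *	fast_fill_trigger <= fast_fill_limit <= max_fill
++ * so a refill is triggered when the level drops below fast_fill_trigger
++ * and tops the ring back up to fast_fill_limit.
++ */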
++
++/**
++ * struct efx_buffer - An Efx general-purpose buffer
++ * @addr: host base address of the buffer
++ * @dma_addr: DMA base address of the buffer
++ * @len: Buffer length, in bytes
++ *
++ * Falcon uses these buffers for its interrupt status registers and
++ * MAC stats dumps.
++ */
++struct efx_buffer {
++ void *addr;
++ dma_addr_t dma_addr;
++ unsigned int len;
++};
++
++
++/* Flags for channel->used_flags */
++#define EFX_USED_BY_RX 1
++#define EFX_USED_BY_TX 2
++#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
++
++enum efx_rx_alloc_method {
++ RX_ALLOC_METHOD_AUTO = 0,
++ RX_ALLOC_METHOD_SKB = 1,
++ RX_ALLOC_METHOD_PAGE = 2,
++};
++
++/**
++ * struct efx_channel - An Efx channel
++ *
++ * A channel comprises an event queue, at least one TX queue, at least
++ * one RX queue, and an associated tasklet for processing the event
++ * queue.
++ *
++ * @efx: Associated Efx NIC
++ * @evqnum: Event queue number
++ * @channel: Channel instance number
++ * @used_flags: Channel is used by net driver
++ * @enabled: Channel enabled indicator
++ * @irq: IRQ number (MSI and MSI-X only)
++ * @has_interrupt: Channel has an interrupt
++ * @irq_moderation: IRQ moderation value (in us)
++ * @napi_dev: Net device used with NAPI
++ * @napi_str: NAPI control structure
++ * @reset_work: Scheduled reset work thread
++ * @work_pending: Is work pending via NAPI?
++ * @eventq: Event queue buffer
++ * @eventq_read_ptr: Event queue read pointer
++ * @last_eventq_read_ptr: Last event queue read pointer value.
++ * @eventq_magic: Event queue magic value for driver-generated test events
++ * @debug_dir: debugfs directory
++ * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
++ * and diagnostic counters
++ * @rx_alloc_push_pages: RX allocation method currently in use for pushing
++ * descriptors
++ * @rx_alloc_pop_pages: RX allocation method currently in use for popping
++ * descriptors
++ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
++ * @n_rx_ip_frag_err: Count of RX IP fragment errors
++ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
++ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
++ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
++ * @n_rx_overlength: Count of RX_OVERLENGTH errors
++ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
++ */
++struct efx_channel {
++ struct efx_nic *efx;
++ int evqnum;
++ int channel;
++ int used_flags;
++ int enabled;
++ int irq;
++ unsigned int has_interrupt;
++ unsigned int irq_moderation;
++ struct net_device *napi_dev;
++#if !defined(EFX_HAVE_OLD_NAPI)
++ struct napi_struct napi_str;
++#endif
++ struct work_struct reset_work;
++ int work_pending;
++ struct efx_special_buffer eventq;
++ unsigned int eventq_read_ptr;
++ unsigned int last_eventq_read_ptr;
++ unsigned int eventq_magic;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ struct dentry *debug_dir;
++#endif
++
++ int rx_alloc_level;
++ int rx_alloc_push_pages;
++ int rx_alloc_pop_pages;
++
++ unsigned n_rx_tobe_disc;
++ unsigned n_rx_ip_frag_err;
++ unsigned n_rx_ip_hdr_chksum_err;
++ unsigned n_rx_tcp_udp_chksum_err;
++ unsigned n_rx_frm_trunc;
++ unsigned n_rx_overlength;
++ unsigned n_skbuff_leaks;
++
++ /* Used to pipeline received packets in order to optimise memory
++ * access with prefetches.
++ */
++ struct efx_rx_buffer *rx_pkt;
++ int rx_pkt_csummed;
++
++};
++
++/**
++ * struct efx_blinker - S/W LED blinking context
++ * @led_num: LED ID (board-specific meaning)
++ * @state: Current state - on or off
++ * @resubmit: Timer resubmission flag
++ * @timer: Control timer for blinking
++ */
++struct efx_blinker {
++ int led_num;
++ int state;
++ int resubmit;
++ struct timer_list timer;
++};
++
++
++/**
++ * struct efx_board - board information
++ * @type: Board model type
++ * @major: Major rev. ('A', 'B' ...)
++ * @minor: Minor rev. (0, 1, ...)
++ * @init: Initialisation function
++ * @init_leds: Sets up board LEDs
++ * @set_fault_led: Turns the fault LED on or off
++ * @blink: Starts/stops blinking
++ * @monitor: Board-specific health check function
++ * @fini: Cleanup function
++ * @interpret_sensor: Function to interpret LM87 sensor meanings.
++ * Returns %FALSE if no board-specific meaning.
++ * @mwatts: Power requirements (mW)
++ * @blinker: used to blink LEDs in software
++ * @lm87_addr: I2C address of LM87 monitoring chip (0 if absent)
++ */
++struct efx_board {
++ int type;
++ int major;
++ int minor;
++ int (*init) (struct efx_nic *nic);
++ /* As the LEDs are typically attached to the PHY, they
++ * have a separate init callback that runs later than
++ * board init. */
++ int (*init_leds)(struct efx_nic *efx);
++ int (*monitor) (struct efx_nic *nic);
++ void (*set_fault_led) (struct efx_nic *efx, int state);
++ void (*blink) (struct efx_nic *efx, int start);
++ void (*fini) (struct efx_nic *nic);
++ int (*interpret_sensor) (struct efx_nic *nic, int num, unsigned val);
++ unsigned mwatts;
++ struct efx_blinker blinker;
++ unsigned int lm87_addr;
++};
++
++#define STRING_TABLE_LOOKUP(val, member) \
++ ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
++
++extern const char *efx_loopback_mode_names[];
++extern const unsigned int efx_loopback_mode_max;
++#define LOOPBACK_MODE(efx) \
++ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
++
++extern const char *efx_interrupt_mode_names[];
++extern const unsigned int efx_interrupt_mode_max;
++#define INT_MODE(efx) \
++ STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
++
++extern const char *efx_reset_type_names[];
++extern const unsigned int efx_reset_type_max;
++#define RESET_TYPE(type) \
++ STRING_TABLE_LOOKUP(type, efx_reset_type)
++
++extern const char *efx_phy_type_names[];
++extern const unsigned int efx_phy_type_max;
++#define PHY_TYPE(efx) \
++ STRING_TABLE_LOOKUP((efx)->phy_type, efx_phy_type)
++
++extern const char *efx_nic_state_names[];
++extern const unsigned int efx_nic_state_max;
++#define STATE_NAME(efx) \
++ STRING_TABLE_LOOKUP((efx)->state, efx_nic_state)
++
++enum efx_int_mode {
++ /* Be careful if altering; keep the EFX_INT_MODE_USE_MSI macro below in sync */
++ EFX_INT_MODE_MSIX = 0,
++ EFX_INT_MODE_MSI = 1,
++ EFX_INT_MODE_LEGACY = 2,
++ EFX_INT_MODE_MAX /* Insert any new items before this */
++};
++#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
++
++enum phy_type {
++ PHY_TYPE_NONE = 0,
++ PHY_TYPE_CX4_RTMR = 1,
++ PHY_TYPE_1G_ALASKA = 2,
++ PHY_TYPE_10XPRESS = 3,
++ PHY_TYPE_XFP = 4,
++ PHY_TYPE_PM8358 = 6,
++ PHY_TYPE_MAX /* Insert any new items before this */
++};
++
++#define PHY_ADDR_INVALID 0xff
++
++#define EFX_IS10G(efx) ((efx)->is_10g)
++#define EFX_ISCLAUSE45(efx) ((efx)->phy_type != PHY_TYPE_1G_ALASKA)
++
++enum nic_state {
++ STATE_INIT = 0, /* suspend_lock always held */
++ STATE_RUNNING = 1,
++ STATE_FINI = 2,
++ STATE_RESETTING = 3, /* suspend_lock always held */
++ STATE_DISABLED = 4,
++ STATE_MAX,
++};
++
++/*
++ * Alignment of page-allocated RX buffers
++ *
++ * Controls the number of bytes inserted at the start of an RX buffer.
++ * This is the equivalent of NET_IP_ALIGN [which controls the alignment
++ * of the skb->head for hardware DMA].
++ */
++#ifdef __ia64__
++#define EFX_PAGE_IP_ALIGN 2
++#else
++#define EFX_PAGE_IP_ALIGN 0
++#endif
++
++/*
++ * Alignment of the skb->head which wraps a page-allocated RX buffer
++ *
++ * The skb allocated to wrap an rx_buffer can have this alignment. Since
++ * the data is memcpy'd from the rx_buf, it does not need to be equal to
++ * EFX_PAGE_IP_ALIGN.
++ */
++#define EFX_PAGE_SKB_ALIGN 2
++
++/* Forward declaration */
++struct efx_nic;
++
++/* Pseudo bit-mask flow control field */
++enum efx_fc_type {
++ EFX_FC_RX = 1,
++ EFX_FC_TX = 2,
++ EFX_FC_AUTO = 4,
++};
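++
++/* These values are combined as a bit-mask, e.g. (from mentormac.c):
++ *
++ *	tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
++ *
++ * with EFX_FC_AUTO selecting autonegotiated pause settings (an
++ * inference from the name).
++ */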
++
++/**
++ * struct efx_mac_operations - Efx MAC operations table
++ * @mac_writel: Write dword to MAC register
++ * @mac_readl: Read dword from a MAC register
++ * @init: Initialise MAC and PHY
++ * @reconfigure: Reconfigure MAC and PHY (e.g. for new link parameters)
++ * @update_stats: Update statistics
++ * @fini: Shut down MAC and PHY
++ * @check_hw: Check hardware
++ * @fake_phy_event: Simulate a PHY event on a port
++ * @get_settings: Get ethtool settings
++ * @set_settings: Set ethtool settings
++ * @set_pause: Set pause parameters
++ */
++struct efx_mac_operations {
++ void (*mac_writel) (struct efx_nic *efx,
++ efx_dword_t *value, unsigned int mac_reg);
++ void (*mac_readl) (struct efx_nic *efx,
++ efx_dword_t *value, unsigned int mac_reg);
++ int (*init) (struct efx_nic *efx);
++ void (*reconfigure) (struct efx_nic *efx);
++ void (*update_stats) (struct efx_nic *efx);
++ void (*fini) (struct efx_nic *efx);
++ int (*check_hw) (struct efx_nic *efx);
++ void (*fake_phy_event) (struct efx_nic *efx);
++
++ int (*get_settings) (struct efx_nic *efx,
++ struct ethtool_cmd *ecmd);
++ int (*set_settings) (struct efx_nic *efx,
++ struct ethtool_cmd *ecmd);
++ int (*set_pause) (struct efx_nic *efx,
++ enum efx_fc_type pause_params);
++};
++
++/**
++ * struct efx_phy_operations - Efx PHY operations table
++ * @init: Initialise PHY
++ * @fini: Shut down PHY
++ * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
++ * @clear_interrupt: Clear down interrupt
++ * @blink: Blink LEDs
++ * @check_hw: Check hardware
++ * @reset_xaui: Reset XAUI side of PHY (for software-sequenced reset)
++ * @mmds: MMD presence mask
++ * @loopbacks: Supported loopback modes mask
++ * @startup_loopback: Loopback mode for start-of-day self-test
++ */
++struct efx_phy_operations {
++ int (*init) (struct efx_nic *efx);
++ void (*fini) (struct efx_nic *efx);
++ void (*reconfigure) (struct efx_nic *efx);
++ void (*clear_interrupt) (struct efx_nic *efx);
++ int (*check_hw) (struct efx_nic *efx);
++ void (*reset_xaui) (struct efx_nic *efx);
++ int mmds;
++ unsigned loopbacks;
++ unsigned startup_loopback;
++};
++
++/*
++ * Efx extended statistics
++ *
++ * Not all statistics are provided by all supported MACs. The purpose
++ * of this structure is to contain the raw statistics provided by each
++ * MAC.
++ */
++struct efx_mac_stats {
++ u64 tx_bytes;
++ u64 tx_good_bytes;
++ u64 tx_bad_bytes;
++ unsigned long tx_packets;
++ unsigned long tx_bad;
++ unsigned long tx_pause;
++ unsigned long tx_control;
++ unsigned long tx_unicast;
++ unsigned long tx_multicast;
++ unsigned long tx_broadcast;
++ unsigned long tx_lt64;
++ unsigned long tx_64;
++ unsigned long tx_65_to_127;
++ unsigned long tx_128_to_255;
++ unsigned long tx_256_to_511;
++ unsigned long tx_512_to_1023;
++ unsigned long tx_1024_to_15xx;
++ unsigned long tx_15xx_to_jumbo;
++ unsigned long tx_gtjumbo;
++ unsigned long tx_collision;
++ unsigned long tx_single_collision;
++ unsigned long tx_multiple_collision;
++ unsigned long tx_excessive_collision;
++ unsigned long tx_deferred;
++ unsigned long tx_late_collision;
++ unsigned long tx_excessive_deferred;
++ unsigned long tx_non_tcpudp;
++ unsigned long tx_mac_src_error;
++ unsigned long tx_ip_src_error;
++ u64 rx_bytes;
++ u64 rx_good_bytes;
++ u64 rx_bad_bytes;
++ unsigned long rx_packets;
++ unsigned long rx_good;
++ unsigned long rx_bad;
++ unsigned long rx_pause;
++ unsigned long rx_control;
++ unsigned long rx_unicast;
++ unsigned long rx_multicast;
++ unsigned long rx_broadcast;
++ unsigned long rx_lt64;
++ unsigned long rx_64;
++ unsigned long rx_65_to_127;
++ unsigned long rx_128_to_255;
++ unsigned long rx_256_to_511;
++ unsigned long rx_512_to_1023;
++ unsigned long rx_1024_to_15xx;
++ unsigned long rx_15xx_to_jumbo;
++ unsigned long rx_gtjumbo;
++ unsigned long rx_bad_lt64;
++ unsigned long rx_bad_64_to_15xx;
++ unsigned long rx_bad_15xx_to_jumbo;
++ unsigned long rx_bad_gtjumbo;
++ unsigned long rx_overflow;
++ unsigned long rx_missed;
++ unsigned long rx_false_carrier;
++ unsigned long rx_symbol_error;
++ unsigned long rx_align_error;
++ unsigned long rx_length_error;
++ unsigned long rx_internal_error;
++ unsigned long rx_good_lt64;
++};
++
++/* Number of bits used in a multicast filter hash address */
++#define EFX_MCAST_HASH_BITS 8
++
++/* Number of (single-bit) entries in a multicast filter hash */
++#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
++
++/* An Efx multicast filter hash */
++union efx_multicast_hash {
++ u8 byte[EFX_MCAST_HASH_ENTRIES / sizeof(u8)];
++ efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t)];
++};
++
++/* Efx Error condition statistics */
++struct efx_nic_errors {
++ atomic_t missing_event;
++ atomic_t rx_reset;
++ atomic_t rx_desc_fetch;
++ atomic_t tx_desc_fetch;
++ atomic_t spurious_tx;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ struct dentry *debug_dir;
++#endif
++};
++
++/**
++ * struct efx_nic - an Efx NIC
++ * @name: Device name (net device name or bus id before net device registered)
++ * @pci_dev: The PCI device
++ * @pci_dev2: The secondary PCI device if present
++ * @type: Controller type attributes
++ * @dma_mask: DMA mask
++ * @legacy_irq: IRQ number
++ * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
++ * @refill_workqueue: RX refill workqueue
++ * @reset_work: Scheduled reset workitem
++ * @monitor_work: Hardware monitor workitem
++ * @membase_phys: Memory BAR value as physical address
++ * @membase: Memory BAR value
++ * @biu_lock: BIU (bus interface unit) lock
++ * @interrupt_mode: Interrupt mode
++ * @is_asic: Is ASIC (else FPGA)
++ * @is_10g: Is set to 10G (else 1G)
++ * @external_sram_cfg: Size and number of banks of external SRAM
++ * @i2c: I2C interface
++ * @board_info: Board-level information
++ * @state: Device state flag. Can only be manipulated when both
++ * suspend_lock and rtnl_lock are held. Can be read when
++ * either is held.
++ * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
++ * @suspend_lock: Device suspend lock. This must not be acquired with
++ * rtnl_lock held.
++ * @tx_queue: TX DMA queues
++ * @rx_queue: RX DMA queues
++ * @channel: Channels
++ * @rss_queues: Number of RSS queues
++ * @rx_buffer_len: RX buffer length
++ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
++ * @errors: Error condition stats
++ * @irq_status: Interrupt status buffer
++ * @last_irq_cpu: Last CPU to handle interrupt.
++ * This register is written with the SMP processor ID whenever an
++ * interrupt is handled. It is used by falcon_test_interrupt()
++ * to verify that an interrupt has occurred.
++ * @spi_flash: SPI flash device
++ * This field will be %NULL if no flash device is present.
++ * @spi_eeprom: SPI EEPROM device
++ * This field will be %NULL if no EEPROM device is present.
++ * @spi_lock: SPI bus lock
++ * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
++ * @nic_data: Hardware-dependent state
++ * @mac_lock: MAC access lock. Protects efx->port_enabled/net_dev_registered
++ * and efx_reconfigure_port()
++ * @port_enabled: Port enabled indicator.
++ * Serialises efx_stop_all and efx_start_all with kernel interfaces.
++ * Safe to read under the rtnl_lock, mac_lock, or netif_tx_lock, but
++ * all three must be held to modify it.
++ * @net_dev_registered: Port is registered with operating system.
++ * @port_initialized: Port initialized?
++ * @net_dev: Operating system network device. Consider holding the rtnl lock
++ * @rx_checksum_enabled: RX checksumming enabled
++ * @netif_stop_count: Port stop count
++ * @netif_stop_lock: Port stop lock
++ * @mac_stats: MAC statistics
++ * @stats: Net device statistics.
++ * Hardware-specific code fills in @mac_stats, which provides a
++ * detailed breakdown. Generic code aggregates these statistics
++ * into a standard &struct net_device_stats.
++ * @stats_buffer: DMA buffer for statistics
++ * @stats_lock: Statistics update lock
++ * @mac_op: MAC interface
++ * @mac_address: Permanent MAC address
++ * @phy_type: PHY type
++ * @phy_lock: PHY access lock
++ * @phy_op: PHY interface
++ * @phy_data: PHY private data (including PHY-specific stats)
++ * @mii: PHY interface
++ * @phy_powered: PHY power state
++ * @tx_disabled: PHY transmitter turned off
++ * @link_up: Link status
++ * @link_options: Link options (MII/GMII format)
++ * @n_link_state_changes: Number of times the link has changed state
++ * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
++ * @multicast_hash: Multicast hash table
++ * @flow_control: Flow control flags - separate RX/TX so can't use link_options
++ * @reconfigure_work: work item for dealing with PHY events
++ * @loopback_mode: Loopback status
++ * @loopback_modes: Supported loopback mode bitmask
++ * @loopback_selftest: Offline self-test private state
++ * @silicon_rev: Silicon revision description for driverlink
++ * @dl_info: Linked list of hardware parameters exposed through driverlink
++ * @dl_node: Driverlink port list
++ * @dl_device_list: Driverlink device list
++ * @dl_cb: Driverlink callbacks table
++ * @dl_cb_dev: Driverlink callback owner devices
++ * @debug_dir: NIC debugfs directory
++ * @debug_symlink: NIC debugfs sym-link (nic_eth\%d)
++ * @debug_port_dir: Port debugfs directory
++ * @debug_port_symlink: Port debugfs sym-link (if_eth\%d)
++ *
++ * The @priv field of the corresponding &struct net_device points to
++ * this.
++ */
++struct efx_nic {
++ char name[IFNAMSIZ];
++ struct pci_dev *pci_dev;
++ struct pci_dev *pci_dev2;
++#if !defined(EFX_USE_PCI_DEV_REVISION)
++ u8 revision;
++#endif
++ const struct efx_nic_type *type;
++ dma_addr_t dma_mask;
++ int legacy_irq;
++ struct workqueue_struct *workqueue;
++#if !defined(EFX_USE_CANCEL_DELAYED_WORK_SYNC)
++ /* Since we can't use cancel_delayed_work_sync(), efx_reset() has to
++ * flush efx->workqueue to serialise against efx_reconfigure_port()
++ * and efx_monitor(), so it can't also run on that workqueue. */
++ struct workqueue_struct *reset_workqueue;
++#endif
++ struct workqueue_struct *refill_workqueue;
++ struct work_struct reset_work;
++ struct delayed_work monitor_work;
++ unsigned long membase_phys;
++ void __iomem *membase;
++ spinlock_t biu_lock;
++ enum efx_int_mode interrupt_mode;
++ unsigned int is_asic:1;
++ unsigned int is_10g:1;
++ int external_sram_cfg;
++
++ struct efx_i2c_interface i2c;
++ struct efx_board board_info;
++
++ enum nic_state state;
++ enum reset_type reset_pending;
++
++ struct semaphore suspend_lock;
++
++ struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
++ struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
++ struct efx_channel channel[EFX_MAX_CHANNELS];
++
++ int rss_queues;
++ unsigned int rx_buffer_len;
++ unsigned int rx_buffer_order;
++
++ struct efx_nic_errors errors;
++
++ struct efx_buffer irq_status;
++ volatile signed int last_irq_cpu;
++
++ struct efx_spi_device *spi_flash;
++ struct efx_spi_device *spi_eeprom;
++ struct mutex spi_lock;
++
++ unsigned n_rx_nodesc_drop_cnt;
++
++ void *nic_data;
++
++ struct mutex mac_lock;
++ int port_enabled;
++ int net_dev_registered;
++
++ int port_initialized;
++ struct net_device *net_dev;
++ int rx_checksum_enabled;
++
++ atomic_t netif_stop_count;
++ spinlock_t netif_stop_lock;
++
++ struct efx_mac_stats mac_stats;
++ struct net_device_stats stats;
++ struct efx_buffer stats_buffer;
++ spinlock_t stats_lock;
++
++ struct efx_mac_operations *mac_op;
++ unsigned char mac_address[ETH_ALEN];
++
++ enum phy_type phy_type;
++ spinlock_t phy_lock;
++ struct efx_phy_operations *phy_op;
++ void *phy_data;
++ struct mii_if_info mii;
++ unsigned phy_powered;
++ unsigned tx_disabled;
++
++ int link_up;
++ unsigned int link_options;
++ unsigned int n_link_state_changes;
++
++ int promiscuous;
++ union efx_multicast_hash multicast_hash;
++ enum efx_fc_type flow_control;
++ struct work_struct reconfigure_work;
++
++ enum efx_loopback_mode loopback_mode;
++ unsigned int loopback_modes;
++ unsigned int startup_loopbacks;
++
++ void *loopback_selftest;
++
++ const char *silicon_rev;
++ struct efx_dl_device_info *dl_info;
++ struct list_head dl_node;
++ struct list_head dl_device_list;
++ struct efx_dl_callbacks dl_cb;
++ struct efx_dl_cb_devices dl_cb_dev;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ struct dentry *debug_dir;
++ struct dentry *debug_symlink;
++ struct dentry *debug_port_dir;
++ struct dentry *debug_port_symlink;
++#endif
++};
++
++/**
++ * struct efx_nic_type - Efx device type definition
++ * @is_dual_func: Is dual-function (else single-function)
++ * @mem_bar: Memory BAR number
++ * @mem_map_size: Memory BAR mapped size
++ * @txd_ptr_tbl_base: TX descriptor ring base address
++ * @rxd_ptr_tbl_base: RX descriptor ring base address
++ * @buf_tbl_base: Buffer table base address
++ * @evq_ptr_tbl_base: Event queue pointer table base address
++ * @evq_rptr_tbl_base: Event queue read-pointer table base address
++ * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
++ * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
++ * @evq_size: Event queue size (must be a power of two)
++ * @max_dma_mask: Maximum possible DMA mask
++ * @tx_dma_mask: TX DMA mask
++ * @bug5391_mask: Address mask for bug 5391 workaround
++ * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
++ * @rx_xon_thresh: RX FIFO XON watermark (bytes)
++ * @rx_buffer_padding: Padding added to each RX buffer
++ * @max_interrupt_mode: Highest capability interrupt mode supported
++ * from &enum efx_int_mode.
++ * @phys_addr_channels: Number of channels with physically addressed
++ * descriptors
++ */
++struct efx_nic_type {
++ unsigned int is_dual_func;
++ unsigned int mem_bar;
++ unsigned int mem_map_size;
++ unsigned int txd_ptr_tbl_base;
++ unsigned int rxd_ptr_tbl_base;
++ unsigned int buf_tbl_base;
++ unsigned int evq_ptr_tbl_base;
++ unsigned int evq_rptr_tbl_base;
++
++ unsigned int txd_ring_mask;
++ unsigned int rxd_ring_mask;
++ unsigned int evq_size;
++ dma_addr_t max_dma_mask;
++ unsigned int tx_dma_mask;
++ unsigned bug5391_mask;
++
++ int rx_xoff_thresh;
++ int rx_xon_thresh;
++ unsigned int rx_buffer_padding;
++ unsigned int max_interrupt_mode;
++ unsigned int phys_addr_channels;
++};
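++
++/* The ring masks above are the ring size minus one (ring sizes are
++ * powers of two), so ring indices can be computed with a cheap AND
++ * rather than a modulo, e.g.:
++ *
++ *	index = rx_queue->added_count & efx->type->rxd_ring_mask;
++ */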
++
++/**************************************************************************
++ *
++ * Prototypes and inline functions
++ *
++ *************************************************************************/
++
++/* Iterate over all used channels */
++#define efx_for_each_channel(_channel, _efx) \
++ for (_channel = &_efx->channel[0]; \
++ _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
++ _channel++) \
++ if (!_channel->used_flags) \
++ continue; \
++ else
++
++/* Iterate over all used channels with interrupts */
++#define efx_for_each_channel_with_interrupt(_channel, _efx) \
++ for (_channel = &_efx->channel[0]; \
++ _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
++ _channel++) \
++ if (!(_channel->used_flags && _channel->has_interrupt)) \
++ continue; \
++ else
++
++/* Iterate over all used TX queues */
++#define efx_for_each_tx_queue(_tx_queue, _efx) \
++ for (_tx_queue = &_efx->tx_queue[0]; \
++ _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \
++ _tx_queue++) \
++ if (!_tx_queue->used) \
++ continue; \
++ else
++
++/* Iterate over all TX queues belonging to a channel */
++#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
++ for (_tx_queue = &_channel->efx->tx_queue[0]; \
++ _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \
++ _tx_queue++) \
++ if ((!_tx_queue->used) || \
++ (_tx_queue->channel != _channel)) \
++ continue; \
++ else
++
++/* Iterate over all used RX queues */
++#define efx_for_each_rx_queue(_rx_queue, _efx) \
++ for (_rx_queue = &_efx->rx_queue[0]; \
++ _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \
++ _rx_queue++) \
++ if (!_rx_queue->used) \
++ continue; \
++ else
++
++/* Iterate over all RX queues belonging to a channel */
++#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
++ for (_rx_queue = &_channel->efx->rx_queue[0]; \
++ _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \
++ _rx_queue++) \
++ if ((!_rx_queue->used) || \
++ (_rx_queue->channel != _channel)) \
++ continue; \
++ else
++
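++/* Each iterator above expands to a "for" statement whose body is the
++ * trailing "else", so it is used like an ordinary loop header;
++ * illustratively, with a hypothetical process():
++ *
++ *	struct efx_channel *channel;
++ *
++ *	efx_for_each_channel(channel, efx)
++ *		process(channel);
++ *
++ * runs the body only for channels with used_flags set. Because of the
++ * embedded "if", the loop body must not be followed by an unbraced
++ * "else".
++ */
++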
++/* Name formats */
++#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
++#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
++#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
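++
++/* Each of these expands to a format string plus its argument, so they
++ * can be passed where a printf-style format is expected, e.g.
++ * (illustratively):
++ *
++ *	snprintf(buf, sizeof(buf), EFX_CHANNEL_NAME(channel));
++ */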
++
++/* Returns a pointer to the specified receive buffer in the RX
++ * descriptor queue.
++ */
++static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
++ unsigned int index)
++{
++ return (&rx_queue->buffer[index]);
++}
++
++/* Set bit in a little-endian bitfield */
++static inline void set_bit_le(int nr, unsigned char *addr)
++{
++ addr[nr / 8] |= (1 << (nr % 8));
++}
++
++/* Clear bit in a little-endian bitfield */
++static inline void clear_bit_le(int nr, unsigned char *addr)
++{
++ addr[nr / 8] &= ~(1 << (nr % 8));
++}
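++
++/* Illustrative use, assuming the multicast filter is stored as a
++ * little-endian bitfield as in &union efx_multicast_hash:
++ *
++ *	set_bit_le(hash_bit, efx->multicast_hash.byte);
++ *
++ * where hash_bit is the hash of the multicast address and "byte" is
++ * assumed to be the byte-array view of the hash table.
++ */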
++
++
++/**
++ * EFX_MAX_FRAME_LEN - calculate maximum frame length
++ *
++ * This calculates the maximum frame length that will be used for a
++ * given MTU. The frame length will be equal to the MTU plus a
++ * constant amount of header space and padding. This is the quantity
++ * that the net driver will program into the MAC as the maximum frame
++ * length.
++ *
++ * The 10G MAC used in Falcon requires 8-byte alignment on the frame
++ * length, so we round up to the nearest 8.
++ */
++#define EFX_MAX_FRAME_LEN(mtu) \
++ ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7)
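++
++/* For example, a standard MTU of 1500 gives 1500 + 14 (ETH_HLEN) +
++ * 4 (VLAN_HLEN) + 4 (FCS) = 1522 bytes, which rounds up to a programmed
++ * maximum frame length of 1528. */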
++
++
++#endif /* EFX_NET_DRIVER_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/null_phy.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/null_phy.c
+--- linux-2.6.18.8/drivers/net/sfc/null_phy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/null_phy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,62 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "efx.h"
++#include "falcon.h"
++#include "gmii.h"
++#include "phy.h"
++
++static int falcon_null_phy_check_hw(struct efx_nic *efx)
++{
++ int link_ok = falcon_xaui_link_ok(efx);
++
++ /* Generate the PHY event that a real PHY would have generated */
++ if (link_ok != efx->link_up) {
++ efx->link_up = link_ok;
++ efx->mac_op->fake_phy_event(efx);
++ }
++
++ return 0;
++}
++
++static void falcon_null_phy_reconfigure(struct efx_nic *efx)
++{
++ /* CX4 is always 10000FD only */
++ efx->link_options = GM_LPA_10000FULL;
++
++ falcon_null_phy_check_hw(efx);
++}
++
++struct efx_phy_operations falcon_null_phy_ops = {
++ .reconfigure = falcon_null_phy_reconfigure,
++ .check_hw = falcon_null_phy_check_hw,
++ .fini = efx_port_dummy_op_void,
++ .clear_interrupt = efx_port_dummy_op_void,
++ .init = efx_port_dummy_op_int,
++ .reset_xaui = efx_port_dummy_op_void,
++ .mmds = 0,
++ .loopbacks = 0,
++ .startup_loopback = 0,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/phy.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/phy.c
+--- linux-2.6.18.8/drivers/net/sfc/phy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/phy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,28 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "net_driver.h"
++#include "phy.h"
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/phy.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/phy.h
+--- linux-2.6.18.8/drivers/net/sfc/phy.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/phy.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,90 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_PHY_H
++#define EFX_PHY_H
++
++/****************************************************************************
++ * 10Xpress (SFX7101) PHY
++ */
++extern struct efx_phy_operations falcon_tenxpress_phy_ops;
++
++enum tenxpress_state {
++ TENXPRESS_STATUS_OFF = 0,
++ TENXPRESS_STATUS_OTEMP = 1,
++ TENXPRESS_STATUS_NORMAL = 2,
++};
++
++extern void tenxpress_set_state(struct efx_nic *efx,
++ enum tenxpress_state state);
++extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
++extern void tenxpress_crc_err(struct efx_nic *efx);
++
++/****************************************************************************
++ * Marvell 88E1111 "Alaska" PHY control
++ */
++extern struct efx_phy_operations alaska_phy_operations;
++
++/****************************************************************************
++* Exported functions from the driver for Transwitch CX4 retimer
++*/
++extern struct efx_phy_operations falcon_txc_phy_ops;
++
++#define TXC_GPIO_DIR_INPUT (0)
++#define TXC_GPIO_DIR_OUTPUT (1)
++
++extern void txc_set_gpio_dir(struct efx_nic *p, int pin, int dir);
++extern void txc_set_gpio_val(struct efx_nic *p, int pin, int val);
++
++/****************************************************************************
++ * Exported functions from the driver for PMC PM8358 PHY
++ */
++extern struct efx_phy_operations falcon_pm8358_phy_ops;
++
++/****************************************************************************
++ * Exported functions from the driver for XFP optical PHYs
++ */
++extern struct efx_phy_operations falcon_xfp_phy_ops;
++
++/* The QUAKE XFP PHY provides various H/W control states for LEDs */
++#define QUAKE_LED_LINK_INVAL (0)
++#define QUAKE_LED_LINK_STAT (1)
++#define QUAKE_LED_LINK_ACT (2)
++#define QUAKE_LED_LINK_ACTSTAT (3)
++#define QUAKE_LED_OFF (4)
++#define QUAKE_LED_ON (5)
++#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
++/* What link the LED tracks */
++#define QUAKE_LED_TXLINK (0)
++#define QUAKE_LED_RXLINK (8)
++
++extern void xfp_set_led(struct efx_nic *p, int led, int state);
++
++/****************************************************************************
++ * NULL PHY ops
++ */
++extern struct efx_phy_operations falcon_null_phy_ops;
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/pm8358_phy.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/pm8358_phy.c
+--- linux-2.6.18.8/drivers/net/sfc/pm8358_phy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/pm8358_phy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,206 @@
++/* Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++/*
++ * Driver for PMC-Sierra PM8358 XAUI PHY
++ */
++
++#include <linux/delay.h>
++#include "efx.h"
++#include "gmii.h"
++#include "mdio_10g.h"
++#include "phy.h"
++
++#define PM8358_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_DTEXS)
++#define PM8358_LOOPBACKS (1 << LOOPBACK_PHY)
++
++/* PHY-specific definitions */
++/* Master ID and Global Performance Monitor Update */
++#define PMC_MASTER_REG (0xd000)
++/* Analog TX/RX settings under software control */
++#define PMC_MASTER_ANLG_CTRL (1 << 11)
++
++#define PMC_MCONF2_REG (0xd002)
++/* Drive TX off centre of data eye (1) vs. clock edge (0) */
++#define PMC_MCONF2_TEDGE (1 << 2)
++/* Drive RX off centre of data eye (1) vs. clock edge (0) */
++#define PMC_MCONF2_REDGE (1 << 3)
++
++/* Analog RX settings */
++#define PMC_ANALOG_RX_CFG0 (0xd025)
++#define PMC_ANALOG_RX_CFG1 (0xd02d)
++#define PMC_ANALOG_RX_CFG2 (0xd035)
++#define PMC_ANALOG_RX_CFG3 (0xd03d)
++#define PMC_ANALOG_RX_TERM (1 << 15) /* Bit 15 of RX CFG: 0 for 100 ohms
++ floating, 1 for 50 ohms to 1.2V */
++#define PMC_ANALOG_RX_EQ_MASK (3 << 8)
++#define PMC_ANALOG_RX_EQ_NONE (0 << 8)
++#define PMC_ANALOG_RX_EQ_HALF (1 << 8)
++#define PMC_ANALOG_RX_EQ_FULL (2 << 8)
++#define PMC_ANALOG_RX_EQ_RSVD (3 << 8)
++
++/* Reset the DTE XS MMD. */
++#define PMC_MAX_RESET_TIME 500
++#define PMC_RESET_WAIT 10
++
++static int pmc_reset_phy(struct efx_nic *efx)
++{
++ int rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_DTEXS,
++ PMC_MAX_RESET_TIME / PMC_RESET_WAIT,
++ PMC_RESET_WAIT);
++ if (rc >= 0) {
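++ /* rc counts the PMC_RESET_WAIT (10 ms) periods left; the
++ * literal '0' after %d scales the printed value to ms */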
++ EFX_TRACE(efx, "PMC8358: came out of reset with "
++ "%d0 ms left\n", rc);
++ rc = 0;
++ } else {
++ EFX_ERR(efx, "PMC8358: reset timed out!\n");
++ }
++ return rc;
++}
++
++
++static void pmc_full_rx_eq(struct efx_nic *efx)
++{
++ int i, reg;
++
++ /* Enable software control of analog settings */
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_DTEXS, PMC_MASTER_REG);
++ reg |= PMC_MASTER_ANLG_CTRL;
++
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_DTEXS, PMC_MASTER_REG, reg);
++
++ /* Turn RX eq on full for all channels. */
++ for (i = 0; i < 3; i++) {
++ /* The analog CFG registers are evenly spaced 8 apart */
++ u16 addr = PMC_ANALOG_RX_CFG0 + 8 * i;
++
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_DTEXS, addr);
++ reg = (reg & ~PMC_ANALOG_RX_EQ_MASK) | PMC_ANALOG_RX_EQ_FULL;
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_DTEXS, addr, reg);
++ }
++}
++
++static void pmc_set_data_edges(struct efx_nic *efx)
++{
++ int reg;
++ /* Set TEDGE, clear REDGE */
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_DTEXS, PMC_MCONF2_REG);
++ reg &= ~PMC_MCONF2_REDGE;
++ reg |= PMC_MCONF2_TEDGE;
++
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_DTEXS, PMC_MCONF2_REG, reg);
++}
++
++static int pm8358_phy_init(struct efx_nic *efx)
++{
++ u32 devid;
++ int rc;
++
++ /* The GLB_CTL reset line has been pulled before this is called,
++ * and it may take up to 5ms for the PLLs to synchronise after
++ * this is done. Best to wait 10ms here */
++ schedule_timeout_uninterruptible(HZ / 100);
++
++ rc = pmc_reset_phy(efx);
++ if (rc < 0)
++ return rc;
++
++ /* Check that all the MMDs we expect are present and responding. We
++ * expect faults on some if the link is down, but not on the PHY XS */
++ rc = mdio_clause45_check_mmds(efx, PM8358_REQUIRED_DEVS, 0);
++ if (rc < 0)
++ return rc;
++
++ devid = mdio_clause45_read_id(efx, MDIO_MMD_DTEXS);
++ EFX_LOG(efx, "PM8358: PHY ID reg %x (OUI %x model %x revision"
++ " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
++ MDIO_ID_REV(devid));
++
++ /* Turn on full RX equalisation */
++ pmc_full_rx_eq(efx);
++
++ /* Adjust RX and TX data edge position */
++ pmc_set_data_edges(efx);
++
++ EFX_LOG(efx, "PM8358: PHY init successful.\n");
++ return rc;
++}
++
++static int pm8358_link_ok(struct efx_nic *efx)
++{
++ return mdio_clause45_links_ok(efx, PM8358_REQUIRED_DEVS);
++}
++
++static int pm8358_phy_check_hw(struct efx_nic *efx)
++{
++ int rc = 0;
++ int link_up = pm8358_link_ok(efx);
++ /* Simulate a PHY event if link state has changed */
++ if (link_up != efx->link_up) {
++ efx->link_up = link_up;
++ efx->mac_op->fake_phy_event(efx);
++ }
++
++ return rc;
++}
++
++static void pm8358_phy_reconfigure(struct efx_nic *efx)
++{
++ int phy_id = efx->mii.phy_id;
++ int ctrl;
++ /* Handle DTE loopback */
++ ctrl = mdio_clause45_read(efx, phy_id, MDIO_MMD_DTEXS,
++ MDIO_MMDREG_CTRL1);
++ if (efx->loopback_mode == LOOPBACK_PHY) {
++ EFX_TRACE(efx, "PM8358: setting DTE loopback\n");
++ ctrl |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
++ } else {
++ if (ctrl & (1 << MDIO_MMDREG_CTRL1_LBACK_LBN))
++ EFX_TRACE(efx,
++ "PM8358: clearing DTE loopback\n");
++ ctrl &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
++ }
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_DTEXS,
++ MDIO_MMDREG_CTRL1, ctrl);
++
++ efx->link_up = pm8358_link_ok(efx);
++ efx->link_options = GM_LPA_10000FULL;
++}
++
++struct efx_phy_operations falcon_pm8358_phy_ops = {
++ .init = pm8358_phy_init,
++ .reconfigure = pm8358_phy_reconfigure,
++ .check_hw = pm8358_phy_check_hw,
++ .fini = efx_port_dummy_op_void,
++ .clear_interrupt = efx_port_dummy_op_void,
++ .reset_xaui = efx_port_dummy_op_void,
++ .mmds = PM8358_REQUIRED_DEVS,
++ .loopbacks = PM8358_LOOPBACKS,
++ .startup_loopback = LOOPBACK_PHY,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/rx.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/rx.c
+--- linux-2.6.18.8/drivers/net/sfc/rx.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/rx.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,798 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/socket.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <net/ip.h>
++#include <net/checksum.h>
++#include "net_driver.h"
++#include "rx.h"
++#include "efx.h"
++#include "falcon.h"
++#include "selftest.h"
++#include "workarounds.h"
++
++
++/* Number of RX descriptors pushed at once. */
++#define EFX_RX_BATCH 8
++
++/* Size of buffer allocated for skb header area. */
++#define EFX_SKB_HEADERS 64u
++
++/*
++ * rx_alloc_method - RX buffer allocation method
++ *
++ * This driver supports two methods for allocating and using RX buffers:
++ * each RX buffer may be backed by an skb or by an order-n page.
++ *
++ * When LRO is in use then the second method has a lower overhead,
++ * since we don't have to allocate then free skbs on reassembled frames.
++ *
++ * Values:
++ * - RX_ALLOC_METHOD_AUTO = 0
++ * - RX_ALLOC_METHOD_SKB = 1
++ * - RX_ALLOC_METHOD_PAGE = 2
++ *
++ * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
++ * controlled by the parameters below.
++ *
++ * - Since pushing and popping descriptors are separated by the rx_queue
++ * size, the watermarks should be ~rxd_size.
++ * - The performance win from using page-based allocation for LRO is less
++ * than the performance hit of using page-based allocation for non-LRO
++ * traffic, so the watermarks should reflect this.
++ *
++ * Each channel maintains a single variable, which it updates as follows:
++ *
++ * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
++ * RX_ALLOC_FACTOR_SKB)
++ *
++ * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
++ * limits the hysteresis), and update the allocation strategy:
++ *
++ * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
++ * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
++ */
++static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
++
++#define RX_ALLOC_LEVEL_LRO 0x2000
++#define RX_ALLOC_LEVEL_MAX 0x3000
++#define RX_ALLOC_FACTOR_LRO 1
++#define RX_ALLOC_FACTOR_SKB -2
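++
++/* Worked example of the hysteresis: from a level of 0x2000, sustained
++ * LRO traffic adds RX_ALLOC_FACTOR_LRO (+1) per reassembled frame and
++ * soon selects page-based allocation, while non-LRO traffic subtracts
++ * 2 per packet and falls back to skb allocation roughly twice as fast;
++ * the 0..RX_ALLOC_LEVEL_MAX clamp bounds how far either trend can run. */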
++
++/* This is the percentage fill level below which new RX descriptors
++ * will be added to the RX descriptor ring.
++ */
++static unsigned int rx_refill_threshold = 90;
++
++/* This is the percentage fill level to which an RX queue will be refilled
++ * when the "RX refill threshold" is reached.
++ */
++static unsigned int rx_refill_limit = 95;
++
++/*
++ * RX maximum head room required.
++ *
++ * This must be at least 1 to prevent overflow and at least 2 to allow
++ * pipelined receives.
++ */
++#define EFX_RXD_HEAD_ROOM 2
++
++/* Macros for zero-order pages (potentially) containing multiple RX buffers */
++#define RX_DATA_OFFSET(_data) \
++ (((unsigned long) (_data)) & (PAGE_SIZE-1))
++#define RX_BUF_OFFSET(_rx_buf) \
++ RX_DATA_OFFSET((_rx_buf)->data)
++
++#define RX_PAGE_SIZE(_efx) \
++ (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
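++
++/* e.g. with 4 KiB pages and rx_buffer_order == 2, RX_PAGE_SIZE() is 16 KiB */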
++
++
++/**
++ * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
++ *
++ * @rx_queue: Efx RX queue
++ * @rx_buf: RX buffer structure to populate
++ *
++ * This allocates memory for a new receive buffer, maps it for DMA,
++ * and populates a struct efx_rx_buffer with the relevant
++ * information. Return a negative error code or 0 on success.
++ */
++static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
++ struct efx_rx_buffer *rx_buf)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ struct net_device *net_dev = efx->net_dev;
++ int skb_len = efx->rx_buffer_len;
++
++ rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
++ if (unlikely(!rx_buf->skb))
++ return -ENOMEM;
++
++ /* Adjust the SKB for padding and checksum */
++ skb_reserve(rx_buf->skb, NET_IP_ALIGN);
++ rx_buf->len = skb_len - NET_IP_ALIGN;
++ rx_buf->data = (char *)rx_buf->skb->data;
++
++ rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ /* Map for DMA */
++ rx_buf->dma_addr = pci_map_single(efx->pci_dev,
++ rx_buf->data, rx_buf->len,
++ PCI_DMA_FROMDEVICE);
++
++ if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
++ /* Free the SKB */
++ dev_kfree_skb_any(rx_buf->skb);
++ rx_buf->skb = NULL;
++ return -EIO;
++ }
++
++ return 0;
++}
++
++/**
++ * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
++ *
++ * @rx_queue: Efx RX queue
++ * @rx_buf: RX buffer structure to populate
++ *
++ * This allocates memory for a new receive buffer, maps it for DMA,
++ * and populates a struct efx_rx_buffer with the relevant
++ * information. Return a negative error code or 0 on success.
++ */
++static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
++ struct efx_rx_buffer *rx_buf)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ int bytes, space, offset;
++
++ bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
++
++ /* If there is space left in the previously allocated page,
++ * then use it. Otherwise allocate a new one */
++ rx_buf->page = rx_queue->buf_page;
++ if (rx_buf->page == NULL) {
++ dma_addr_t dma_addr;
++
++ rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
++ efx->rx_buffer_order);
++ if (unlikely(rx_buf->page == NULL))
++ return -ENOMEM;
++
++ /* Map the entire page for DMA */
++ dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
++ 0, RX_PAGE_SIZE(efx),
++ PCI_DMA_FROMDEVICE);
++
++ if (unlikely(pci_dma_mapping_error(dma_addr))) {
++ __free_pages(rx_buf->page, efx->rx_buffer_order);
++ rx_buf->page = NULL;
++ return -EIO;
++ }
++
++ rx_queue->buf_page = rx_buf->page;
++ rx_queue->buf_dma_addr = dma_addr;
++ rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
++ EFX_PAGE_IP_ALIGN);
++ }
++
++ offset = RX_DATA_OFFSET(rx_queue->buf_data);
++ rx_buf->len = bytes;
++ rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
++ rx_buf->data = rx_queue->buf_data;
++
++ /* Try to pack multiple buffers per page */
++ if (efx->rx_buffer_order == 0) {
++ /* The next buffer starts on the next 512-byte boundary */
++ rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
++ offset += ((bytes + 0x1ff) & ~0x1ff);
++
++ space = RX_PAGE_SIZE(efx) - offset;
++ if (space >= bytes) {
++ /* Refs dropped on kernel releasing each skb */
++ get_page(rx_queue->buf_page);
++ goto out;
++ }
++ }
++
++ /* This is the final RX buffer for this page, so mark it for
++ * unmapping */
++ rx_queue->buf_page = NULL;
++ rx_buf->unmap_addr = rx_queue->buf_dma_addr;
++
++ out:
++ return 0;
++}
++
++/* This allocates memory for a new receive buffer, maps it for DMA,
++ * and populates a struct efx_rx_buffer with the relevant
++ * information.
++ */
++static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
++ struct efx_rx_buffer *new_rx_buf)
++{
++ int rc = 0;
++
++ if (rx_queue->channel->rx_alloc_push_pages) {
++ new_rx_buf->skb = NULL;
++ rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
++ rx_queue->alloc_page_count++;
++ } else {
++ new_rx_buf->page = NULL;
++ rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
++ rx_queue->alloc_skb_count++;
++ }
++
++ if (unlikely(rc < 0))
++ EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
++ rx_queue->queue, rc);
++ return rc;
++}
++
++static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
++ struct efx_rx_buffer *rx_buf)
++{
++ if (rx_buf->page) {
++ EFX_BUG_ON_PARANOID(rx_buf->skb);
++ if (rx_buf->unmap_addr) {
++ pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
++ RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
++ rx_buf->unmap_addr = 0;
++ }
++ } else if (likely(rx_buf->skb)) {
++ pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
++ rx_buf->len, PCI_DMA_FROMDEVICE);
++ }
++}
++
++static inline void efx_free_rx_buffer(struct efx_nic *efx,
++ struct efx_rx_buffer *rx_buf)
++{
++ if (rx_buf->page) {
++ __free_pages(rx_buf->page, efx->rx_buffer_order);
++ rx_buf->page = NULL;
++ } else if (likely(rx_buf->skb)) {
++ dev_kfree_skb_any(rx_buf->skb);
++ rx_buf->skb = NULL;
++ }
++}
++
++inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
++ struct efx_rx_buffer *rx_buf)
++{
++ /* Unmap for DMA */
++ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
++
++ /* Free the skb/page */
++ efx_free_rx_buffer(rx_queue->efx, rx_buf);
++}
++
++/**
++ * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
++ * @rx_queue: RX descriptor queue
++ * @retry: Recheck the fill level
++ *
++ * This will aim to fill the RX descriptor queue up to
++ * @rx_queue->@fast_fill_limit. If there is insufficient atomic
++ * memory to do so, the caller should retry.
++ */
++static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
++ int retry)
++{
++ struct efx_rx_buffer *rx_buf;
++ unsigned fill_level, index;
++ int i, space, rc = 0;
++
++ /* Calculate current fill level. Do this outside the lock,
++ * because most of the time we'll end up not wanting to do the
++ * fill anyway.
++ */
++ fill_level = (rx_queue->added_count - rx_queue->removed_count);
++ EFX_BUG_ON_PARANOID(fill_level >
++ rx_queue->efx->type->rxd_ring_mask + 1);
++
++ /* Don't fill if we don't need to */
++ if (fill_level >= rx_queue->fast_fill_trigger)
++ return 0;
++
++ /* Record minimum fill level */
++ if (unlikely(fill_level < rx_queue->min_fill))
++ if (fill_level)
++ rx_queue->min_fill = fill_level;
++
++ /* Acquire RX add lock. If this lock is contended, then a fast
++ * fill must already be in progress (e.g. in the refill
++ * tasklet), so we don't need to do anything
++ */
++ if (!spin_trylock_bh(&rx_queue->add_lock))
++ return -1;
++
++ retry:
++ /* Recalculate current fill level now that we have the lock */
++ fill_level = (rx_queue->added_count - rx_queue->removed_count);
++ EFX_BUG_ON_PARANOID(fill_level >
++ rx_queue->efx->type->rxd_ring_mask + 1);
++ space = rx_queue->fast_fill_limit - fill_level;
++ if (space < EFX_RX_BATCH)
++ goto out_unlock;
++
++ EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
++ " level %d to level %d using %s allocation\n",
++ rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
++ rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
++
++ do {
++ for (i = 0; i < EFX_RX_BATCH; ++i) {
++ index = (rx_queue->added_count &
++ rx_queue->efx->type->rxd_ring_mask);
++ rx_buf = efx_rx_buffer(rx_queue, index);
++ rc = efx_init_rx_buffer(rx_queue, rx_buf);
++ if (unlikely(rc))
++ goto out;
++ ++rx_queue->added_count;
++ }
++ } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
++
++ EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
++ "to level %d\n", rx_queue->queue,
++ rx_queue->added_count - rx_queue->removed_count);
++
++ out:
++ /* Send write pointer to card. */
++ falcon_notify_rx_desc(rx_queue);
++
++ /* If the fast fill is running from inside the refill tasklet, then
++ * for SMP systems it may be running on a different CPU to
++ * RX event processing, which means that the fill level may now be
++ * out of date. */
++ if (unlikely(retry && (rc == 0)))
++ goto retry;
++
++ out_unlock:
++ /* Release RX add lock */
++ spin_unlock_bh(&rx_queue->add_lock);
++
++ return rc;
++}
++
++/**
++ * efx_fast_push_rx_descriptors - push new RX descriptors quickly
++ * @rx_queue: RX descriptor queue
++ *
++ * This will aim to fill the RX descriptor queue up to
++ * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
++ * it will schedule a work item to immediately continue the fast fill
++ */
++void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
++{
++ int rc;
++
++ rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
++ if (unlikely(rc)) {
++ /* Schedule the work item to run immediately. The hope is
++ * that work is immediately pending to free some memory
++ * (e.g. an RX event or TX completion)
++ */
++ queue_delayed_work(rx_queue->efx->refill_workqueue,
++ &rx_queue->work, 0);
++ }
++}
++
++void efx_rx_work(struct work_struct *data)
++{
++ struct efx_rx_queue *rx_queue;
++ int rc;
++
++#if !defined(EFX_NEED_WORK_API_WRAPPERS)
++ rx_queue = container_of(data, struct efx_rx_queue, work.work);
++#else
++ rx_queue = container_of(data, struct efx_rx_queue, work);
++#endif
++
++ if (unlikely(!rx_queue->channel->enabled))
++ return;
++
++ EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
++ "%d\n", rx_queue->queue, raw_smp_processor_id());
++
++ ++rx_queue->slow_fill_count;
++ /* Push new RX descriptors, allowing at least 1 jiffy for
++ * the kernel to free some more memory. */
++ rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
++ if (rc) {
++ queue_delayed_work(rx_queue->efx->refill_workqueue,
++ &rx_queue->work, 1);
++ }
++}
++
++static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
++ struct efx_rx_buffer *rx_buf,
++ int len, int *discard,
++ int *leak_packet)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
++
++ if (likely(len <= max_len))
++ return;
++
++ /* The packet must be discarded, but this is only a fatal error
++ * if the caller indicated it was
++ */
++ *discard = 1;
++
++ if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
++ EFX_ERR_RL(efx, " RX queue %d seriously overlength "
++ "RX event (0x%x > 0x%x+0x%x). Leaking\n",
++ rx_queue->queue, len, max_len,
++ efx->type->rx_buffer_padding);
++ /* If this buffer was skb-allocated, then the meta
++ * data at the end of the skb will be trashed. So
++ * we have no choice but to leak the fragment.
++ */
++ *leak_packet = (rx_buf->skb != NULL);
++ efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
++ } else {
++ EFX_ERR_RL(efx, " RX queue %d overlength RX event "
++ "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
++ }
++
++ rx_queue->channel->n_rx_overlength++;
++}
++
++/* Allocate and construct an SKB around a struct page.*/
++static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
++ struct efx_nic *efx,
++ int hdr_len)
++{
++ struct sk_buff *skb;
++
++ /* Allocate an SKB to store the headers */
++ skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
++ if (unlikely(skb == NULL)) {
++ EFX_ERR_RL(efx, "RX out of memory for skb\n");
++ return NULL;
++ }
++
++ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
++ EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
++
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
++
++ skb->len = rx_buf->len;
++ skb->truesize = rx_buf->len + sizeof(struct sk_buff);
++ memcpy(skb->data, rx_buf->data, hdr_len);
++ skb->tail += hdr_len;
++
++ /* Append the remaining page onto the frag list */
++ if (unlikely(rx_buf->len > hdr_len)) {
++ struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
++ frag->page = rx_buf->page;
++ frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
++ frag->size = skb->len - hdr_len;
++ skb_shinfo(skb)->nr_frags = 1;
++ skb->data_len = frag->size;
++ } else {
++ __free_pages(rx_buf->page, efx->rx_buffer_order);
++ skb->data_len = 0;
++ }
++
++ /* Ownership has transferred from the rx_buf to skb */
++ rx_buf->page = NULL;
++
++ /* Move past the ethernet header */
++ skb->protocol = eth_type_trans(skb, efx->net_dev);
++
++ return skb;
++}
++
++#if defined(EFX_USE_FASTCALL)
++void fastcall efx_rx_packet(struct efx_rx_queue *rx_queue,
++ unsigned int index, unsigned int len,
++ int checksummed, int discard)
++#else
++void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
++ unsigned int len, int checksummed, int discard)
++#endif
++{
++ struct efx_nic *efx = rx_queue->efx;
++ struct efx_rx_buffer *rx_buf;
++ int leak_packet = 0;
++
++ rx_buf = efx_rx_buffer(rx_queue, index);
++ EFX_BUG_ON_PARANOID(!rx_buf->data);
++ EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
++ EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
++
++ /* This allows the refill path to post another buffer.
++ * EFX_RXD_HEAD_ROOM ensures that the slot we are using
++ * isn't overwritten yet.
++ */
++ rx_queue->removed_count++;
++
++ /* Validate the length encoded in the event vs the descriptor pushed */
++ efx_rx_packet__check_len(rx_queue, rx_buf, len,
++ &discard, &leak_packet);
++
++ EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
++ rx_queue->queue, index,
++ (unsigned long long)rx_buf->dma_addr, len,
++ (checksummed ? " [SUMMED]" : ""),
++ (discard ? " [DISCARD]" : ""));
++
++ /* Discard packet, if instructed to do so */
++ if (unlikely(discard)) {
++ if (unlikely(leak_packet))
++ rx_queue->channel->n_skbuff_leaks++;
++ else
++ /* We haven't called efx_unmap_rx_buffer yet,
++ * so fini the entire rx_buffer here */
++ efx_fini_rx_buffer(rx_queue, rx_buf);
++ return;
++ }
++
++ /* Release card resources - assumes all RX buffers consumed in-order
++ * per RX queue
++ */
++ efx_unmap_rx_buffer(efx, rx_buf);
++
++ /* Prefetch nice and early so data will (hopefully) be in cache by
++ * the time we look at it.
++ */
++ prefetch(rx_buf->data);
++
++ /* Pipeline receives so that we give time for packet headers to be
++ * prefetched into cache.
++ */
++ rx_buf->len = len;
++ if (rx_queue->channel->rx_pkt)
++ __efx_rx_packet(rx_queue->channel,
++ rx_queue->channel->rx_pkt,
++ rx_queue->channel->rx_pkt_csummed);
++ rx_queue->channel->rx_pkt = rx_buf;
++ rx_queue->channel->rx_pkt_csummed = checksummed;
++}
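++
++/* efx_rx_packet() therefore completes the *previous* packet and stashes
++ * the current one in channel->rx_pkt; __efx_rx_packet() below is the
++ * second pipeline stage, run once the new headers have had time to
++ * prefetch into cache. */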
++
++/* Handle a received packet. Second half: Touches packet payload. */
++void __efx_rx_packet(struct efx_channel *channel,
++ struct efx_rx_buffer *rx_buf, int checksummed)
++{
++ struct efx_nic *efx = channel->efx;
++ enum efx_veto veto;
++ struct sk_buff *skb;
++
++ /* If we're in loopback test, then pass the packet directly to the
++ * loopback layer, and free the rx_buf here
++ */
++ if (unlikely(efx->loopback_selftest)) {
++ efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
++ efx_free_rx_buffer(efx, rx_buf);
++ goto done;
++ }
++
++ if (rx_buf->skb) {
++ /* Prefetch more information */
++ prefetch(skb_shinfo(rx_buf->skb));
++
++ /* Reserve space for the data */
++ skb_put(rx_buf->skb, rx_buf->len);
++
++ /* Move past the ethernet header. rx_buf->data still points
++ * at the ethernet header */
++ rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
++ efx->net_dev);
++ }
++
++ /* Both our generic-LRO and SFC-SSR support skb and page based
++ * allocation, but neither support switching from one to the
++ * other on the fly. If we spot that the allocation mode has
++ * changed, then flush the LRO state.
++ */
++ if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
++ channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
++ }
++
++ /* Allow callback to veto the packet */
++ veto = EFX_DL_CALLBACK(efx, rx_packet, rx_buf->data, rx_buf->len);
++ if (unlikely(veto)) {
++ EFX_LOG(efx, "RX vetoed by driverlink %s driver\n",
++ efx->dl_cb_dev.rx_packet->driver->name);
++ /* Free the buffer now */
++ efx_free_rx_buffer(efx, rx_buf);
++ goto done;
++ }
++
++ /* Form an skb if required */
++ if (rx_buf->page) {
++ int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
++ skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
++ if (unlikely(skb == NULL)) {
++ efx_free_rx_buffer(efx, rx_buf);
++ goto done;
++ }
++ } else {
++ /* We now own the SKB */
++ skb = rx_buf->skb;
++ rx_buf->skb = NULL;
++ }
++
++ EFX_BUG_ON_PARANOID(rx_buf->page);
++ EFX_BUG_ON_PARANOID(rx_buf->skb);
++ EFX_BUG_ON_PARANOID(!skb);
++
++ /* Set the SKB flags */
++ if (unlikely(!checksummed || !efx->rx_checksum_enabled))
++ skb->ip_summed = CHECKSUM_NONE;
++
++ /* Pass the packet up */
++ netif_receive_skb(skb);
++
++ /* Update allocation strategy method */
++ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
++
++ /* fall-thru */
++done:
++ /* Update statistics */
++ efx->net_dev->last_rx = jiffies;
++}
++
++void efx_rx_strategy(struct efx_channel *channel)
++{
++ enum efx_rx_alloc_method method = rx_alloc_method;
++
++ /* Only makes sense to use page based allocation if LRO is enabled */
++ if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
++ method = RX_ALLOC_METHOD_SKB;
++ } else if (method == RX_ALLOC_METHOD_AUTO) {
++ /* Constrain the rx_alloc_level */
++ if (channel->rx_alloc_level < 0)
++ channel->rx_alloc_level = 0;
++ else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
++ channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
++
++ /* Decide on the allocation method */
++ method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
++ RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
++ }
++
++ /* Push the option */
++ channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
++}
++
++int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ unsigned int rxq_size;
++ int rc;
++
++ EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
++
++ /* Allocate RX buffers */
++ rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
++ rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
++ if (!rx_queue->buffer) {
++ rc = -ENOMEM;
++ goto fail1;
++ }
++
++ rc = falcon_probe_rx(rx_queue);
++ if (rc)
++ goto fail2;
++
++ return 0;
++
++ fail2:
++ kfree(rx_queue->buffer);
++ rx_queue->buffer = NULL;
++ fail1:
++ /* Mark queue as unused */
++ rx_queue->used = 0;
++
++ return rc;
++}
++
++int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ struct efx_nic *efx = rx_queue->efx;
++ unsigned int max_fill, trigger, limit;
++
++ EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
++
++ ASSERT_RTNL();
++
++ /* Initialise ptr fields */
++ rx_queue->added_count = 0;
++ rx_queue->notified_count = 0;
++ rx_queue->removed_count = 0;
++ rx_queue->min_fill = -1U;
++ rx_queue->min_overfill = -1U;
++
++ /* Initialise limit fields */
++ max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
++ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
++ limit = max_fill * min(rx_refill_limit, 100U) / 100U;
++
++ rx_queue->max_fill = max_fill;
++ rx_queue->fast_fill_trigger = trigger;
++ rx_queue->fast_fill_limit = limit;
++
++ /* Set up RX descriptor ring */
++ return falcon_init_rx(rx_queue);
++}
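++
++/* e.g. for a 4096-entry descriptor ring with the default parameters:
++ * max_fill is 4094, the fast-fill trigger 3684 (90%) and the fast-fill
++ * limit 3889 (95%). */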
++
++void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ int i;
++ struct efx_rx_buffer *rx_buf;
++
++ EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
++
++ ASSERT_RTNL();
++
++ /* Flush RX queue and remove descriptor ring */
++ falcon_fini_rx(rx_queue);
++
++ /* Release RX buffers. NB: start at index 0, not the current HW ptr */
++ if (rx_queue->buffer) {
++ for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
++ rx_buf = efx_rx_buffer(rx_queue, i);
++ efx_fini_rx_buffer(rx_queue, rx_buf);
++ }
++ }
++
++ /* For a page that is part-way through splitting into RX buffers */
++ if (rx_queue->buf_page != NULL) {
++ pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
++ RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
++ __free_pages(rx_queue->buf_page,
++ rx_queue->efx->rx_buffer_order);
++ rx_queue->buf_page = NULL;
++ }
++}
++
++void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
++{
++ EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
++
++ falcon_remove_rx(rx_queue);
++
++ kfree(rx_queue->buffer);
++ rx_queue->buffer = NULL;
++ rx_queue->used = 0;
++}
++
++
++module_param(rx_alloc_method, int, 0644);
++MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
++
++module_param(rx_refill_threshold, uint, 0444);
++MODULE_PARM_DESC(rx_refill_threshold,
++ "RX descriptor ring fast/slow fill threshold (%)");
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/rx.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/rx.h
+--- linux-2.6.18.8/drivers/net/sfc/rx.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/rx.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,44 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_RX_H
++#define EFX_RX_H
++
++#include "net_driver.h"
++
++
++int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
++void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
++int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
++void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
++
++void efx_rx_strategy(struct efx_channel *channel);
++void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
++void efx_rx_work(struct work_struct *data);
++void __efx_rx_packet(struct efx_channel *channel,
++ struct efx_rx_buffer *rx_buf, int checksummed);
++
++
++#endif /* EFX_RX_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/selftest.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/selftest.c
+--- linux-2.6.18.8/drivers/net/sfc/selftest.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/selftest.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,815 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/netdevice.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/kernel_stat.h>
++#include <linux/pci.h>
++#include <linux/ethtool.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/udp.h>
++#include <linux/rtnetlink.h>
++#include <asm/io.h>
++#include "net_driver.h"
++#include "ethtool.h"
++#include "efx.h"
++#include "falcon.h"
++#include "selftest.h"
++#include "boards.h"
++#include "workarounds.h"
++
++/* Self tests */
++
++/*
++ * Loopback test packet structure
++ *
++ * The self-test should stress every RSS vector, and unfortunately
++ * Falcon only performs RSS on TCP/UDP packets.
++ */
++struct efx_loopback_payload {
++ struct ethhdr header;
++ struct iphdr ip;
++ struct udphdr udp;
++ __be16 iteration;
++ const char msg[64];
++} __attribute__ ((packed));
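++
++/* The structure is packed so that its in-memory layout matches the
++ * on-wire frame exactly; any compiler padding would break the
++ * byte-for-byte comparisons in efx_loopback_rx_packet(). */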
++
++/* Loopback test source MAC address */
++static const unsigned char payload_source[ETH_ALEN] = {
++ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
++};
++
++static const char *payload_msg =
++ "Hello world! This is an Efx loopback test in progress!";
++
++struct efx_selftest_state {
++ /* Drop all packets in efx_loopback_rx_packet */
++ int flush;
++
++ /* Number of packets being used in this test */
++ int packet_count;
++
++ /* RX good packet count */
++ atomic_t rx_good;
++
++ /* RX bad packet count */
++ atomic_t rx_bad;
++
++ /* Payload used in tests */
++ struct efx_loopback_payload payload;
++};
++
++/**************************************************************************
++ *
++ * Configurable values
++ *
++ **************************************************************************/
++
++/* Level of loopback testing
++ *
++ * The maximum packet burst length is 16**(n-1), i.e.
++ *
++ * - Level 0 : no packets
++ * - Level 1 : 1 packet
++ * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
++ * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets)
++ *
++ */
++static unsigned int loopback_test_level = 3;
++
++/**************************************************************************
++ *
++ * Interrupt and event queue testing
++ *
++ **************************************************************************/
++
++/*
++ * Test interrupts
++ *
++ * This generates a test interrupt and waits for it to be received by
++ * a CPU. This routine must be called from process context and will
++ * sleep.
++ */
++static int efx_test_interrupts(struct efx_nic *efx,
++ struct efx_self_tests *tests)
++{
++ struct efx_channel *channel;
++ unsigned long j_start;
++
++ EFX_LOG(efx, "testing interrupts\n");
++ tests->interrupt = -1;
++
++ /* Reset interrupt flag */
++ efx->last_irq_cpu = -1;
++ smp_wmb();
++
++ /* To guarantee that an interrupt is received, make sure that the
++ * channels are ack'd at least once, re-enabling interrupts.
++ * We will then receive an interrupt, either by explicitly
++ * requesting one, or receiving traffic.
++ */
++ efx_for_each_channel_with_interrupt(channel, efx) {
++ /* Process the eventq synchronously */
++ if (channel->work_pending)
++ efx_process_channel_now(channel);
++ /* Check if we haven't already received an interrupt */
++ if (efx->last_irq_cpu >= 0)
++ goto success;
++ }
++
++ j_start = jiffies;
++
++ /* Generate test interrupt. */
++ falcon_generate_interrupt(efx);
++
++ /* Wait for arrival of test interrupt. */
++ EFX_LOG(efx, "waiting for test interrupt\n");
++ schedule_timeout_uninterruptible(HZ / 10);
++ if (efx->last_irq_cpu >= 0)
++ goto success;
++
++ EFX_ERR(efx, "timed out in %ld jiffies waiting for interrupt\n",
++ jiffies - j_start);
++ return -ETIMEDOUT;
++
++ success:
++ EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
++ efx->last_irq_cpu);
++ tests->interrupt = 1;
++ return 0;
++}
++
++/*
++ * Test the capability of the event queue to receive events.
++ * This generates a test event and waits for it to be processed.
++ * This routine must be called from process context and will sleep.
++ */
++static int efx_test_eventq(struct efx_channel *channel,
++ struct efx_self_tests *tests)
++{
++ unsigned int magic;
++
++ /* Channel specific code, limited to 20 bits */
++ magic = (0x00010150 + channel->channel);
++ EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
++ channel->channel, magic);
++
++ tests->eventq_dma[channel->channel] = -1;
++ tests->eventq_int[channel->channel] = 1; /* fake pass */
++ tests->eventq_poll[channel->channel] = 1; /* fake pass */
++
++ /* Reset flag and zero magic word */
++ channel->efx->last_irq_cpu = -1;
++ channel->eventq_magic = 0;
++ smp_wmb();
++
++ /* Generate test event */
++ falcon_generate_test_event(channel, magic);
++ udelay(1);
++
++ efx_process_channel_now(channel);
++ if (channel->eventq_magic != magic) {
++ EFX_ERR(channel->efx, "channel %d failed to see test event\n",
++ channel->channel);
++ return -ETIMEDOUT;
++ } else {
++ tests->eventq_dma[channel->channel] = 1;
++ }
++
++ return 0;
++}
++
++
++/*
++ * Test the capability of events to generate interrupts.
++ * This generates a test event and waits for it to be processed by an
++ * ISR. This routine must be called from process context and will
++ * sleep.
++ */
++static int efx_test_eventq_irq(struct efx_channel *channel,
++ struct efx_self_tests *tests)
++{
++ unsigned int magic, count;
++ unsigned long j_start = jiffies;
++
++ /* Channel specific code, limited to 20 bits */
++ magic = (0x00010150 + channel->channel);
++ EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
++ channel->channel, magic);
++
++ tests->eventq_dma[channel->channel] = -1;
++ tests->eventq_int[channel->channel] = -1;
++ tests->eventq_poll[channel->channel] = -1;
++
++ /* Reset flag and zero magic word */
++ channel->efx->last_irq_cpu = -1;
++ channel->eventq_magic = 0;
++ smp_wmb();
++
++ /* Generate test event */
++ falcon_generate_test_event(channel, magic);
++
++ /* Wait for arrival of interrupt */
++ count = 0;
++ do {
++ schedule_timeout_uninterruptible(HZ / 100);
++
++ if (channel->work_pending)
++ efx_process_channel_now(channel);
++
++ if (channel->eventq_magic == magic)
++ goto eventq_ok;
++ }
++ while (++count < 2);
++
++ EFX_ERR(channel->efx, "channel %d timed out in %ld jiffies waiting for"
++ " event queue\n", channel->channel, jiffies - j_start);
++
++ /* See if interrupt arrived */
++ if (channel->efx->last_irq_cpu >= 0) {
++ EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
++ "during event queue test\n", channel->channel,
++ raw_smp_processor_id());
++ tests->eventq_int[channel->channel] = 1;
++ }
++
++ /* Check to see if event was received even if interrupt wasn't */
++ efx_process_channel_now(channel);
++ if (channel->eventq_magic == magic) {
++ EFX_ERR(channel->efx, "channel %d event was generated, but "
++ "failed to trigger an interrupt\n", channel->channel);
++ tests->eventq_dma[channel->channel] = 1;
++ }
++
++ return -ETIMEDOUT;
++ eventq_ok:
++ EFX_LOG(channel->efx, "channel %d event queue passed\n",
++ channel->channel);
++ tests->eventq_dma[channel->channel] = 1;
++ tests->eventq_int[channel->channel] = 1;
++ tests->eventq_poll[channel->channel] = 1;
++ return 0;
++}
++
++/**************************************************************************
++ *
++ * PHY testing
++ *
++ **************************************************************************/
++
++/*
++ * Check PHY presence
++ * This reads the PHY ID registers via GMII and checks that neither
++ * is all-zeroes nor all-ones (either of which would indicate a
++ * nonexistent or uncommunicative PHY).
++ */
++static int efx_test_phy(struct efx_nic *efx,
++ struct efx_self_tests *tests)
++{
++ u16 physid1, physid2;
++ struct mii_if_info *mii = &efx->mii;
++ struct net_device *net_dev = efx->net_dev;
++
++ if (efx->phy_type == PHY_TYPE_NONE)
++ return 0;
++
++ EFX_LOG(efx, "testing PHY presence\n");
++ tests->phy_ok = -1;
++
++ physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
++ physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
++
++ if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
++ (physid2 != 0x0000) && (physid2 != 0xffff)) {
++ EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
++ mii->phy_id, physid1, physid2);
++ tests->phy_ok = 1;
++ return 0;
++ }
++
++ EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
++ return -ENODEV;
++}
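++
++/* Editorial sketch: an absent or unresponsive PHY reads back as
++ * all-zeroes or all-ones on MDIO, so the presence check above is
++ * equivalent to requiring, for each ID register:
++ *
++ *     static inline int mii_id_valid(u16 id)
++ *     {
++ *             return id != 0x0000 && id != 0xffff;
++ *     }
++ *
++ * (mii_id_valid is a hypothetical helper, shown only to make the
++ * predicate explicit.) */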
++
++/**************************************************************************
++ *
++ * Loopback testing
++ * NB Only one loopback test can be executing concurrently.
++ *
++ **************************************************************************/
++
++/* Loopback test RX callback
++ * This is called for each received packet during loopback testing.
++ */
++void efx_loopback_rx_packet(struct efx_nic *efx,
++ const char *buf_ptr, int pkt_len)
++{
++ struct efx_selftest_state *state = efx->loopback_selftest;
++ struct efx_loopback_payload *received;
++ struct efx_loopback_payload *payload;
++
++ BUG_ON(!buf_ptr);
++
++ /* If we are just flushing, then drop the packet */
++ if ((state == NULL) || state->flush)
++ return;
++
++ payload = &state->payload;
++
++ /* The packet should have been passed up to us before any LRO/SSR, so
++ * we should be able to compare the data directly. Since the packet
++ * is going to be thrown away by the caller, modify it in place.
++ * efx_test_loopback guarantees not to touch state->payload during
++ * the test. */
++ received = (struct efx_loopback_payload *)(char *)buf_ptr;
++ received->ip.saddr = payload->ip.saddr;
++ received->ip.check = payload->ip.check;
++
++ /* Check that header exists */
++ if (pkt_len < sizeof(received->header)) {
++ EFX_ERR(efx, "saw runt RX packet (length %d) "
++ "in %s loopback test\n", pkt_len,
++ LOOPBACK_MODE(efx));
++ goto err;
++ }
++
++ /* Check that header matches */
++ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
++ EFX_ERR(efx, "saw non-loopback RX packet in"
++ " %s loopback test\n",
++ LOOPBACK_MODE(efx));
++ goto err;
++ }
++
++ /* Check packet length */
++ if (pkt_len != sizeof(*payload)) {
++ EFX_ERR(efx, "saw incorrect RX packet length"
++ " %d (wanted %d) in %s loopback test\n",
++ pkt_len, (int)sizeof(*payload),
++ LOOPBACK_MODE(efx));
++ goto err;
++ }
++
++ /* Check that IP header matches */
++ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
++ EFX_ERR(efx, "saw corrupted IP header in %s "
++ "loopback test\n",
++ LOOPBACK_MODE(efx));
++ goto err;
++ }
++
++ /* Check that msg and padding matches */
++ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
++ EFX_ERR(efx, "saw corrupted RX packet in %s "
++ "loopback test\n",
++ LOOPBACK_MODE(efx));
++ goto err;
++ }
++
++ /* Check that iteration matches */
++ if (received->iteration != payload->iteration) {
++ EFX_ERR(efx, "saw RX packet from iteration %d"
++ " (wanted %d) in %s loopback test\n",
++ ntohs(received->iteration), ntohs(payload->iteration),
++ LOOPBACK_MODE(efx));
++ goto err;
++ }
++
++ /* Increase correct RX count */
++ EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
++ LOOPBACK_MODE(efx));
++
++ atomic_inc(&state->rx_good);
++ return;
++
++ err:
++#ifdef EFX_ENABLE_DEBUG
++ if (atomic_read(&state->rx_bad) == 0) {
++ EFX_ERR(efx, "received packet:\n");
++ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
++ buf_ptr, pkt_len, 0);
++ EFX_ERR(efx, "expected packet:\n");
++ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
++ &state->payload, sizeof(state->payload), 0);
++ }
++#endif
++ atomic_inc(&state->rx_bad);
++}
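++
++/* Editorial note: received->ip.saddr and received->ip.check are
++ * overwritten with the reference values before any comparison because
++ * both legitimately differ from the template: saddr is incremented
++ * per transmitted copy (see efx_test_loopback) to exercise RSS, and
++ * the checksum is offloaded to hardware. */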
++
++/* Initialise an efx_selftest_state for a new iteration */
++static void efx_iterate_state(struct efx_nic *efx)
++{
++ struct efx_selftest_state *state = efx->loopback_selftest;
++ struct net_device *net_dev = efx->net_dev;
++ struct efx_loopback_payload *payload = &state->payload;
++
++ /* Initialise the layer 2 (Ethernet) header */
++ memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
++ memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
++ payload->header.h_proto = htons(ETH_P_IP);
++
++ /* saddr set later and used as incrementing count */
++ payload->ip.daddr = htonl(INADDR_LOOPBACK);
++ payload->ip.ihl = 5;
++ payload->ip.check = 0; /* offloaded */
++ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
++ payload->ip.version = IPVERSION;
++ payload->ip.protocol = IPPROTO_UDP;
++
++ /* Initialise udp header */
++ payload->udp.source = 0;
++ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
++ sizeof(struct iphdr));
++ payload->udp.check = 0; /* checksum ignored */
++
++ /* Fill out payload */
++ payload->iteration = htons(ntohs(payload->iteration) + 1);
++ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
++
++ /* Fill out remaining state members */
++ atomic_set(&state->rx_good, 0);
++ atomic_set(&state->rx_bad, 0);
++ smp_wmb();
++}
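++
++/* Editorial sketch of the test packet assembled above (field order
++ * as implied by the code; the real declaration lives in the driver
++ * headers):
++ *
++ *     struct efx_loopback_payload {
++ *             struct ethhdr header;   (Ethernet header)
++ *             struct iphdr ip;        (ihl = 5, checksum offloaded)
++ *             struct udphdr udp;      (zero checksum)
++ *             __be16 iteration;       (bumped once per test run)
++ *             char msg[...];          (copy of payload_msg)
++ *     };
++ *
++ * ip.tot_len and udp.len are derived from sizeof(*payload) minus the
++ * enclosing header sizes, as computed above. */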
++
++/*
++ * Perform loopback test with N packets
++ *
++ * This will transmit "num_packets" copies of a test packet, and check
++ * that they were both transmitted (i.e. a TX completion event was
++ * received) and received (i.e. the data arrived intact via loopback).
++ * The port must have already been placed into the desired loopback
++ * mode.
++ */
++static int efx_test_loopback(struct efx_nic *efx,
++ struct efx_tx_queue *tx_queue,
++ struct efx_loopback_self_tests *lb_tests)
++{
++#if !defined(EFX_HAVE_OLD_NAPI)
++ struct efx_channel *channel;
++#endif
++ struct efx_selftest_state *state = efx->loopback_selftest;
++ struct efx_loopback_payload *payload;
++ struct sk_buff *skb;
++ int rc = 0, i, tx_done, rx_good, rx_bad;
++
++ /* Fill out the packet contents */
++ efx_iterate_state(efx);
++
++ /* Create and fill skb */
++ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
++ if (!skb) {
++ rc = -ENOMEM;
++ goto out1;
++ }
++ payload = ((struct efx_loopback_payload *)
++ skb_put(skb, sizeof(state->payload)));
++ memcpy(payload, &state->payload, sizeof(state->payload));
++
++ /* Transmit N copies of buffer */
++ for (i = 0; i < state->packet_count; i++) {
++ /* Set the source address in the copy of the packet.
++ * Incrementing the source address on a per-packet basis
++ * should ensure that we stress all RSS vectors */
++ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
++ skb_get(skb);
++
++ if (efx_xmit(efx, tx_queue, skb) != NETDEV_TX_OK) {
++ EFX_ERR(efx, "TX queue %d could not transmit "
++ "packet %d of %d in %s loopback test\n",
++ tx_queue->queue, i + 1,
++ state->packet_count, LOOPBACK_MODE(efx));
++ rc = -EPIPE;
++ goto out2;
++ }
++
++ /* Avoid hogging the PCI bus */
++ udelay(10);
++ }
++
++#if !defined(EFX_HAVE_OLD_NAPI)
++ /* NAPI polling is not enabled, so process channels synchronously */
++ schedule_timeout_uninterruptible(HZ / 50);
++ efx_for_each_channel_with_interrupt(channel, efx) {
++ if (channel->work_pending)
++ efx_process_channel_now(channel);
++ }
++#else
++ /* Allow time for processing */
++ schedule_timeout_uninterruptible(HZ / 10);
++#endif
++
++ if (state->flush)
++ goto out3;
++
++ /* Check TX completion and received packet counts */
++ tx_done = state->packet_count - (atomic_read(&skb->users) - 1);
++ rx_good = atomic_read(&state->rx_good);
++ rx_bad = atomic_read(&state->rx_bad);
++ if (tx_done != state->packet_count) {
++ /* Don't free the skbs; they will be picked up on TX
++ * overflow or channel teardown.
++ */
++ EFX_ERR(efx, "TX queue %d saw only %d out of an "
++ "expected %d TX completion events in %s loopback "
++ "test\n", tx_queue->queue, tx_done,
++ state->packet_count, LOOPBACK_MODE(efx));
++ rc = -ETIMEDOUT;
++ /* Fall through so that we see the RX errors as well */
++ }
++
++ /* We may always be up to a flush away from our desired packet total */
++ if (rx_good != state->packet_count) {
++ EFX_LOG(efx, "TX queue %d saw only %d out of an "
++ "expected %d received packets in %s loopback "
++ "test\n", tx_queue->queue, rx_good,
++ state->packet_count, LOOPBACK_MODE(efx));
++ rc = -ETIMEDOUT;
++ /* Fall through */
++ }
++
++ /* Update loopback test structure */
++ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
++ lb_tests->tx_done[tx_queue->queue] += tx_done;
++ lb_tests->rx_good += rx_good;
++ lb_tests->rx_bad += rx_bad;
++
++ out3:
++ out2:
++ /* Free skb */
++ if (skb) {
++ /* If the selftest failed then the skb needs its reference
++ * count decreasing */
++ while (skb_shared(skb))
++ kfree_skb(skb);
++ dev_kfree_skb_any(skb);
++ }
++
++ out1:
++ return rc;
++}
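++
++/* Editorial note on the TX accounting above: every transmitted copy
++ * holds an extra reference on the single skb (skb_get), and the TX
++ * completion path drops one reference per completed packet, so
++ *
++ *     tx_done = packet_count - (atomic_read(&skb->users) - 1)
++ *
++ * counts completions as "packets sent minus references still
++ * outstanding", after discounting the caller's own reference. */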
++
++/* Perform loopback test safely
++ *
++ * This performs a safe loopback test by starting with a single packet
++ * and only increasing the number of packets while the tests are
++ * passing. This avoids flooding a network with garbage packets if
++ * e.g. setting the loopback mode fails.
++ */
++static int
++efx_test_loopback_safely(struct efx_nic *efx,
++ struct efx_tx_queue *tx_queue,
++ struct efx_loopback_self_tests *lb_tests)
++{
++ struct efx_selftest_state *state = efx->loopback_selftest;
++ int i, rc = 0;
++
++ for (i = 0; i < loopback_test_level; i++) {
++ /* Determine how many packets to send */
++ state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
++ state->packet_count = min(1 << (i << 2), state->packet_count);
++ state->flush = 0;
++
++ EFX_LOG(efx, "TX queue %d testing %s loopback"
++ " with %d packets\n", tx_queue->queue,
++ LOOPBACK_MODE(efx), state->packet_count);
++
++ rc = efx_test_loopback(efx, tx_queue, lb_tests);
++ if (rc) {
++ /* Wait a while to ensure there are no packets
++ * floating around after a failure.
++ */
++ schedule_timeout_uninterruptible(HZ / 5);
++ return rc;
++ }
++ }
++
++ EFX_LOG(efx, "TX queue %d passed %s loopback test "
++ "with a burst length of %d packets\n",
++ tx_queue->queue, LOOPBACK_MODE(efx), state->packet_count);
++
++ return rc;
++}
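++
++/* Editorial note: the burst length ramps as 1 << (i << 2), i.e.
++ * 1, 16, 256, 4096, ... packets, capped at a third of the TX
++ * descriptor ring, (efx->type->txd_ring_mask + 1) / 3.  Starting
++ * from a single packet means a broken loopback mode fails after one
++ * stray packet instead of after flooding the link. */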
++
++static int efx_test_loopbacks(struct efx_nic *efx,
++ struct efx_self_tests *tests,
++ unsigned int loopback_modes)
++{
++ struct efx_selftest_state *state = efx->loopback_selftest;
++ struct ethtool_cmd ecmd, ecmd_loopback;
++ struct efx_tx_queue *tx_queue;
++ enum efx_loopback_mode old_mode, mode;
++ int old_powered, count, rc = 0;
++ int retry = EFX_WORKAROUND_8909(efx);
++
++ /* Get current PHY settings */
++ rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
++ if (rc) {
++ EFX_ERR(efx, "could not get GMII settings\n");
++ return rc;
++ }
++ old_mode = efx->loopback_mode;
++ old_powered = efx->phy_powered;
++
++ /* Disable autonegotiation for the purposes of loopback */
++ memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
++ if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
++ ecmd_loopback.autoneg = AUTONEG_DISABLE;
++ ecmd_loopback.duplex = DUPLEX_FULL;
++ ecmd_loopback.speed = EFX_IS10G(efx) ?
++ SPEED_10000 : SPEED_1000;
++ }
++
++ rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
++ if (rc) {
++ EFX_ERR(efx, "could not disable autonegotiation\n");
++ goto out;
++ }
++ tests->loopback_speed = ecmd_loopback.speed;
++ tests->loopback_full_duplex = ecmd_loopback.duplex;
++
++ /* Test all supported loopback modes */
++ for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
++ if (!(loopback_modes & (1 << mode)))
++ continue;
++
++ /* Move the port into the specified loopback mode. */
++ state->flush = 1;
++ efx->phy_powered = 1;
++ efx->loopback_mode = mode;
++ efx_reconfigure_port(efx, 0);
++
++ /* Wait for the PHY to signal the link is up */
++ count = 0;
++ do {
++ struct efx_channel *channel = &efx->channel[0];
++
++ (void) efx->mac_op->check_hw(efx);
++ schedule_timeout_uninterruptible(HZ / 10);
++ if (channel->work_pending)
++ efx_process_channel_now(channel);
++ /* Wait for PHY events to be processed */
++ flush_workqueue(efx->workqueue);
++ rmb();
++ } while ((++count < 20) && !efx->link_up);
++
++ /* The link should now be up. If it isn't, there is no point
++ * in attempting a loopback test */
++ if (!efx->link_up) {
++ EFX_ERR(efx, "loopback %s never came up\n",
++ LOOPBACK_MODE(efx));
++ rc = -EIO;
++ goto out;
++ }
++
++ EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
++ LOOPBACK_MODE(efx), count);
++
++ /* Test every TX queue */
++ efx_for_each_tx_queue(tx_queue, efx) {
++ rc |= efx_test_loopback_safely(efx, tx_queue,
++ &tests->loopback[mode]);
++ if (rc)
++ goto fail;
++ }
++
++ continue;
++
++fail:
++ if (retry) {
++ /* Give the PHY a kick by pretending to move into
++ * a Falcon internal loopback mode, then back out */
++ int first = ffs(efx->loopback_modes) - 1;
++
++ EFX_INFO(efx, "retrying %s loopback\n",
++ LOOPBACK_MODE(efx));
++
++ state->flush = 1;
++ efx->loopback_mode = first;
++ efx_reconfigure_port(efx, 0);
++
++ retry = rc = 0;
++ --mode;
++ continue;
++ }
++ break;
++ }
++
++ out:
++ /* Take out of loopback and restore PHY settings */
++ state->flush = 1;
++ efx->loopback_mode = old_mode;
++ efx->phy_powered = old_powered;
++ /* Push the loopback change, and restore any other
++ * settings we may have trodden on */
++ (void) efx_ethtool_set_settings(efx->net_dev, &ecmd);
++
++ return rc;
++}
++
++/**************************************************************************
++ *
++ * Entry points
++ *
++ *************************************************************************/
++
++/* Online (i.e. non-disruptive) testing
++ *
++ * This checks interrupt generation, event delivery and PHY presence.
++ * The caller should hold the suspend lock
++ *
++ */
++int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
++{
++ struct efx_channel *channel;
++ int rc = 0;
++
++ ASSERT_RTNL();
++
++ EFX_LOG(efx, "performing online self-tests\n");
++
++ rc |= efx_test_interrupts(efx, tests);
++ efx_for_each_channel(channel, efx) {
++ if (channel->has_interrupt)
++ rc |= efx_test_eventq_irq(channel, tests);
++ else
++ rc |= efx_test_eventq(channel, tests);
++ }
++ rc |= efx_test_phy(efx, tests);
++
++ if (rc)
++ EFX_ERR(efx, "failed online self-tests\n");
++
++ return rc;
++}
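++
++/* Editorial usage sketch (assumes only the calling convention shown
++ * by the code above):
++ *
++ *     struct efx_self_tests tests;
++ *     int rc;
++ *
++ *     memset(&tests, 0, sizeof(tests));
++ *     rtnl_lock();
++ *     rc = efx_online_test(efx, &tests);
++ *     rtnl_unlock();
++ *     if (tests.interrupt != 1)
++ *             ... interrupt test failed or did not run ...
++ */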
++
++/* Offline (i.e. disruptive) testing
++ * This checks MAC and PHY loopback on the specified port. The caller
++ * should hold the rtnl lock
++ */
++int efx_offline_test(struct efx_nic *efx,
++ struct efx_self_tests *tests, unsigned int loopback_modes)
++{
++ struct efx_selftest_state *state;
++ int rc = 0;
++
++ ASSERT_RTNL();
++
++ EFX_LOG(efx, "performing offline self-tests\n");
++
++ /* Create a selftest_state structure to hold state for the test */
++ state = kzalloc(sizeof(*state), GFP_KERNEL);
++ if (state == NULL) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ /* Set the port loopback_selftest member. From this point on
++ * all received packets will be dropped. Mark the state as
++ * "flushing" so all in-flight packets are dropped. */
++ BUG_ON(efx->loopback_selftest);
++ state->flush = 1;
++ efx->loopback_selftest = (void *)state;
++ wmb();
++
++ /* Test all loopback modes */
++ rc = efx_test_loopbacks(efx, tests, loopback_modes);
++
++ /* Tidy up the port test state */
++ efx->loopback_selftest = NULL;
++ wmb();
++ kfree(state);
++
++ out:
++ if (rc)
++ EFX_ERR(efx, "failed offline self-tests\n");
++
++ return rc;
++}
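++
++/* Editorial usage sketch: loopback_modes is a bitmask over enum
++ * efx_loopback_mode (LOOPBACK_XXX below is a placeholder, not a real
++ * constant):
++ *
++ *     rtnl_lock();
++ *     rc = efx_offline_test(efx, &tests, 1 << LOOPBACK_XXX);
++ *     rtnl_unlock();
++ */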
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/selftest.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/selftest.h
+--- linux-2.6.18.8/drivers/net/sfc/selftest.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/selftest.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,67 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_SELFTEST_H
++#define EFX_SELFTEST_H
++
++#include "net_driver.h"
++
++/*
++ * Self tests
++ */
++
++struct efx_loopback_self_tests {
++ int tx_sent[EFX_MAX_TX_QUEUES];
++ int tx_done[EFX_MAX_TX_QUEUES];
++ int rx_good;
++ int rx_bad;
++};
++
++/* Efx self test results
++ * For fields which are not counters, 1 indicates success and -1
++ * indicates failure.
++ */
++struct efx_self_tests {
++ int interrupt;
++ int eventq_dma[EFX_MAX_CHANNELS];
++ int eventq_int[EFX_MAX_CHANNELS];
++ int eventq_poll[EFX_MAX_CHANNELS];
++ int phy_ok;
++ int loopback_speed;
++ int loopback_full_duplex;
++ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
++};
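++
++/* Editorial note: a field left at 0 was presumably never run (the
++ * caller is expected to zero the structure first), so treating only
++ * 1 as a pass, e.g. "tests->phy_ok < 1", also catches skipped
++ * tests. */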
++
++extern void efx_loopback_rx_packet(struct efx_nic *efx,
++ const char *buf_ptr, int pkt_len);
++extern int efx_online_test(struct efx_nic *efx,
++ struct efx_self_tests *tests);
++extern int efx_offline_test(struct efx_nic *efx,
++ struct efx_self_tests *tests,
++ unsigned int loopback_modes);
++
++#endif /* EFX_SELFTEST_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/assert_valid.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/assert_valid.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/assert_valid.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/assert_valid.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,95 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains functions to assert the validity of resources
++ * and resource managers in DEBUG builds of the resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/sysdep.h>
++
++#ifndef NDEBUG
++#include <ci/efrm/resource.h>
++#include <ci/efrm/driver_private.h>
++#include <ci/efrm/debug.h>
++
++void
++efrm_resource_manager_assert_valid(struct efrm_resource_manager *rm,
++ const char *file, int line)
++{
++ _EFRM_ASSERT(rm, file, line);
++ _EFRM_ASSERT(rm->rm_name, file, line);
++ _EFRM_ASSERT(rm->rm_type < EFRM_RESOURCE_NUM, file, line);
++ _EFRM_ASSERT(rm->rm_table, file, line);
++ _EFRM_ASSERT(rm->rm_table_size > 0, file, line);
++ _EFRM_ASSERT(rm->rm_dtor, file, line);
++}
++EXPORT_SYMBOL(efrm_resource_manager_assert_valid);
++
++/*
++ * \param rs resource to validate
++ * \param ref_count_is_zero One of 3 values
++ * > 0 - check ref count is zero
++ * = 0 - check ref count is non-zero
++ * < 0 - ref count could be any value
++ */
++void
++efrm_resource_assert_valid(struct efrm_resource *rs, int ref_count_is_zero,
++ const char *file, int line)
++{
++ struct efrm_resource_manager *rm;
++
++ _EFRM_ASSERT(rs, file, line);
++
++ if (ref_count_is_zero >= 0) {
++ if (!(ref_count_is_zero || atomic_read(&rs->rs_ref_count) > 0)
++ || !(!ref_count_is_zero
++ || atomic_read(&rs->rs_ref_count) == 0))
++ EFRM_WARN("%s: check %szero ref=%d " EFRM_RESOURCE_FMT,
++ __FUNCTION__,
++ ref_count_is_zero == 0 ? "non-" : "",
++ atomic_read(&rs->rs_ref_count),
++ EFRM_RESOURCE_PRI_ARG(rs->rs_handle));
++
++ _EFRM_ASSERT(!(ref_count_is_zero == 0) ||
++ atomic_read(&rs->rs_ref_count) != 0, file, line);
++ _EFRM_ASSERT(!(ref_count_is_zero > 0) ||
++ atomic_read(&rs->rs_ref_count) == 0, file, line);
++ }
++
++ rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)];
++ efrm_resource_manager_assert_valid(rm, file, line);
++}
++EXPORT_SYMBOL(efrm_resource_assert_valid);
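++
++/* Editorial note: the double-negated condition above reduces to two
++ * expectations:
++ *
++ *     ref_count_is_zero >  0  =>  rs_ref_count == 0
++ *     ref_count_is_zero == 0  =>  rs_ref_count >  0
++ *
++ * EFRM_WARN fires when the expectation is violated, and the two
++ * _EFRM_ASSERTs then trip in debug builds. */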
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/buddy.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/buddy.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/buddy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/buddy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,307 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains implementation of a buddy allocator.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efhw/common.h> /* get uintXX types on win32 */
++#include <ci/efrm/sysdep.h>
++#include <ci/efrm/buddy.h>
++#include <ci/efrm/debug.h>
++
++#if 1
++#define DEBUG_ALLOC(x)
++#else
++#define DEBUG_ALLOC(x) x
++
++static inline void efrm_buddy_dump(struct efrm_buddy_allocator *b)
++{
++ unsigned o;
++
++ EFRM_NOTICE("%s: dump allocator with order %u",
++ __FUNCTION__, b->order);
++ for (o = 0; o <= b->order; o++) {
++ struct list_head *l = &b->free_lists[o];
++ while (l->next != &b->free_lists[o]) {
++ l = l->next;
++ EFRM_NOTICE("%s: order %x: %zx", __FUNCTION__, o,
++ l - b->links);
++ }
++ }
++}
++#endif
++
++/*
++ * The purpose of the following inline functions is to give the
++ * understandable names to the simple actions.
++ */
++static inline void
++efrm_buddy_free_list_add(struct efrm_buddy_allocator *b,
++ unsigned order, unsigned addr)
++{
++ list_add(&b->links[addr], &b->free_lists[order]);
++ b->orders[addr] = (uint8_t) order;
++}
++static inline void
++efrm_buddy_free_list_del(struct efrm_buddy_allocator *b, unsigned addr)
++{
++ list_del(&b->links[addr]);
++ b->links[addr].next = NULL;
++}
++static inline int
++efrm_buddy_free_list_empty(struct efrm_buddy_allocator *b, unsigned order)
++{
++ return list_empty(&b->free_lists[order]);
++}
++static inline unsigned
++efrm_buddy_free_list_pop(struct efrm_buddy_allocator *b, unsigned order)
++{
++ struct list_head *l = list_pop(&b->free_lists[order]);
++ l->next = NULL;
++ return (unsigned)(l - b->links);
++}
++static inline int
++efrm_buddy_addr_in_free_list(struct efrm_buddy_allocator *b, unsigned addr)
++{
++ return b->links[addr].next != NULL;
++}
++static inline unsigned
++efrm_buddy_free_list_first(struct efrm_buddy_allocator *b, unsigned order)
++{
++ return (unsigned)(b->free_lists[order].next - b->links);
++}
++
++int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order)
++{
++ unsigned o;
++ unsigned size = 1 << order;
++
++ DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, order));
++ EFRM_ASSERT(b);
++ EFRM_ASSERT(order <= sizeof(unsigned) * 8 - 1);
++
++ b->order = order;
++ b->free_lists = vmalloc((order + 1) * sizeof(struct list_head));
++ if (b->free_lists == NULL)
++ goto fail1;
++
++ b->links = vmalloc(size * sizeof(struct list_head));
++ if (b->links == NULL)
++ goto fail2;
++
++ b->orders = vmalloc(size);
++ if (b->orders == NULL)
++ goto fail3;
++
++ memset(b->links, 0, size * sizeof(struct list_head));
++
++ for (o = 0; o <= b->order; ++o)
++ INIT_LIST_HEAD(b->free_lists + o);
++
++ efrm_buddy_free_list_add(b, b->order, 0);
++
++ return 0;
++
++fail3:
++ vfree(b->links);
++fail2:
++ vfree(b->free_lists);
++fail1:
++ return -ENOMEM;
++}
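++
++/* Editorial note on the data layout built above: free_lists holds
++ * order + 1 list heads, one per block size; links holds one
++ * list_head per unit address and doubles as an "is free" flag
++ * (links[a].next == NULL when the block at a is not on any free
++ * list); orders[a] records the order of the free block starting at
++ * a. */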
++
++void efrm_buddy_dtor(struct efrm_buddy_allocator *b)
++{
++ EFRM_ASSERT(b);
++
++ vfree(b->free_lists);
++ vfree(b->links);
++ vfree(b->orders);
++}
++
++int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order)
++{
++ unsigned smallest;
++ unsigned addr;
++
++ DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, order));
++ EFRM_ASSERT(b);
++
++ /* Find smallest chunk that is big enough. ?? Can optimise this by
++ ** keeping array of pointers to smallest chunk for each order.
++ */
++ smallest = order;
++ while (smallest <= b->order &&
++ efrm_buddy_free_list_empty(b, smallest))
++ ++smallest;
++
++ if (smallest > b->order) {
++ DEBUG_ALLOC(EFRM_NOTICE
++ ("buddy - alloc order %d failed - max order %d",
++ order, b->order););
++ return -ENOMEM;
++ }
++
++ /* Split blocks until we get one of the correct size. */
++ addr = efrm_buddy_free_list_pop(b, smallest);
++
++ DEBUG_ALLOC(EFRM_NOTICE("buddy - alloc %x order %d cut from order %d",
++ addr, order, smallest););
++ while (smallest-- > order)
++ efrm_buddy_free_list_add(b, smallest, addr + (1 << smallest));
++
++ EFRM_DO_DEBUG(b->orders[addr] = (uint8_t) order);
++
++ EFRM_ASSERT(addr < 1u << b->order);
++ return addr;
++}
++
++void
++efrm_buddy_free(struct efrm_buddy_allocator *b, unsigned addr,
++ unsigned order)
++{
++ unsigned buddy_addr;
++
++ DEBUG_ALLOC(EFRM_NOTICE("%s(%u, %u)", __FUNCTION__, addr, order));
++ EFRM_ASSERT(b);
++ EFRM_ASSERT(order <= b->order);
++ EFRM_ASSERT((unsigned long)addr + ((unsigned long)1 << order) <=
++ (unsigned long)1 << b->order);
++ EFRM_ASSERT(!efrm_buddy_addr_in_free_list(b, addr));
++ EFRM_ASSERT(b->orders[addr] == order);
++
++ /* merge free blocks */
++ while (order < b->order) {
++ buddy_addr = addr ^ (1 << order);
++ if (!efrm_buddy_addr_in_free_list(b, buddy_addr) ||
++ b->orders[buddy_addr] != order)
++ break;
++ efrm_buddy_free_list_del(b, buddy_addr);
++ if (buddy_addr < addr)
++ addr = buddy_addr;
++ ++order;
++ }
++
++ DEBUG_ALLOC(EFRM_NOTICE
++ ("buddy - free %x merged into order %d", addr, order););
++ efrm_buddy_free_list_add(b, order, addr);
++}
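++
++/* Editorial worked example of the merge loop above: the buddy of a
++ * block is found by toggling the order bit of its address,
++ *
++ *     buddy_addr = addr ^ (1 << order);
++ *
++ * so freeing block 8 at order 2 pairs it with block 12 (8 ^ 4); if
++ * 12 is also free at order 2, the two merge into one block at
++ * min(8, 12) = 8 with order 3, and the loop retries a level up. */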
++
++void efrm_buddy_reserve_at_start(struct efrm_buddy_allocator *b, unsigned n)
++{
++ int addr;
++ unsigned o;
++ EFRM_DO_DEBUG(int n_save = n);
++
++ DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, n));
++ EFRM_ASSERT(b);
++ EFRM_ASSERT(n <= 1u << b->order && n > 0);
++ /* Whole space must be free. */
++ EFRM_ASSERT(!efrm_buddy_free_list_empty(b, b->order));
++
++ o = fls(n);
++
++ while (n) {
++ while (((unsigned)1 << o) > n)
++ --o;
++ EFRM_ASSERT(((unsigned)1 << o) <= n);
++ addr = efrm_buddy_alloc(b, o);
++ EFRM_ASSERT(addr + (1 << o) <= n_save);
++ n -= 1 << o;
++ }
++}
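++
++/* Editorial note: the loop above carves n into powers of two from
++ * the largest downwards, e.g. n = 13 is reserved as blocks of
++ * 8 + 4 + 1 (orders 3, 2, 0).  Because efrm_buddy_alloc always
++ * returns the low half when splitting, the reserved blocks pack from
++ * address 0 upwards, covering exactly [0, n). */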
++
++static int
++__efrm_buddy_reserve_at_end(struct efrm_buddy_allocator *b, unsigned order,
++ int threshold)
++{
++ unsigned o, addr;
++
++ DEBUG_ALLOC(EFRM_NOTICE("%s(%u, %d)", __FUNCTION__, order, threshold));
++ EFRM_ASSERT(b);
++
++ /* Find largest block; there must be one big enough (or caller has
++ ** goofed).
++ */
++ for (o = b->order;; --o) {
++ if (efrm_buddy_free_list_empty(b, o))
++ continue;
++ addr = efrm_buddy_free_list_first(b, o);
++ if (addr + (1 << o) <= (unsigned)threshold)
++ continue;
++ break;
++ }
++ EFRM_ASSERT(o >= order);
++
++ /* Split down (keeping second half) until we reach
++ * the requested size. */
++ addr = efrm_buddy_free_list_pop(b, o);
++
++ while (o-- > order) {
++ efrm_buddy_free_list_add(b, o, addr);
++ addr += 1 << o;
++ }
++
++ EFRM_DO_DEBUG(b->orders[addr] = (uint8_t) order);
++
++ return addr;
++}
++
++void efrm_buddy_reserve_at_end(struct efrm_buddy_allocator *b, unsigned n)
++{
++ int addr, threshold;
++ unsigned o;
++ EFRM_DO_DEBUG(int n_save = n);
++
++ DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, n));
++ DEBUG_ALLOC(efrm_buddy_dump(b));
++ EFRM_ASSERT(b);
++ EFRM_ASSERT(n <= 1u << b->order);
++
++ if (!n)
++ return;
++
++ threshold = (1 << b->order) - n;
++ o = fls(n);
++
++ while (n) {
++ while (((unsigned)1 << o) > n)
++ --o;
++ EFRM_ASSERT(((unsigned)1 << o) <= n);
++ addr = __efrm_buddy_reserve_at_end(b, o, threshold);
++ EFRM_ASSERT(addr >= (1 << b->order) - n_save);
++ n -= 1 << o;
++ }
++ DEBUG_ALLOC(efrm_buddy_dump(b));
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/buffer_table.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/buffer_table.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/buffer_table.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/buffer_table.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,210 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains abstraction of the buffer table on the NIC.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++** Might be worth keeping a bitmap of which entries are clear. Then we
++** wouldn't need to clear them all again when we free an allocation.
++*/
++
++#include <ci/efrm/debug.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efrm/nic_table.h>
++#include <ci/efrm/buffer_table.h>
++#include <ci/efrm/buddy.h>
++
++/*! State of the NIC buffer table: a buddy allocator over the table
++ * entries, guarded by a spinlock. */
++struct efrm_buffer_table {
++ spinlock_t lock;
++ struct efrm_buddy_allocator buddy;
++};
++
++/* Efab buffer state. */
++static struct efrm_buffer_table efrm_buffers;
++
++int efrm_buffer_table_ctor(unsigned low, unsigned high)
++{
++ int log2_n_entries, rc;
++
++ EFRM_ASSERT(high > 0);
++ EFRM_ASSERT(low < high);
++
++ EFRM_TRACE("efrm_buffer_table_ctor: low=%u high=%u", low, high);
++ EFRM_NOTICE("efrm_buffer_table_ctor: low=%u high=%u", low, high);
++
++ log2_n_entries = fls(high - 1);
++
++ rc = efrm_buddy_ctor(&efrm_buffers.buddy, log2_n_entries);
++ if (rc < 0) {
++ EFRM_ERR("efrm_buffer_table_ctor: efrm_buddy_ctor(%d) "
++ "failed (%d)", log2_n_entries, rc);
++ return rc;
++ }
++
++ spin_lock_init(&efrm_buffers.lock);
++
++ efrm_buddy_reserve_at_start(&efrm_buffers.buddy, low);
++ efrm_buddy_reserve_at_end(&efrm_buffers.buddy,
++ (1 << log2_n_entries) - high);
++
++ EFRM_TRACE("efrm_buffer_table_ctor: done");
++
++ return 0;
++}
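++
++/* Editorial worked example: with low = 100 and high = 5000 the buddy
++ * is sized at 2^fls(4999) = 8192 entries; the first 100 and the last
++ * 8192 - 5000 = 3192 entries are reserved, leaving buffer table IDs
++ * in [100, 5000) allocatable. */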
++
++void efrm_buffer_table_dtor(void)
++{
++ /* ?? debug check that all allocations have been freed? */
++
++ spin_lock_destroy(&efrm_buffers.lock);
++ efrm_buddy_dtor(&efrm_buffers.buddy);
++
++ EFRM_TRACE("efrm_buffer_table_dtor: done");
++}
++
++/**********************************************************************/
++
++int
++efrm_buffer_table_alloc(unsigned order,
++ struct efhw_buffer_table_allocation *a)
++{
++ irq_flags_t lock_flags;
++ int rc;
++
++ EFRM_ASSERT(&efrm_buffers.buddy);
++ EFRM_ASSERT(a);
++
++ /* Round up to multiple of two, as the buffer clear logic works in
++ * pairs when not in "full" mode. */
++ order = max_t(unsigned, order, 1);
++
++ spin_lock_irqsave(&efrm_buffers.lock, lock_flags);
++ rc = efrm_buddy_alloc(&efrm_buffers.buddy, order);
++ spin_unlock_irqrestore(&efrm_buffers.lock, lock_flags);
++
++ if (rc < 0) {
++ EFRM_ERR("efrm_buffer_table_alloc: failed (n=%ld) rc %d",
++ 1ul << order, rc);
++ return rc;
++ }
++
++ EFRM_TRACE("efrm_buffer_table_alloc: base=%d n=%ld",
++ rc, 1ul << order);
++ a->order = order;
++ a->base = (unsigned)rc;
++ return 0;
++}
++
++void efrm_buffer_table_free(struct efhw_buffer_table_allocation *a)
++{
++ irq_flags_t lock_flags;
++ struct efhw_nic *nic;
++ int nic_i;
++
++ EFRM_ASSERT(&efrm_buffers.buddy);
++ EFRM_ASSERT(a);
++ EFRM_ASSERT(a->base != -1);
++ EFRM_ASSERT((unsigned long)a->base + (1ul << a->order) <=
++ efrm_buddy_size(&efrm_buffers.buddy));
++
++ EFRM_TRACE("efrm_buffer_table_free: base=%d n=%ld",
++ a->base, (1ul << a->order));
++
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ efhw_nic_buffer_table_clear(nic, a->base, 1ul << a->order);
++
++ spin_lock_irqsave(&efrm_buffers.lock, lock_flags);
++ efrm_buddy_free(&efrm_buffers.buddy, a->base, a->order);
++ spin_unlock_irqrestore(&efrm_buffers.lock, lock_flags);
++
++ EFRM_DO_DEBUG(a->base = a->order = -1);
++}
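++
++/* Editorial note on the ordering above: the entries are cleared on
++ * every NIC before the range is handed back to the buddy allocator,
++ * so a concurrent efrm_buffer_table_alloc cannot be given entries
++ * that still carry the previous owner's mappings. */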
++
++/**********************************************************************/
++
++void
++efrm_buffer_table_set(struct efhw_buffer_table_allocation *a,
++ unsigned i, dma_addr_t dma_addr, int owner)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++
++ EFRM_ASSERT(a);
++ EFRM_ASSERT(i < (unsigned)1 << a->order);
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ efhw_nic_buffer_table_set(nic, dma_addr, EFHW_NIC_PAGE_SIZE,
++ 0, owner, a->base + i);
++ /* NB. No commit here. The caller should call
++ efrm_buffer_table_commit. There are underlying hardware
++ constraints regarding the number of buffer table entries
++ which can be pushed before committing. */
++}
++
++unsigned long efrm_buffer_table_size(void)
++{
++ return efrm_buddy_size(&efrm_buffers.buddy);
++}
++
++/**********************************************************************/
++
++int
++efrm_page_register(dma_addr_t dma_addr, int owner,
++ efhw_buffer_addr_t *buf_addr_out)
++{
++ struct efhw_buffer_table_allocation alloc;
++ int rc;
++
++ rc = efrm_buffer_table_alloc(0, &alloc);
++ if (rc == 0) {
++ efrm_buffer_table_set(&alloc, 0, dma_addr, owner);
++ efrm_buffer_table_commit();
++ *buf_addr_out = EFHW_BUFFER_ADDR(alloc.base, 0);
++ }
++ return rc;
++}
++EXPORT_SYMBOL(efrm_page_register);
++
++void efrm_page_unregister(efhw_buffer_addr_t buf_addr)
++{
++ struct efhw_buffer_table_allocation alloc;
++
++ alloc.order = 0;
++ alloc.base = EFHW_BUFFER_PAGE(buf_addr);
++ efrm_buffer_table_free(&alloc);
++}
++EXPORT_SYMBOL(efrm_page_unregister);
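++
++/* Editorial usage sketch for the single-page helpers (dma_addr and
++ * owner are assumed to come from the caller's DMA mapping):
++ *
++ *     efhw_buffer_addr_t buf_addr;
++ *     int rc;
++ *
++ *     rc = efrm_page_register(dma_addr, owner, &buf_addr);
++ *     if (rc == 0) {
++ *             ... hand buf_addr to the hardware ...
++ *             efrm_page_unregister(buf_addr);
++ *     }
++ */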
++
++void efrm_buffer_table_commit(void)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ efhw_nic_buffer_table_commit(nic);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,68 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC hardware interface common
++ * definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_EFAB_HARDWARE_COMMON_H__
++#define __CI_DRIVER_EFAB_HARDWARE_COMMON_H__
++
++/*----------------------------------------------------------------------------
++ *
++ * EtherFabric constants
++ *
++ *---------------------------------------------------------------------------*/
++
++#define EFHW_1K 0x00000400u
++#define EFHW_2K 0x00000800u
++#define EFHW_4K 0x00001000u
++#define EFHW_8K 0x00002000u
++#define EFHW_16K 0x00004000u
++#define EFHW_32K 0x00008000u
++#define EFHW_64K 0x00010000u
++#define EFHW_128K 0x00020000u
++#define EFHW_256K 0x00040000u
++#define EFHW_512K 0x00080000u
++#define EFHW_1M 0x00100000u
++#define EFHW_2M 0x00200000u
++#define EFHW_4M 0x00400000u
++#define EFHW_8M 0x00800000u
++#define EFHW_16M 0x01000000u
++#define EFHW_32M 0x02000000u
++#define EFHW_48M 0x03000000u
++#define EFHW_64M 0x04000000u
++#define EFHW_128M 0x08000000u
++#define EFHW_256M 0x10000000u
++#define EFHW_512M 0x20000000u
++#define EFHW_1G 0x40000000u
++#define EFHW_2G 0x80000000u
++#define EFHW_4G 0x100000000ULL
++#define EFHW_8G 0x200000000ULL
++
++#endif /* __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1149 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) core register
++ * definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#define FALCON_EXTENDED_P_BAR 1
++
++/*************---- Bus Interface Unit Registers C Header ----*************/
++#define IOM_IND_ADR_REG_OFST 0x0 /* IO-mapped indirect access address
++ register */
++ #define IOM_AUTO_ADR_INC_EN_LBN 16
++ #define IOM_AUTO_ADR_INC_EN_WIDTH 1
++ #define IOM_IND_ADR_LBN 0
++ #define IOM_IND_ADR_WIDTH 16
++#define IOM_IND_DAT_REG_OFST 0x4 /* IO-mapped indirect access data register */
++ #define IOM_IND_DAT_LBN 0
++ #define IOM_IND_DAT_WIDTH 32
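++
++/* Editorial note: every field in this header is described by an _LBN
++ * (low bit number within the register) and a _WIDTH (field width in
++ * bits).  For a register value q that fits in 64 bits, a field
++ * narrower than 64 bits can be extracted with, for example,
++ *
++ *     #define FALCON_FIELD(q, name) \
++ *             (((q) >> name##_LBN) & \
++ *              (((uint64_t)1 << name##_WIDTH) - 1))
++ *
++ * (FALCON_FIELD is a hypothetical macro, not defined here; fields
++ * with LBN >= 64 live in the upper half of 128-bit registers.) */
++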
++#define ADR_REGION_REG_KER_OFST 0x0 /* Address region register */
++#define ADR_REGION_REG_OFST 0x0 /* Address region register */
++ #define ADR_REGION3_LBN 96
++ #define ADR_REGION3_WIDTH 18
++ #define ADR_REGION2_LBN 64
++ #define ADR_REGION2_WIDTH 18
++ #define ADR_REGION1_LBN 32
++ #define ADR_REGION1_WIDTH 18
++ #define ADR_REGION0_LBN 0
++ #define ADR_REGION0_WIDTH 18
++#define INT_EN_REG_KER_OFST 0x10 /* Kernel driver Interrupt enable register */
++ #define KER_INT_CHAR_LBN 4
++ #define KER_INT_CHAR_WIDTH 1
++ #define KER_INT_KER_LBN 3
++ #define KER_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_EN_KER_LBN 2
++ #define ILL_ADR_ERR_INT_EN_KER_WIDTH 1
++ #define SRM_PERR_INT_EN_KER_LBN 1
++ #define SRM_PERR_INT_EN_KER_WIDTH 1
++ #define DRV_INT_EN_KER_LBN 0
++ #define DRV_INT_EN_KER_WIDTH 1
++#define INT_EN_REG_CHAR_OFST 0x20 /* Char Driver interrupt enable register */
++ #define CHAR_INT_CHAR_LBN 4
++ #define CHAR_INT_CHAR_WIDTH 1
++ #define CHAR_INT_KER_LBN 3
++ #define CHAR_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_EN_CHAR_LBN 2
++ #define ILL_ADR_ERR_INT_EN_CHAR_WIDTH 1
++ #define SRM_PERR_INT_EN_CHAR_LBN 1
++ #define SRM_PERR_INT_EN_CHAR_WIDTH 1
++ #define DRV_INT_EN_CHAR_LBN 0
++ #define DRV_INT_EN_CHAR_WIDTH 1
++#define INT_ADR_REG_KER_OFST 0x30 /* Interrupt host address for Kernel driver */
++ #define INT_ADR_KER_LBN 0
++ #define INT_ADR_KER_WIDTH 64
++ #define DRV_INT_KER_LBN 32
++ #define DRV_INT_KER_WIDTH 1
++ #define EV_FF_HALF_INT_KER_LBN 3
++ #define EV_FF_HALF_INT_KER_WIDTH 1
++ #define EV_FF_FULL_INT_KER_LBN 2
++ #define EV_FF_FULL_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_KER_LBN 1
++ #define ILL_ADR_ERR_INT_KER_WIDTH 1
++ #define SRAM_PERR_INT_KER_LBN 0
++ #define SRAM_PERR_INT_KER_WIDTH 1
++#define INT_ADR_REG_CHAR_OFST 0x40 /* Interrupt host address for Char driver */
++ #define INT_ADR_CHAR_LBN 0
++ #define INT_ADR_CHAR_WIDTH 64
++ #define DRV_INT_CHAR_LBN 32
++ #define DRV_INT_CHAR_WIDTH 1
++ #define EV_FF_HALF_INT_CHAR_LBN 3
++ #define EV_FF_HALF_INT_CHAR_WIDTH 1
++ #define EV_FF_FULL_INT_CHAR_LBN 2
++ #define EV_FF_FULL_INT_CHAR_WIDTH 1
++ #define ILL_ADR_ERR_INT_CHAR_LBN 1
++ #define ILL_ADR_ERR_INT_CHAR_WIDTH 1
++ #define SRAM_PERR_INT_CHAR_LBN 0
++ #define SRAM_PERR_INT_CHAR_WIDTH 1
++#define INT_ISR0_B0_OFST 0x90 /* B0 only */
++#define INT_ISR1_B0_OFST 0xA0
++#define INT_ACK_REG_KER_A1_OFST 0x50 /* Kernel interrupt acknowledge register */
++ #define RESERVED_LBN 0
++ #define RESERVED_WIDTH 32
++#define INT_ACK_REG_CHAR_A1_OFST 0x60 /* CHAR interrupt acknowledge register */
++ #define RESERVED_LBN 0
++ #define RESERVED_WIDTH 32
++/*************---- Global CSR Registers C Header ----*************/
++#define STRAP_REG_KER_OFST 0x200 /* ASIC strap status register */
++#define STRAP_REG_OFST 0x200 /* ASIC strap status register */
++ #define ONCHIP_SRAM_LBN 16
++ #define ONCHIP_SRAM_WIDTH 0
++ #define STRAP_ISCSI_EN_LBN 3
++ #define STRAP_ISCSI_EN_WIDTH 1
++ #define STRAP_PINS_LBN 0
++ #define STRAP_PINS_WIDTH 3
++#define GPIO_CTL_REG_KER_OFST 0x210 /* GPIO control register */
++#define GPIO_CTL_REG_OFST 0x210 /* GPIO control register */
++ #define GPIO_OEN_LBN 24
++ #define GPIO_OEN_WIDTH 4
++ #define GPIO_OUT_LBN 16
++ #define GPIO_OUT_WIDTH 4
++ #define GPIO_IN_LBN 8
++ #define GPIO_IN_WIDTH 4
++ #define GPIO_PWRUP_VALUE_LBN 0
++ #define GPIO_PWRUP_VALUE_WIDTH 4
++#define GLB_CTL_REG_KER_OFST 0x220 /* Global control register */
++#define GLB_CTL_REG_OFST 0x220 /* Global control register */
++ #define SWRST_LBN 0
++ #define SWRST_WIDTH 1
++#define FATAL_INTR_REG_KER_OFST 0x230 /* Fatal interrupt register for Kernel */
++ #define PCI_BUSERR_INT_KER_EN_LBN 43
++ #define PCI_BUSERR_INT_KER_EN_WIDTH 1
++ #define SRAM_OOB_INT_KER_EN_LBN 42
++ #define SRAM_OOB_INT_KER_EN_WIDTH 1
++ #define BUFID_OOB_INT_KER_EN_LBN 41
++ #define BUFID_OOB_INT_KER_EN_WIDTH 1
++ #define MEM_PERR_INT_KER_EN_LBN 40
++ #define MEM_PERR_INT_KER_EN_WIDTH 1
++ #define RBUF_OWN_INT_KER_EN_LBN 39
++ #define RBUF_OWN_INT_KER_EN_WIDTH 1
++ #define TBUF_OWN_INT_KER_EN_LBN 38
++ #define TBUF_OWN_INT_KER_EN_WIDTH 1
++ #define RDESCQ_OWN_INT_KER_EN_LBN 37
++ #define RDESCQ_OWN_INT_KER_EN_WIDTH 1
++ #define TDESCQ_OWN_INT_KER_EN_LBN 36
++ #define TDESCQ_OWN_INT_KER_EN_WIDTH 1
++ #define EVQ_OWN_INT_KER_EN_LBN 35
++ #define EVQ_OWN_INT_KER_EN_WIDTH 1
++ #define EVFF_OFLO_INT_KER_EN_LBN 34
++ #define EVFF_OFLO_INT_KER_EN_WIDTH 1
++ #define ILL_ADR_INT_KER_EN_LBN 33
++ #define ILL_ADR_INT_KER_EN_WIDTH 1
++ #define SRM_PERR_INT_KER_EN_LBN 32
++ #define SRM_PERR_INT_KER_EN_WIDTH 1
++ #define PCI_BUSERR_INT_KER_LBN 11
++ #define PCI_BUSERR_INT_KER_WIDTH 1
++ #define SRAM_OOB_INT_KER_LBN 10
++ #define SRAM_OOB_INT_KER_WIDTH 1
++ #define BUFID_OOB_INT_KER_LBN 9
++ #define BUFID_OOB_INT_KER_WIDTH 1
++ #define MEM_PERR_INT_KER_LBN 8
++ #define MEM_PERR_INT_KER_WIDTH 1
++ #define RBUF_OWN_INT_KER_LBN 7
++ #define RBUF_OWN_INT_KER_WIDTH 1
++ #define TBUF_OWN_INT_KER_LBN 6
++ #define TBUF_OWN_INT_KER_WIDTH 1
++ #define RDESCQ_OWN_INT_KER_LBN 5
++ #define RDESCQ_OWN_INT_KER_WIDTH 1
++ #define TDESCQ_OWN_INT_KER_LBN 4
++ #define TDESCQ_OWN_INT_KER_WIDTH 1
++ #define EVQ_OWN_INT_KER_LBN 3
++ #define EVQ_OWN_INT_KER_WIDTH 1
++ #define EVFF_OFLO_INT_KER_LBN 2
++ #define EVFF_OFLO_INT_KER_WIDTH 1
++ #define ILL_ADR_INT_KER_LBN 1
++ #define ILL_ADR_INT_KER_WIDTH 1
++ #define SRM_PERR_INT_KER_LBN 0
++ #define SRM_PERR_INT_KER_WIDTH 1
++#define FATAL_INTR_REG_OFST 0x240 /* Fatal interrupt register for Char */
++ #define PCI_BUSERR_INT_CHAR_EN_LBN 43
++ #define PCI_BUSERR_INT_CHAR_EN_WIDTH 1
++ #define SRAM_OOB_INT_CHAR_EN_LBN 42
++ #define SRAM_OOB_INT_CHAR_EN_WIDTH 1
++ #define BUFID_OOB_INT_CHAR_EN_LBN 41
++ #define BUFID_OOB_INT_CHAR_EN_WIDTH 1
++ #define MEM_PERR_INT_CHAR_EN_LBN 40
++ #define MEM_PERR_INT_CHAR_EN_WIDTH 1
++ #define RBUF_OWN_INT_CHAR_EN_LBN 39
++ #define RBUF_OWN_INT_CHAR_EN_WIDTH 1
++ #define TBUF_OWN_INT_CHAR_EN_LBN 38
++ #define TBUF_OWN_INT_CHAR_EN_WIDTH 1
++ #define RDESCQ_OWN_INT_CHAR_EN_LBN 37
++ #define RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define TDESCQ_OWN_INT_CHAR_EN_LBN 36
++ #define TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define EVQ_OWN_INT_CHAR_EN_LBN 35
++ #define EVQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define EVFF_OFLO_INT_CHAR_EN_LBN 34
++ #define EVFF_OFLO_INT_CHAR_EN_WIDTH 1
++ #define ILL_ADR_INT_CHAR_EN_LBN 33
++ #define ILL_ADR_INT_CHAR_EN_WIDTH 1
++ #define SRM_PERR_INT_CHAR_EN_LBN 32
++ #define SRM_PERR_INT_CHAR_EN_WIDTH 1
++ #define FATAL_INTR_REG_EN_BITS 0xffffffffffffffffULL
++ #define PCI_BUSERR_INT_CHAR_LBN 11
++ #define PCI_BUSERR_INT_CHAR_WIDTH 1
++ #define SRAM_OOB_INT_CHAR_LBN 10
++ #define SRAM_OOB_INT_CHAR_WIDTH 1
++ #define BUFID_OOB_INT_CHAR_LBN 9
++ #define BUFID_OOB_INT_CHAR_WIDTH 1
++ #define MEM_PERR_INT_CHAR_LBN 8
++ #define MEM_PERR_INT_CHAR_WIDTH 1
++ #define RBUF_OWN_INT_CHAR_LBN 7
++ #define RBUF_OWN_INT_CHAR_WIDTH 1
++ #define TBUF_OWN_INT_CHAR_LBN 6
++ #define TBUF_OWN_INT_CHAR_WIDTH 1
++ #define RDESCQ_OWN_INT_CHAR_LBN 5
++ #define RDESCQ_OWN_INT_CHAR_WIDTH 1
++ #define TDESCQ_OWN_INT_CHAR_LBN 4
++ #define TDESCQ_OWN_INT_CHAR_WIDTH 1
++ #define EVQ_OWN_INT_CHAR_LBN 3
++ #define EVQ_OWN_INT_CHAR_WIDTH 1
++ #define EVFF_OFLO_INT_CHAR_LBN 2
++ #define EVFF_OFLO_INT_CHAR_WIDTH 1
++ #define ILL_ADR_INT_CHAR_LBN 1
++ #define ILL_ADR_INT_CHAR_WIDTH 1
++ #define SRM_PERR_INT_CHAR_LBN 0
++ #define SRM_PERR_INT_CHAR_WIDTH 1
++#define DP_CTRL_REG_OFST 0x250 /* Datapath control register */
++ #define FLS_EVQ_ID_LBN 0
++ #define FLS_EVQ_ID_WIDTH 12
++#define MEM_STAT_REG_KER_OFST 0x260 /* Memory status register */
++#define MEM_STAT_REG_OFST 0x260 /* Memory status register */
++ #define MEM_PERR_VEC_LBN 53
++ #define MEM_PERR_VEC_WIDTH 38
++ #define MBIST_CORR_LBN 38
++ #define MBIST_CORR_WIDTH 15
++ #define MBIST_ERR_LBN 0
++ #define MBIST_ERR_WIDTH 38
++#define DEBUG_REG_KER_OFST 0x270 /* Debug register */
++#define DEBUG_REG_OFST 0x270 /* Debug register */
++ #define DEBUG_BLK_SEL2_LBN 47
++ #define DEBUG_BLK_SEL2_WIDTH 3
++ #define DEBUG_BLK_SEL1_LBN 44
++ #define DEBUG_BLK_SEL1_WIDTH 3
++ #define DEBUG_BLK_SEL0_LBN 41
++ #define DEBUG_BLK_SEL0_WIDTH 3
++ #define MISC_DEBUG_ADDR_LBN 36
++ #define MISC_DEBUG_ADDR_WIDTH 5
++ #define SERDES_DEBUG_ADDR_LBN 31
++ #define SERDES_DEBUG_ADDR_WIDTH 5
++ #define EM_DEBUG_ADDR_LBN 26
++ #define EM_DEBUG_ADDR_WIDTH 5
++ #define SR_DEBUG_ADDR_LBN 21
++ #define SR_DEBUG_ADDR_WIDTH 5
++ #define EV_DEBUG_ADDR_LBN 16
++ #define EV_DEBUG_ADDR_WIDTH 5
++ #define RX_DEBUG_ADDR_LBN 11
++ #define RX_DEBUG_ADDR_WIDTH 5
++ #define TX_DEBUG_ADDR_LBN 6
++ #define TX_DEBUG_ADDR_WIDTH 5
++ #define BIU_DEBUG_ADDR_LBN 1
++ #define BIU_DEBUG_ADDR_WIDTH 5
++ #define DEBUG_EN_LBN 0
++ #define DEBUG_EN_WIDTH 1
++#define DRIVER_REG0_KER_OFST 0x280 /* Driver scratch register 0 */
++#define DRIVER_REG0_OFST 0x280 /* Driver scratch register 0 */
++ #define DRIVER_DW0_LBN 0
++ #define DRIVER_DW0_WIDTH 32
++#define DRIVER_REG1_KER_OFST 0x290 /* Driver scratch register 1 */
++#define DRIVER_REG1_OFST 0x290 /* Driver scratch register 1 */
++ #define DRIVER_DW1_LBN 0
++ #define DRIVER_DW1_WIDTH 32
++#define DRIVER_REG2_KER_OFST 0x2A0 /* Driver scratch register 2 */
++#define DRIVER_REG2_OFST 0x2A0 /* Driver scratch register 2 */
++ #define DRIVER_DW2_LBN 0
++ #define DRIVER_DW2_WIDTH 32
++#define DRIVER_REG3_KER_OFST 0x2B0 /* Driver scratch register 3 */
++#define DRIVER_REG3_OFST 0x2B0 /* Driver scratch register 3 */
++ #define DRIVER_DW3_LBN 0
++ #define DRIVER_DW3_WIDTH 32
++#define DRIVER_REG4_KER_OFST 0x2C0 /* Driver scratch register 4 */
++#define DRIVER_REG4_OFST 0x2C0 /* Driver scratch register 4 */
++ #define DRIVER_DW4_LBN 0
++ #define DRIVER_DW4_WIDTH 32
++#define DRIVER_REG5_KER_OFST 0x2D0 /* Driver scratch register 5 */
++#define DRIVER_REG5_OFST 0x2D0 /* Driver scratch register 5 */
++ #define DRIVER_DW5_LBN 0
++ #define DRIVER_DW5_WIDTH 32
++#define DRIVER_REG6_KER_OFST 0x2E0 /* Driver scratch register 6 */
++#define DRIVER_REG6_OFST 0x2E0 /* Driver scratch register 6 */
++ #define DRIVER_DW6_LBN 0
++ #define DRIVER_DW6_WIDTH 32
++#define DRIVER_REG7_KER_OFST 0x2F0 /* Driver scratch register 7 */
++#define DRIVER_REG7_OFST 0x2F0 /* Driver scratch register 7 */
++ #define DRIVER_DW7_LBN 0
++ #define DRIVER_DW7_WIDTH 32
++#define ALTERA_BUILD_REG_KER_OFST 0x300 /* Altera build register */
++#define ALTERA_BUILD_REG_OFST 0x300 /* Altera build register */
++ #define ALTERA_BUILD_VER_LBN 0
++ #define ALTERA_BUILD_VER_WIDTH 32
++
++/* So-called CSR spare register
++ - contains separate parity enable bits for the various internal memory
++ blocks */
++#define MEM_PARITY_ERR_EN_REG_KER 0x310
++#define MEM_PARITY_ALL_BLOCKS_EN_LBN 64
++#define MEM_PARITY_ALL_BLOCKS_EN_WIDTH 38
++#define MEM_PARITY_TX_DATA_EN_LBN 72
++#define MEM_PARITY_TX_DATA_EN_WIDTH 2
++
++/*************---- Event & Timer Module Registers C Header ----*************/
++
++#if FALCON_EXTENDED_P_BAR
++#define EVQ_RPTR_REG_KER_OFST 0x11B00 /* Event queue read pointer register */
++#else
++#define EVQ_RPTR_REG_KER_OFST 0x1B00 /* Event queue read pointer register */
++#endif
++
++#define EVQ_RPTR_REG_OFST 0xFA0000 /* Event queue read pointer register
++ array. */
++ #define EVQ_RPTR_LBN 0
++ #define EVQ_RPTR_WIDTH 15
++
++#if FALCON_EXTENDED_P_BAR
++#define EVQ_PTR_TBL_KER_OFST 0x11A00 /* Event queue pointer table for kernel
++ access */
++#else
++#define EVQ_PTR_TBL_KER_OFST 0x1A00 /* Event queue pointer table for kernel
++ access */
++#endif
++
++#define EVQ_PTR_TBL_CHAR_OFST 0xF60000 /* Event queue pointer table for char
++ direct access */
++ #define EVQ_WKUP_OR_INT_EN_LBN 39
++ #define EVQ_WKUP_OR_INT_EN_WIDTH 1
++ #define EVQ_NXT_WPTR_LBN 24
++ #define EVQ_NXT_WPTR_WIDTH 15
++ #define EVQ_EN_LBN 23
++ #define EVQ_EN_WIDTH 1
++ #define EVQ_SIZE_LBN 20
++ #define EVQ_SIZE_WIDTH 3
++ #define EVQ_BUF_BASE_ID_LBN 0
++ #define EVQ_BUF_BASE_ID_WIDTH 20
++#define TIMER_CMD_REG_KER_OFST 0x420 /* Timer table for kernel access.
++ Page-mapped */
++#define TIMER_CMD_REG_PAGE4_OFST 0x8420 /* Timer table for user-level access.
++ Page-mapped. For lowest 1K queues.
++ */
++#define TIMER_CMD_REG_PAGE123K_OFST 0x1000420 /* Timer table for user-level
++ access. Page-mapped.
++ For upper 3K queues. */
++#define TIMER_TBL_OFST 0xF70000 /* Timer table for char driver direct access */
++ #define TIMER_MODE_LBN 12
++ #define TIMER_MODE_WIDTH 2
++ #define TIMER_VAL_LBN 0
++ #define TIMER_VAL_WIDTH 12
++ #define TIMER_MODE_INT_HLDOFF 2
++ #define EVQ_BUF_SIZE_LBN 0
++ #define EVQ_BUF_SIZE_WIDTH 1
++#define DRV_EV_REG_KER_OFST 0x440 /* Driver generated event register */
++#define DRV_EV_REG_OFST 0x440 /* Driver generated event register */
++ #define DRV_EV_QID_LBN 64
++ #define DRV_EV_QID_WIDTH 12
++ #define DRV_EV_DATA_LBN 0
++ #define DRV_EV_DATA_WIDTH 64
++#define EVQ_CTL_REG_KER_OFST 0x450 /* Event queue control register */
++#define EVQ_CTL_REG_OFST 0x450 /* Event queue control register */
++ #define RX_EVQ_WAKEUP_MASK_B0_LBN 15
++ #define RX_EVQ_WAKEUP_MASK_B0_WIDTH 6
++ #define EVQ_OWNERR_CTL_LBN 14
++ #define EVQ_OWNERR_CTL_WIDTH 1
++ #define EVQ_FIFO_AF_TH_LBN 8
++ #define EVQ_FIFO_AF_TH_WIDTH 6
++ #define EVQ_FIFO_NOTAF_TH_LBN 0
++ #define EVQ_FIFO_NOTAF_TH_WIDTH 6
++/*************---- SRAM Module Registers C Header ----*************/
++#define BUF_TBL_CFG_REG_KER_OFST 0x600 /* Buffer table configuration register */
++#define BUF_TBL_CFG_REG_OFST 0x600 /* Buffer table configuration register */
++ #define BUF_TBL_MODE_LBN 3
++ #define BUF_TBL_MODE_WIDTH 1
++#define SRM_RX_DC_CFG_REG_KER_OFST 0x610 /* SRAM receive descriptor cache
++ configuration register */
++#define SRM_RX_DC_CFG_REG_OFST 0x610 /* SRAM receive descriptor cache
++ configuration register */
++ #define SRM_RX_DC_BASE_ADR_LBN 0
++ #define SRM_RX_DC_BASE_ADR_WIDTH 21
++#define SRM_TX_DC_CFG_REG_KER_OFST 0x620 /* SRAM transmit descriptor cache
++ configuration register */
++#define SRM_TX_DC_CFG_REG_OFST 0x620 /* SRAM transmit descriptor cache
++ configuration register */
++ #define SRM_TX_DC_BASE_ADR_LBN 0
++ #define SRM_TX_DC_BASE_ADR_WIDTH 21
++#define SRM_CFG_REG_KER_OFST 0x630 /* SRAM configuration register */
++#define SRM_CFG_REG_OFST 0x630 /* SRAM configuration register */
++ #define SRAM_OOB_ADR_INTEN_LBN 5
++ #define SRAM_OOB_ADR_INTEN_WIDTH 1
++ #define SRAM_OOB_BUF_INTEN_LBN 4
++ #define SRAM_OOB_BUF_INTEN_WIDTH 1
++ #define SRAM_BT_INIT_EN_LBN 3
++ #define SRAM_BT_INIT_EN_WIDTH 1
++ #define SRM_NUM_BANK_LBN 2
++ #define SRM_NUM_BANK_WIDTH 1
++ #define SRM_BANK_SIZE_LBN 0
++ #define SRM_BANK_SIZE_WIDTH 2
++#define BUF_TBL_UPD_REG_KER_OFST 0x650 /* Buffer table update register */
++#define BUF_TBL_UPD_REG_OFST 0x650 /* Buffer table update register */
++ #define BUF_UPD_CMD_LBN 63
++ #define BUF_UPD_CMD_WIDTH 1
++ #define BUF_CLR_CMD_LBN 62
++ #define BUF_CLR_CMD_WIDTH 1
++ #define BUF_CLR_END_ID_LBN 32
++ #define BUF_CLR_END_ID_WIDTH 20
++ #define BUF_CLR_START_ID_LBN 0
++ #define BUF_CLR_START_ID_WIDTH 20
++#define SRM_UPD_EVQ_REG_KER_OFST 0x660 /* SRAM update event queue register */
++#define SRM_UPD_EVQ_REG_OFST 0x660 /* SRAM update event queue register */
++ #define SRM_UPD_EVQ_ID_LBN 0
++ #define SRM_UPD_EVQ_ID_WIDTH 12
++#define SRAM_PARITY_REG_KER_OFST 0x670 /* SRAM parity register. */
++#define SRAM_PARITY_REG_OFST 0x670 /* SRAM parity register. */
++ #define FORCE_SRAM_PERR_LBN 0
++ #define FORCE_SRAM_PERR_WIDTH 1
++
++#if FALCON_EXTENDED_P_BAR
++#define BUF_HALF_TBL_KER_OFST 0x18000 /* Buffer table in half buffer table
++ mode direct access by kernel driver */
++#else
++#define BUF_HALF_TBL_KER_OFST 0x8000 /* Buffer table in half buffer table
++ mode direct access by kernel driver */
++#endif
++
++
++#define BUF_HALF_TBL_OFST 0x800000 /* Buffer table in half buffer table mode
++ direct access by char driver */
++ #define BUF_ADR_HBUF_ODD_LBN 44
++ #define BUF_ADR_HBUF_ODD_WIDTH 20
++ #define BUF_OWNER_ID_HBUF_ODD_LBN 32
++ #define BUF_OWNER_ID_HBUF_ODD_WIDTH 12
++ #define BUF_ADR_HBUF_EVEN_LBN 12
++ #define BUF_ADR_HBUF_EVEN_WIDTH 20
++ #define BUF_OWNER_ID_HBUF_EVEN_LBN 0
++ #define BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
++
++
++#if FALCON_EXTENDED_P_BAR
++#define BUF_FULL_TBL_KER_OFST 0x18000 /* Buffer table in full buffer table
++ mode direct access by kernel driver */
++#else
++#define BUF_FULL_TBL_KER_OFST 0x8000 /* Buffer table in full buffer table mode
++ direct access by kernel driver */
++#endif
++
++
++
++
++#define BUF_FULL_TBL_OFST 0x800000 /* Buffer table in full buffer table mode
++ direct access by char driver */
++ #define IP_DAT_BUF_SIZE_LBN 50
++ #define IP_DAT_BUF_SIZE_WIDTH 1
++ #define BUF_ADR_REGION_LBN 48
++ #define BUF_ADR_REGION_WIDTH 2
++ #define BUF_ADR_FBUF_LBN 14
++ #define BUF_ADR_FBUF_WIDTH 34
++ #define BUF_OWNER_ID_FBUF_LBN 0
++ #define BUF_OWNER_ID_FBUF_WIDTH 14
++#define SRM_DBG_REG_OFST 0x3000000 /* SRAM debug access */
++ #define SRM_DBG_LBN 0
++ #define SRM_DBG_WIDTH 64
++/*************---- RX Datapath Registers C Header ----*************/
++
++#define RX_CFG_REG_KER_OFST 0x800 /* Receive configuration register */
++#define RX_CFG_REG_OFST 0x800 /* Receive configuration register */
++
++#if !defined(FALCON_64K_RXFIFO) && !defined(FALCON_PRE_02020029)
++# if !defined(FALCON_128K_RXFIFO)
++# define FALCON_128K_RXFIFO
++# endif
++#endif
++
++#if defined(FALCON_128K_RXFIFO)
++
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 48
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 47
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 46
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 45
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 44
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 43
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_RDW_PATCH_EN_LBN 42 /* Non-head-of-line blocking */
++ #define RX_RDW_PATCH_EN_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 39
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 3
++ #define RX_OWNERR_CTL_B0_LBN 38
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 33
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 28
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 19
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 10
++ #define RX_XON_MAC_TH_B0_WIDTH 9
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 9
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#elif !defined(FALCON_PRE_02020029)
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 46
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 45
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 44
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 43
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 42
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 41
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 37
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 3
++ #define RX_OWNERR_CTL_B0_LBN 36
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 31
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 26
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 17
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 9
++ #define RX_XON_MAC_TH_B0_WIDTH 8
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 8
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#else
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 44
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 43
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 42
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 41
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 40
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 35
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 35
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 2
++ #define RX_OWNERR_CTL_B0_LBN 34
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 29
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 24
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 15
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 8
++ #define RX_XON_MAC_TH_B0_WIDTH 7
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 7
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#endif
++
++/* A0/A1 */
++ #define RX_PUSH_EN_A1_LBN 35
++ #define RX_PUSH_EN_A1_WIDTH 1
++ #define RX_PCI_BURST_SIZE_A1_LBN 31
++ #define RX_PCI_BURST_SIZE_A1_WIDTH 3
++ #define RX_OWNERR_CTL_A1_LBN 30
++ #define RX_OWNERR_CTL_A1_WIDTH 1
++ #define RX_XON_TX_TH_A1_LBN 25
++ #define RX_XON_TX_TH_A1_WIDTH 5
++ #define RX_XOFF_TX_TH_A1_LBN 20
++ #define RX_XOFF_TX_TH_A1_WIDTH 5
++ #define RX_USR_BUF_SIZE_A1_LBN 11
++ #define RX_USR_BUF_SIZE_A1_WIDTH 9
++ #define RX_XON_MAC_TH_A1_LBN 6
++ #define RX_XON_MAC_TH_A1_WIDTH 5
++ #define RX_XOFF_MAC_TH_A1_LBN 1
++ #define RX_XOFF_MAC_TH_A1_WIDTH 5
++ #define RX_XOFF_MAC_EN_A1_LBN 0
++ #define RX_XOFF_MAC_EN_A1_WIDTH 1
++
++#define RX_FILTER_CTL_REG_OFST 0x810 /* Receive filter control registers */
++ #define SCATTER_ENBL_NO_MATCH_Q_B0_LBN 40
++ #define SCATTER_ENBL_NO_MATCH_Q_B0_WIDTH 1
++ #define UDP_FULL_SRCH_LIMIT_LBN 32
++ #define UDP_FULL_SRCH_LIMIT_WIDTH 8
++ #define NUM_KER_LBN 24
++ #define NUM_KER_WIDTH 2
++ #define UDP_WILD_SRCH_LIMIT_LBN 16
++ #define UDP_WILD_SRCH_LIMIT_WIDTH 8
++ #define TCP_WILD_SRCH_LIMIT_LBN 8
++ #define TCP_WILD_SRCH_LIMIT_WIDTH 8
++ #define TCP_FULL_SRCH_LIMIT_LBN 0
++ #define TCP_FULL_SRCH_LIMIT_WIDTH 8
++#define RX_FLUSH_DESCQ_REG_KER_OFST 0x820 /* Receive flush descriptor queue
++ register */
++#define RX_FLUSH_DESCQ_REG_OFST 0x820 /* Receive flush descriptor queue
++ register */
++ #define RX_FLUSH_DESCQ_CMD_LBN 24
++ #define RX_FLUSH_DESCQ_CMD_WIDTH 1
++ #define RX_FLUSH_EVQ_ID_LBN 12
++ #define RX_FLUSH_EVQ_ID_WIDTH 12
++ #define RX_FLUSH_DESCQ_LBN 0
++ #define RX_FLUSH_DESCQ_WIDTH 12
++#define RX_DESC_UPD_REG_KER_OFST 0x830 /* Kernel receive descriptor update
++ register. Page-mapped */
++#define RX_DESC_UPD_REG_PAGE4_OFST 0x8830 /* Char & user receive descriptor
++ update register. Page-mapped.
++ For lowest 1K queues. */
++#define RX_DESC_UPD_REG_PAGE123K_OFST 0x1000830 /* Char & user receive
++ descriptor update register.
++ Page-mapped. For upper
++ 3K queues. */
++ #define RX_DESC_WPTR_LBN 96
++ #define RX_DESC_WPTR_WIDTH 12
++ #define RX_DESC_PUSH_CMD_LBN 95
++ #define RX_DESC_PUSH_CMD_WIDTH 1
++ #define RX_DESC_LBN 0
++ #define RX_DESC_WIDTH 64
++ #define RX_KER_DESC_LBN 0
++ #define RX_KER_DESC_WIDTH 64
++ #define RX_USR_DESC_LBN 0
++ #define RX_USR_DESC_WIDTH 32
++#define RX_DC_CFG_REG_KER_OFST 0x840 /* Receive descriptor cache
++ configuration register */
++#define RX_DC_CFG_REG_OFST 0x840 /* Receive descriptor cache
++ configuration register */
++ #define RX_DC_SIZE_LBN 0
++ #define RX_DC_SIZE_WIDTH 2
++#define RX_DC_PF_WM_REG_KER_OFST 0x850 /* Receive descriptor cache pre-fetch
++ watermark register */
++#define RX_DC_PF_WM_REG_OFST 0x850 /* Receive descriptor cache pre-fetch
++ watermark register */
++ #define RX_DC_PF_LWM_LO_LBN 0
++ #define RX_DC_PF_LWM_LO_WIDTH 6
++
++#define RX_RSS_TKEY_B0_OFST 0x860 /* RSS Toeplitz hash key (B0 only) */
++
++#define RX_NODESC_DROP_REG 0x880 /* Receive no-descriptor drop counter */
++ #define RX_NODESC_DROP_CNT_LBN 0
++ #define RX_NODESC_DROP_CNT_WIDTH 16
++
++#define XM_TX_CFG_REG_OFST 0x1230
++ #define XM_AUTO_PAD_LBN 5
++ #define XM_AUTO_PAD_WIDTH 1
++
++#define RX_FILTER_TBL0_OFST 0xF00000 /* Receive filter table - even entries */
++ #define RSS_EN_0_B0_LBN 110
++ #define RSS_EN_0_B0_WIDTH 1
++ #define SCATTER_EN_0_B0_LBN 109
++ #define SCATTER_EN_0_B0_WIDTH 1
++ #define TCP_UDP_0_LBN 108
++ #define TCP_UDP_0_WIDTH 1
++ #define RXQ_ID_0_LBN 96
++ #define RXQ_ID_0_WIDTH 12
++ #define DEST_IP_0_LBN 64
++ #define DEST_IP_0_WIDTH 32
++ #define DEST_PORT_TCP_0_LBN 48
++ #define DEST_PORT_TCP_0_WIDTH 16
++ #define SRC_IP_0_LBN 16
++ #define SRC_IP_0_WIDTH 32
++ #define SRC_TCP_DEST_UDP_0_LBN 0
++ #define SRC_TCP_DEST_UDP_0_WIDTH 16
++#define RX_FILTER_TBL1_OFST 0xF00010 /* Receive filter table - odd entries */
++ #define RSS_EN_1_B0_LBN 110
++ #define RSS_EN_1_B0_WIDTH 1
++ #define SCATTER_EN_1_B0_LBN 109
++ #define SCATTER_EN_1_B0_WIDTH 1
++ #define TCP_UDP_1_LBN 108
++ #define TCP_UDP_1_WIDTH 1
++ #define RXQ_ID_1_LBN 96
++ #define RXQ_ID_1_WIDTH 12
++ #define DEST_IP_1_LBN 64
++ #define DEST_IP_1_WIDTH 32
++ #define DEST_PORT_TCP_1_LBN 48
++ #define DEST_PORT_TCP_1_WIDTH 16
++ #define SRC_IP_1_LBN 16
++ #define SRC_IP_1_WIDTH 32
++ #define SRC_TCP_DEST_UDP_1_LBN 0
++ #define SRC_TCP_DEST_UDP_1_WIDTH 16
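++/* Illustrative sketch (not part of the original header): a full-match
++ * TCP entry for the even filter table spans more than 64 bits, so it
++ * is convenient to compose it as two 64-bit words.  saddr, daddr,
++ * sport, dport and rxq are hypothetical inputs:
++ *
++ *   uint64_t lo = ((uint64_t)dport << DEST_PORT_TCP_0_LBN)
++ *               | ((uint64_t)saddr << SRC_IP_0_LBN)
++ *               | ((uint64_t)sport << SRC_TCP_DEST_UDP_0_LBN);
++ *   uint64_t hi = (1ULL            << (TCP_UDP_0_LBN - 64))
++ *               | ((uint64_t)rxq   << (RXQ_ID_0_LBN - 64))
++ *               | ((uint64_t)daddr << (DEST_IP_0_LBN - 64));
++ */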
++
++#if FALCON_EXTENDED_P_BAR
++#define RX_DESC_PTR_TBL_KER_OFST 0x11800 /* Receive descriptor pointer
++ table - kernel access */
++#else
++#define RX_DESC_PTR_TBL_KER_OFST 0x1800 /* Receive descriptor pointer
++ table - kernel access */
++#endif
++
++
++#define RX_DESC_PTR_TBL_OFST 0xF40000 /* Receive descriptor pointer table */
++ #define RX_ISCSI_DDIG_EN_LBN 88
++ #define RX_ISCSI_DDIG_EN_WIDTH 1
++ #define RX_ISCSI_HDIG_EN_LBN 87
++ #define RX_ISCSI_HDIG_EN_WIDTH 1
++ #define RX_DESC_PREF_ACT_LBN 86
++ #define RX_DESC_PREF_ACT_WIDTH 1
++ #define RX_DC_HW_RPTR_LBN 80
++ #define RX_DC_HW_RPTR_WIDTH 6
++ #define RX_DESCQ_HW_RPTR_LBN 68
++ #define RX_DESCQ_HW_RPTR_WIDTH 12
++ #define RX_DESCQ_SW_WPTR_LBN 56
++ #define RX_DESCQ_SW_WPTR_WIDTH 12
++ #define RX_DESCQ_BUF_BASE_ID_LBN 36
++ #define RX_DESCQ_BUF_BASE_ID_WIDTH 20
++ #define RX_DESCQ_EVQ_ID_LBN 24
++ #define RX_DESCQ_EVQ_ID_WIDTH 12
++ #define RX_DESCQ_OWNER_ID_LBN 10
++ #define RX_DESCQ_OWNER_ID_WIDTH 14
++ #define RX_DESCQ_LABEL_LBN 5
++ #define RX_DESCQ_LABEL_WIDTH 5
++ #define RX_DESCQ_SIZE_LBN 3
++ #define RX_DESCQ_SIZE_WIDTH 2
++ #define RX_DESCQ_TYPE_LBN 2
++ #define RX_DESCQ_TYPE_WIDTH 1
++ #define RX_DESCQ_JUMBO_LBN 1
++ #define RX_DESCQ_JUMBO_WIDTH 1
++ #define RX_DESCQ_EN_LBN 0
++ #define RX_DESCQ_EN_WIDTH 1
++
++
++#define RX_RSS_INDIR_TBL_B0_OFST 0xFB0000 /* RSS indirection table (B0 only) */
++ #define RX_RSS_INDIR_ENT_B0_LBN 0
++ #define RX_RSS_INDIR_ENT_B0_WIDTH 6
++
++/*************---- TX Datapath Registers C Header ----*************/
++#define TX_FLUSH_DESCQ_REG_KER_OFST 0xA00 /* Transmit flush descriptor
++ queue register */
++#define TX_FLUSH_DESCQ_REG_OFST 0xA00 /* Transmit flush descriptor queue
++ register */
++ #define TX_FLUSH_DESCQ_CMD_LBN 12
++ #define TX_FLUSH_DESCQ_CMD_WIDTH 1
++ #define TX_FLUSH_DESCQ_LBN 0
++ #define TX_FLUSH_DESCQ_WIDTH 12
++#define TX_DESC_UPD_REG_KER_OFST 0xA10 /* Kernel transmit descriptor update
++ register. Page-mapped */
++#define TX_DESC_UPD_REG_PAGE4_OFST 0x8A10 /* Char & user transmit descriptor
++ update register. Page-mapped */
++#define TX_DESC_UPD_REG_PAGE123K_OFST 0x1000A10 /* Char & user transmit
++ descriptor update register.
++ Page-mapped */
++ #define TX_DESC_WPTR_LBN 96
++ #define TX_DESC_WPTR_WIDTH 12
++ #define TX_DESC_PUSH_CMD_LBN 95
++ #define TX_DESC_PUSH_CMD_WIDTH 1
++ #define TX_DESC_LBN 0
++ #define TX_DESC_WIDTH 95
++ #define TX_KER_DESC_LBN 0
++ #define TX_KER_DESC_WIDTH 64
++ #define TX_USR_DESC_LBN 0
++ #define TX_USR_DESC_WIDTH 64
++#define TX_DC_CFG_REG_KER_OFST 0xA20 /* Transmit descriptor cache
++ configuration register */
++#define TX_DC_CFG_REG_OFST 0xA20 /* Transmit descriptor cache configuration
++ register */
++ #define TX_DC_SIZE_LBN 0
++ #define TX_DC_SIZE_WIDTH 2
++
++#if FALCON_EXTENDED_P_BAR
++#define TX_DESC_PTR_TBL_KER_OFST 0x11900 /* Transmit descriptor pointer
++ table - kernel access */
++#else
++#define TX_DESC_PTR_TBL_KER_OFST 0x1900 /* Transmit descriptor pointer
++ table - kernel access */
++#endif
++
++
++#define TX_DESC_PTR_TBL_OFST 0xF50000 /* Transmit descriptor pointer table */
++ #define TX_NON_IP_DROP_DIS_B0_LBN 91
++ #define TX_NON_IP_DROP_DIS_B0_WIDTH 1
++ #define TX_IP_CHKSM_DIS_B0_LBN 90
++ #define TX_IP_CHKSM_DIS_B0_WIDTH 1
++ #define TX_TCP_CHKSM_DIS_B0_LBN 89
++ #define TX_TCP_CHKSM_DIS_B0_WIDTH 1
++ #define TX_DESCQ_EN_LBN 88
++ #define TX_DESCQ_EN_WIDTH 1
++ #define TX_ISCSI_DDIG_EN_LBN 87
++ #define TX_ISCSI_DDIG_EN_WIDTH 1
++ #define TX_ISCSI_HDIG_EN_LBN 86
++ #define TX_ISCSI_HDIG_EN_WIDTH 1
++ #define TX_DC_HW_RPTR_LBN 80
++ #define TX_DC_HW_RPTR_WIDTH 6
++ #define TX_DESCQ_HW_RPTR_LBN 68
++ #define TX_DESCQ_HW_RPTR_WIDTH 12
++ #define TX_DESCQ_SW_WPTR_LBN 56
++ #define TX_DESCQ_SW_WPTR_WIDTH 12
++ #define TX_DESCQ_BUF_BASE_ID_LBN 36
++ #define TX_DESCQ_BUF_BASE_ID_WIDTH 20
++ #define TX_DESCQ_EVQ_ID_LBN 24
++ #define TX_DESCQ_EVQ_ID_WIDTH 12
++ #define TX_DESCQ_OWNER_ID_LBN 10
++ #define TX_DESCQ_OWNER_ID_WIDTH 14
++ #define TX_DESCQ_LABEL_LBN 5
++ #define TX_DESCQ_LABEL_WIDTH 5
++ #define TX_DESCQ_SIZE_LBN 3
++ #define TX_DESCQ_SIZE_WIDTH 2
++ #define TX_DESCQ_TYPE_LBN 1
++ #define TX_DESCQ_TYPE_WIDTH 2
++ #define TX_DESCQ_FLUSH_LBN 0
++ #define TX_DESCQ_FLUSH_WIDTH 1
++#define TX_CFG_REG_KER_OFST 0xA50 /* Transmit configuration register */
++#define TX_CFG_REG_OFST 0xA50 /* Transmit configuration register */
++ #define TX_IP_ID_P1_OFS_LBN 32
++ #define TX_IP_ID_P1_OFS_WIDTH 15
++ #define TX_IP_ID_P0_OFS_LBN 16
++ #define TX_IP_ID_P0_OFS_WIDTH 15
++ #define TX_TURBO_EN_LBN 3
++ #define TX_TURBO_EN_WIDTH 1
++ #define TX_OWNERR_CTL_LBN 2
++ #define TX_OWNERR_CTL_WIDTH 2
++ #define TX_NON_IP_DROP_DIS_LBN 1
++ #define TX_NON_IP_DROP_DIS_WIDTH 1
++ #define TX_IP_ID_REP_EN_LBN 0
++ #define TX_IP_ID_REP_EN_WIDTH 1
++#define TX_RESERVED_REG_KER_OFST 0xA80 /* Transmit reserved register */
++#define TX_RESERVED_REG_OFST 0xA80 /* Transmit reserved register */
++ #define TX_CSR_PUSH_EN_LBN 89
++ #define TX_CSR_PUSH_EN_WIDTH 1
++ #define TX_RX_SPACER_LBN 64
++ #define TX_RX_SPACER_WIDTH 8
++ #define TX_SW_EV_EN_LBN 59
++ #define TX_SW_EV_EN_WIDTH 1
++ #define TX_RX_SPACER_EN_LBN 57
++ #define TX_RX_SPACER_EN_WIDTH 1
++ #define TX_CSR_PREF_WD_TMR_LBN 24
++ #define TX_CSR_PREF_WD_TMR_WIDTH 16
++ #define TX_CSR_ONLY1TAG_LBN 21
++ #define TX_CSR_ONLY1TAG_WIDTH 1
++ #define TX_PREF_THRESHOLD_LBN 19
++ #define TX_PREF_THRESHOLD_WIDTH 2
++ #define TX_ONE_PKT_PER_Q_LBN 18
++ #define TX_ONE_PKT_PER_Q_WIDTH 1
++ #define TX_DIS_NON_IP_EV_LBN 17
++ #define TX_DIS_NON_IP_EV_WIDTH 1
++ #define TX_DMA_SPACER_LBN 8
++ #define TX_DMA_SPACER_WIDTH 8
++ #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
++ #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
++ #define TX_TCP_DIS_A1_LBN 7
++ #define TX_TCP_DIS_A1_WIDTH 1
++ #define TX_IP_DIS_A1_LBN 6
++ #define TX_IP_DIS_A1_WIDTH 1
++ #define TX_MAX_CPL_LBN 2
++ #define TX_MAX_CPL_WIDTH 2
++ #define TX_MAX_PREF_LBN 0
++ #define TX_MAX_PREF_WIDTH 2
++#define TX_VLAN_REG_OFST 0xAE0 /* Transmit VLAN tag register */
++ #define TX_VLAN_EN_LBN 127
++ #define TX_VLAN_EN_WIDTH 1
++ #define TX_VLAN7_PORT1_EN_LBN 125
++ #define TX_VLAN7_PORT1_EN_WIDTH 1
++ #define TX_VLAN7_PORT0_EN_LBN 124
++ #define TX_VLAN7_PORT0_EN_WIDTH 1
++ #define TX_VLAN7_LBN 112
++ #define TX_VLAN7_WIDTH 12
++ #define TX_VLAN6_PORT1_EN_LBN 109
++ #define TX_VLAN6_PORT1_EN_WIDTH 1
++ #define TX_VLAN6_PORT0_EN_LBN 108
++ #define TX_VLAN6_PORT0_EN_WIDTH 1
++ #define TX_VLAN6_LBN 96
++ #define TX_VLAN6_WIDTH 12
++ #define TX_VLAN5_PORT1_EN_LBN 93
++ #define TX_VLAN5_PORT1_EN_WIDTH 1
++ #define TX_VLAN5_PORT0_EN_LBN 92
++ #define TX_VLAN5_PORT0_EN_WIDTH 1
++ #define TX_VLAN5_LBN 80
++ #define TX_VLAN5_WIDTH 12
++ #define TX_VLAN4_PORT1_EN_LBN 77
++ #define TX_VLAN4_PORT1_EN_WIDTH 1
++ #define TX_VLAN4_PORT0_EN_LBN 76
++ #define TX_VLAN4_PORT0_EN_WIDTH 1
++ #define TX_VLAN4_LBN 64
++ #define TX_VLAN4_WIDTH 12
++ #define TX_VLAN3_PORT1_EN_LBN 61
++ #define TX_VLAN3_PORT1_EN_WIDTH 1
++ #define TX_VLAN3_PORT0_EN_LBN 60
++ #define TX_VLAN3_PORT0_EN_WIDTH 1
++ #define TX_VLAN3_LBN 48
++ #define TX_VLAN3_WIDTH 12
++ #define TX_VLAN2_PORT1_EN_LBN 45
++ #define TX_VLAN2_PORT1_EN_WIDTH 1
++ #define TX_VLAN2_PORT0_EN_LBN 44
++ #define TX_VLAN2_PORT0_EN_WIDTH 1
++ #define TX_VLAN2_LBN 32
++ #define TX_VLAN2_WIDTH 12
++ #define TX_VLAN1_PORT1_EN_LBN 29
++ #define TX_VLAN1_PORT1_EN_WIDTH 1
++ #define TX_VLAN1_PORT0_EN_LBN 28
++ #define TX_VLAN1_PORT0_EN_WIDTH 1
++ #define TX_VLAN1_LBN 16
++ #define TX_VLAN1_WIDTH 12
++ #define TX_VLAN0_PORT1_EN_LBN 13
++ #define TX_VLAN0_PORT1_EN_WIDTH 1
++ #define TX_VLAN0_PORT0_EN_LBN 12
++ #define TX_VLAN0_PORT0_EN_WIDTH 1
++ #define TX_VLAN0_LBN 0
++ #define TX_VLAN0_WIDTH 12
++#define TX_FIL_CTL_REG_OFST 0xAF0 /* Transmit filter control register */
++ #define TX_MADR1_FIL_EN_LBN 65
++ #define TX_MADR1_FIL_EN_WIDTH 1
++ #define TX_MADR0_FIL_EN_LBN 64
++ #define TX_MADR0_FIL_EN_WIDTH 1
++ #define TX_IPFIL31_PORT1_EN_LBN 63
++ #define TX_IPFIL31_PORT1_EN_WIDTH 1
++ #define TX_IPFIL31_PORT0_EN_LBN 62
++ #define TX_IPFIL31_PORT0_EN_WIDTH 1
++ #define TX_IPFIL30_PORT1_EN_LBN 61
++ #define TX_IPFIL30_PORT1_EN_WIDTH 1
++ #define TX_IPFIL30_PORT0_EN_LBN 60
++ #define TX_IPFIL30_PORT0_EN_WIDTH 1
++ #define TX_IPFIL29_PORT1_EN_LBN 59
++ #define TX_IPFIL29_PORT1_EN_WIDTH 1
++ #define TX_IPFIL29_PORT0_EN_LBN 58
++ #define TX_IPFIL29_PORT0_EN_WIDTH 1
++ #define TX_IPFIL28_PORT1_EN_LBN 57
++ #define TX_IPFIL28_PORT1_EN_WIDTH 1
++ #define TX_IPFIL28_PORT0_EN_LBN 56
++ #define TX_IPFIL28_PORT0_EN_WIDTH 1
++ #define TX_IPFIL27_PORT1_EN_LBN 55
++ #define TX_IPFIL27_PORT1_EN_WIDTH 1
++ #define TX_IPFIL27_PORT0_EN_LBN 54
++ #define TX_IPFIL27_PORT0_EN_WIDTH 1
++ #define TX_IPFIL26_PORT1_EN_LBN 53
++ #define TX_IPFIL26_PORT1_EN_WIDTH 1
++ #define TX_IPFIL26_PORT0_EN_LBN 52
++ #define TX_IPFIL26_PORT0_EN_WIDTH 1
++ #define TX_IPFIL25_PORT1_EN_LBN 51
++ #define TX_IPFIL25_PORT1_EN_WIDTH 1
++ #define TX_IPFIL25_PORT0_EN_LBN 50
++ #define TX_IPFIL25_PORT0_EN_WIDTH 1
++ #define TX_IPFIL24_PORT1_EN_LBN 49
++ #define TX_IPFIL24_PORT1_EN_WIDTH 1
++ #define TX_IPFIL24_PORT0_EN_LBN 48
++ #define TX_IPFIL24_PORT0_EN_WIDTH 1
++ #define TX_IPFIL23_PORT1_EN_LBN 47
++ #define TX_IPFIL23_PORT1_EN_WIDTH 1
++ #define TX_IPFIL23_PORT0_EN_LBN 46
++ #define TX_IPFIL23_PORT0_EN_WIDTH 1
++ #define TX_IPFIL22_PORT1_EN_LBN 45
++ #define TX_IPFIL22_PORT1_EN_WIDTH 1
++ #define TX_IPFIL22_PORT0_EN_LBN 44
++ #define TX_IPFIL22_PORT0_EN_WIDTH 1
++ #define TX_IPFIL21_PORT1_EN_LBN 43
++ #define TX_IPFIL21_PORT1_EN_WIDTH 1
++ #define TX_IPFIL21_PORT0_EN_LBN 42
++ #define TX_IPFIL21_PORT0_EN_WIDTH 1
++ #define TX_IPFIL20_PORT1_EN_LBN 41
++ #define TX_IPFIL20_PORT1_EN_WIDTH 1
++ #define TX_IPFIL20_PORT0_EN_LBN 40
++ #define TX_IPFIL20_PORT0_EN_WIDTH 1
++ #define TX_IPFIL19_PORT1_EN_LBN 39
++ #define TX_IPFIL19_PORT1_EN_WIDTH 1
++ #define TX_IPFIL19_PORT0_EN_LBN 38
++ #define TX_IPFIL19_PORT0_EN_WIDTH 1
++ #define TX_IPFIL18_PORT1_EN_LBN 37
++ #define TX_IPFIL18_PORT1_EN_WIDTH 1
++ #define TX_IPFIL18_PORT0_EN_LBN 36
++ #define TX_IPFIL18_PORT0_EN_WIDTH 1
++ #define TX_IPFIL17_PORT1_EN_LBN 35
++ #define TX_IPFIL17_PORT1_EN_WIDTH 1
++ #define TX_IPFIL17_PORT0_EN_LBN 34
++ #define TX_IPFIL17_PORT0_EN_WIDTH 1
++ #define TX_IPFIL16_PORT1_EN_LBN 33
++ #define TX_IPFIL16_PORT1_EN_WIDTH 1
++ #define TX_IPFIL16_PORT0_EN_LBN 32
++ #define TX_IPFIL16_PORT0_EN_WIDTH 1
++ #define TX_IPFIL15_PORT1_EN_LBN 31
++ #define TX_IPFIL15_PORT1_EN_WIDTH 1
++ #define TX_IPFIL15_PORT0_EN_LBN 30
++ #define TX_IPFIL15_PORT0_EN_WIDTH 1
++ #define TX_IPFIL14_PORT1_EN_LBN 29
++ #define TX_IPFIL14_PORT1_EN_WIDTH 1
++ #define TX_IPFIL14_PORT0_EN_LBN 28
++ #define TX_IPFIL14_PORT0_EN_WIDTH 1
++ #define TX_IPFIL13_PORT1_EN_LBN 27
++ #define TX_IPFIL13_PORT1_EN_WIDTH 1
++ #define TX_IPFIL13_PORT0_EN_LBN 26
++ #define TX_IPFIL13_PORT0_EN_WIDTH 1
++ #define TX_IPFIL12_PORT1_EN_LBN 25
++ #define TX_IPFIL12_PORT1_EN_WIDTH 1
++ #define TX_IPFIL12_PORT0_EN_LBN 24
++ #define TX_IPFIL12_PORT0_EN_WIDTH 1
++ #define TX_IPFIL11_PORT1_EN_LBN 23
++ #define TX_IPFIL11_PORT1_EN_WIDTH 1
++ #define TX_IPFIL11_PORT0_EN_LBN 22
++ #define TX_IPFIL11_PORT0_EN_WIDTH 1
++ #define TX_IPFIL10_PORT1_EN_LBN 21
++ #define TX_IPFIL10_PORT1_EN_WIDTH 1
++ #define TX_IPFIL10_PORT0_EN_LBN 20
++ #define TX_IPFIL10_PORT0_EN_WIDTH 1
++ #define TX_IPFIL9_PORT1_EN_LBN 19
++ #define TX_IPFIL9_PORT1_EN_WIDTH 1
++ #define TX_IPFIL9_PORT0_EN_LBN 18
++ #define TX_IPFIL9_PORT0_EN_WIDTH 1
++ #define TX_IPFIL8_PORT1_EN_LBN 17
++ #define TX_IPFIL8_PORT1_EN_WIDTH 1
++ #define TX_IPFIL8_PORT0_EN_LBN 16
++ #define TX_IPFIL8_PORT0_EN_WIDTH 1
++ #define TX_IPFIL7_PORT1_EN_LBN 15
++ #define TX_IPFIL7_PORT1_EN_WIDTH 1
++ #define TX_IPFIL7_PORT0_EN_LBN 14
++ #define TX_IPFIL7_PORT0_EN_WIDTH 1
++ #define TX_IPFIL6_PORT1_EN_LBN 13
++ #define TX_IPFIL6_PORT1_EN_WIDTH 1
++ #define TX_IPFIL6_PORT0_EN_LBN 12
++ #define TX_IPFIL6_PORT0_EN_WIDTH 1
++ #define TX_IPFIL5_PORT1_EN_LBN 11
++ #define TX_IPFIL5_PORT1_EN_WIDTH 1
++ #define TX_IPFIL5_PORT0_EN_LBN 10
++ #define TX_IPFIL5_PORT0_EN_WIDTH 1
++ #define TX_IPFIL4_PORT1_EN_LBN 9
++ #define TX_IPFIL4_PORT1_EN_WIDTH 1
++ #define TX_IPFIL4_PORT0_EN_LBN 8
++ #define TX_IPFIL4_PORT0_EN_WIDTH 1
++ #define TX_IPFIL3_PORT1_EN_LBN 7
++ #define TX_IPFIL3_PORT1_EN_WIDTH 1
++ #define TX_IPFIL3_PORT0_EN_LBN 6
++ #define TX_IPFIL3_PORT0_EN_WIDTH 1
++ #define TX_IPFIL2_PORT1_EN_LBN 5
++ #define TX_IPFIL2_PORT1_EN_WIDTH 1
++ #define TX_IPFIL2_PORT0_EN_LBN 4
++ #define TX_IPFIL2_PORT0_EN_WIDTH 1
++ #define TX_IPFIL1_PORT1_EN_LBN 3
++ #define TX_IPFIL1_PORT1_EN_WIDTH 1
++ #define TX_IPFIL1_PORT0_EN_LBN 2
++ #define TX_IPFIL1_PORT0_EN_WIDTH 1
++ #define TX_IPFIL0_PORT1_EN_LBN 1
++ #define TX_IPFIL0_PORT1_EN_WIDTH 1
++ #define TX_IPFIL0_PORT0_EN_LBN 0
++ #define TX_IPFIL0_PORT0_EN_WIDTH 1
++#define TX_IPFIL_TBL_OFST 0xB00 /* Transmit IP source address filter table */
++ #define TX_IPFIL_MASK_LBN 32
++ #define TX_IPFIL_MASK_WIDTH 32
++ #define TX_IP_SRC_ADR_LBN 0
++ #define TX_IP_SRC_ADR_WIDTH 32
++#define TX_PACE_REG_A1_OFST 0xF80000 /* Transmit pace control register */
++#define TX_PACE_REG_B0_OFST 0xA90 /* Transmit pace control register */
++ #define TX_PACE_SB_AF_LBN 19
++ #define TX_PACE_SB_AF_WIDTH 10
++ #define TX_PACE_SB_NOTAF_LBN 9
++ #define TX_PACE_SB_NOTAF_WIDTH 10
++ #define TX_PACE_FB_BASE_LBN 5
++ #define TX_PACE_FB_BASE_WIDTH 4
++ #define TX_PACE_BIN_TH_LBN 0
++ #define TX_PACE_BIN_TH_WIDTH 5
++#define TX_PACE_TBL_A1_OFST 0xF80040 /* Transmit pacing table */
++#define TX_PACE_TBL_FIRST_QUEUE_A1 4
++#define TX_PACE_TBL_B0_OFST 0xF80000 /* Transmit pacing table */
++#define TX_PACE_TBL_FIRST_QUEUE_B0 0
++ #define TX_PACE_LBN 0
++ #define TX_PACE_WIDTH 5
++
++/*************---- EE/Flash Registers C Header ----*************/
++#define EE_SPI_HCMD_REG_KER_OFST 0x100 /* SPI host command register */
++#define EE_SPI_HCMD_REG_OFST 0x100 /* SPI host command register */
++ #define EE_SPI_HCMD_CMD_EN_LBN 31
++ #define EE_SPI_HCMD_CMD_EN_WIDTH 1
++ #define EE_WR_TIMER_ACTIVE_LBN 28
++ #define EE_WR_TIMER_ACTIVE_WIDTH 1
++ #define EE_SPI_HCMD_SF_SEL_LBN 24
++ #define EE_SPI_HCMD_SF_SEL_WIDTH 1
++ #define EE_SPI_HCMD_DABCNT_LBN 16
++ #define EE_SPI_HCMD_DABCNT_WIDTH 5
++ #define EE_SPI_HCMD_READ_LBN 15
++ #define EE_SPI_HCMD_READ_WIDTH 1
++ #define EE_SPI_HCMD_DUBCNT_LBN 12
++ #define EE_SPI_HCMD_DUBCNT_WIDTH 2
++ #define EE_SPI_HCMD_ADBCNT_LBN 8
++ #define EE_SPI_HCMD_ADBCNT_WIDTH 2
++ #define EE_SPI_HCMD_ENC_LBN 0
++ #define EE_SPI_HCMD_ENC_WIDTH 8
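++/* Illustrative sketch (not part of the original header): a host-driven
++ * flash read composes the command word from the fields above.  The
++ * byte counts are examples, 0x03 is the standard SPI READ opcode, and
++ * SF_SEL is assumed here to select the flash device:
++ *
++ *   uint32_t hcmd = (1u    << EE_SPI_HCMD_CMD_EN_LBN)
++ *                 | (1u    << EE_SPI_HCMD_SF_SEL_LBN)
++ *                 | (4u    << EE_SPI_HCMD_DABCNT_LBN)
++ *                 | (1u    << EE_SPI_HCMD_READ_LBN)
++ *                 | (3u    << EE_SPI_HCMD_ADBCNT_LBN)
++ *                 | (0x03u << EE_SPI_HCMD_ENC_LBN);
++ */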
++#define EE_SPI_HADR_REG_KER_OFST 0x110 /* SPI host address register */
++#define EE_SPI_HADR_REG_OFST 0x110 /* SPI host address register */
++ #define EE_SPI_HADR_DUBYTE_LBN 24
++ #define EE_SPI_HADR_DUBYTE_WIDTH 8
++ #define EE_SPI_HADR_ADR_LBN 0
++ #define EE_SPI_HADR_ADR_WIDTH 24
++#define EE_SPI_HDATA_REG_KER_OFST 0x120 /* SPI host data register */
++#define EE_SPI_HDATA_REG_OFST 0x120 /* SPI host data register */
++ #define EE_SPI_HDATA3_LBN 96
++ #define EE_SPI_HDATA3_WIDTH 32
++ #define EE_SPI_HDATA2_LBN 64
++ #define EE_SPI_HDATA2_WIDTH 32
++ #define EE_SPI_HDATA1_LBN 32
++ #define EE_SPI_HDATA1_WIDTH 32
++ #define EE_SPI_HDATA0_LBN 0
++ #define EE_SPI_HDATA0_WIDTH 32
++#define EE_BASE_PAGE_REG_KER_OFST 0x130 /* Expansion ROM base mirror register */
++#define EE_BASE_PAGE_REG_OFST 0x130 /* Expansion ROM base mirror register */
++ #define EE_EXP_ROM_WINDOW_BASE_LBN 16
++ #define EE_EXP_ROM_WINDOW_BASE_WIDTH 13
++ #define EE_EXPROM_MASK_LBN 0
++ #define EE_EXPROM_MASK_WIDTH 13
++#define EE_VPD_CFG0_REG_KER_OFST 0x140 /* SPI/VPD configuration register */
++#define EE_VPD_CFG0_REG_OFST 0x140 /* SPI/VPD configuration register */
++ #define EE_SF_FASTRD_EN_LBN 127
++ #define EE_SF_FASTRD_EN_WIDTH 1
++ #define EE_SF_CLOCK_DIV_LBN 120
++ #define EE_SF_CLOCK_DIV_WIDTH 7
++ #define EE_VPD_WIP_POLL_LBN 119
++ #define EE_VPD_WIP_POLL_WIDTH 1
++ #define EE_VPDW_LENGTH_LBN 80
++ #define EE_VPDW_LENGTH_WIDTH 15
++ #define EE_VPDW_BASE_LBN 64
++ #define EE_VPDW_BASE_WIDTH 15
++ #define EE_VPD_WR_CMD_EN_LBN 56
++ #define EE_VPD_WR_CMD_EN_WIDTH 8
++ #define EE_VPD_BASE_LBN 32
++ #define EE_VPD_BASE_WIDTH 24
++ #define EE_VPD_LENGTH_LBN 16
++ #define EE_VPD_LENGTH_WIDTH 13
++ #define EE_VPD_AD_SIZE_LBN 8
++ #define EE_VPD_AD_SIZE_WIDTH 5
++ #define EE_VPD_ACCESS_ON_LBN 5
++ #define EE_VPD_ACCESS_ON_WIDTH 1
++#define EE_VPD_SW_CNTL_REG_KER_OFST 0x150 /* VPD access SW control register */
++#define EE_VPD_SW_CNTL_REG_OFST 0x150 /* VPD access SW control register */
++ #define EE_VPD_CYCLE_PENDING_LBN 31
++ #define EE_VPD_CYCLE_PENDING_WIDTH 1
++ #define EE_VPD_CYC_WRITE_LBN 28
++ #define EE_VPD_CYC_WRITE_WIDTH 1
++ #define EE_VPD_CYC_ADR_LBN 0
++ #define EE_VPD_CYC_ADR_WIDTH 15
++#define EE_VPD_SW_DATA_REG_KER_OFST 0x160 /* VPD access SW data register */
++#define EE_VPD_SW_DATA_REG_OFST 0x160 /* VPD access SW data register */
++ #define EE_VPD_CYC_DAT_LBN 0
++ #define EE_VPD_CYC_DAT_WIDTH 32
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,75 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) descriptor
++ * definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*************---- Descriptors C Headers ----*************/
++/* Receive Kernel IP Descriptor */
++ #define RX_KER_BUF_SIZE_LBN 48
++ #define RX_KER_BUF_SIZE_WIDTH 14
++ #define RX_KER_BUF_REGION_LBN 46
++ #define RX_KER_BUF_REGION_WIDTH 2
++ #define RX_KER_BUF_REGION0_DECODE 0
++ #define RX_KER_BUF_REGION1_DECODE 1
++ #define RX_KER_BUF_REGION2_DECODE 2
++ #define RX_KER_BUF_REGION3_DECODE 3
++ #define RX_KER_BUF_ADR_LBN 0
++ #define RX_KER_BUF_ADR_WIDTH 46
++/* Receive User IP Descriptor */
++ #define RX_USR_2BYTE_OFS_LBN 20
++ #define RX_USR_2BYTE_OFS_WIDTH 12
++ #define RX_USR_BUF_ID_LBN 0
++ #define RX_USR_BUF_ID_WIDTH 20
++/* Transmit Kernel IP Descriptor */
++ #define TX_KER_PORT_LBN 63
++ #define TX_KER_PORT_WIDTH 1
++ #define TX_KER_CONT_LBN 62
++ #define TX_KER_CONT_WIDTH 1
++ #define TX_KER_BYTE_CNT_LBN 48
++ #define TX_KER_BYTE_CNT_WIDTH 14
++ #define TX_KER_BUF_REGION_LBN 46
++ #define TX_KER_BUF_REGION_WIDTH 2
++ #define TX_KER_BUF_REGION0_DECODE 0
++ #define TX_KER_BUF_REGION1_DECODE 1
++ #define TX_KER_BUF_REGION2_DECODE 2
++ #define TX_KER_BUF_REGION3_DECODE 3
++ #define TX_KER_BUF_ADR_LBN 0
++ #define TX_KER_BUF_ADR_WIDTH 46
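++/* Illustrative sketch (not part of the original header): a kernel-mode
++ * transmit descriptor is built from the fields above.  dma_addr, len
++ * and last_frag are hypothetical inputs, and the CONT bit is assumed
++ * to mark a continuation (i.e. more fragments follow):
++ *
++ *   uint64_t desc = ((uint64_t)(last_frag ? 0 : 1) << TX_KER_CONT_LBN)
++ *                 | ((uint64_t)len << TX_KER_BYTE_CNT_LBN)
++ *                 | ((uint64_t)TX_KER_BUF_REGION0_DECODE
++ *                                  << TX_KER_BUF_REGION_LBN)
++ *                 | ((uint64_t)dma_addr << TX_KER_BUF_ADR_LBN);
++ */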
++/* Transmit User IP Descriptor */
++ #define TX_USR_PORT_LBN 47
++ #define TX_USR_PORT_WIDTH 1
++ #define TX_USR_CONT_LBN 46
++ #define TX_USR_CONT_WIDTH 1
++ #define TX_USR_BYTE_CNT_LBN 33
++ #define TX_USR_BYTE_CNT_WIDTH 13
++ #define TX_USR_BUF_ID_LBN 13
++ #define TX_USR_BUF_ID_WIDTH 20
++ #define TX_USR_BYTE_OFS_LBN 0
++ #define TX_USR_BYTE_OFS_WIDTH 13
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,155 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) event
++ * definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*************---- Events Format C Header ----*************/
++/*************---- Event entry ----*************/
++ #define EV_CODE_LBN 60
++ #define EV_CODE_WIDTH 4
++ #define RX_IP_EV_DECODE 0
++ #define TX_IP_EV_DECODE 2
++ #define DRIVER_EV_DECODE 5
++ #define GLOBAL_EV_DECODE 6
++ #define DRV_GEN_EV_DECODE 7
++ #define EV_DATA_LBN 0
++ #define EV_DATA_WIDTH 60
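++/* Illustrative sketch (not part of the original header): an event
++ * consumer reads the next 64-bit entry from its queue and dispatches
++ * on the code field; evq_ring, read_ptr and the handle_* functions
++ * are hypothetical:
++ *
++ *   uint64_t ev = evq_ring[read_ptr];
++ *   unsigned code = (ev >> EV_CODE_LBN)
++ *                   & ((1u << EV_CODE_WIDTH) - 1);
++ *   switch (code) {
++ *   case RX_IP_EV_DECODE:   handle_rx(ev);  break;
++ *   case TX_IP_EV_DECODE:   handle_tx(ev);  break;
++ *   case DRIVER_EV_DECODE:  handle_drv(ev); break;
++ *   }
++ */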
++/******---- Receive IP events for both Kernel & User event queues ----******/
++ #define RX_EV_PKT_OK_LBN 56
++ #define RX_EV_PKT_OK_WIDTH 1
++ #define RX_EV_BUF_OWNER_ID_ERR_LBN 54
++ #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++ #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
++ #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
++ #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
++ #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
++ #define RX_EV_ETH_CRC_ERR_LBN 50
++ #define RX_EV_ETH_CRC_ERR_WIDTH 1
++ #define RX_EV_FRM_TRUNC_LBN 49
++ #define RX_EV_FRM_TRUNC_WIDTH 1
++ #define RX_EV_DRIB_NIB_LBN 48
++ #define RX_EV_DRIB_NIB_WIDTH 1
++ #define RX_EV_TOBE_DISC_LBN 47
++ #define RX_EV_TOBE_DISC_WIDTH 1
++ #define RX_EV_PKT_TYPE_LBN 44
++ #define RX_EV_PKT_TYPE_WIDTH 3
++ #define RX_EV_PKT_TYPE_ETH_DECODE 0
++ #define RX_EV_PKT_TYPE_LLC_DECODE 1
++ #define RX_EV_PKT_TYPE_JUMBO_DECODE 2
++ #define RX_EV_PKT_TYPE_VLAN_DECODE 3
++ #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
++ #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
++ #define RX_EV_HDR_TYPE_LBN 42
++ #define RX_EV_HDR_TYPE_WIDTH 2
++ #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
++ #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
++ #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
++ #define RX_EV_HDR_TYPE_NON_IP_DECODE 3
++ #define RX_EV_DESC_Q_EMPTY_LBN 41
++ #define RX_EV_DESC_Q_EMPTY_WIDTH 1
++ #define RX_EV_MCAST_HASH_MATCH_LBN 40
++ #define RX_EV_MCAST_HASH_MATCH_WIDTH 1
++ #define RX_EV_MCAST_PKT_LBN 39
++ #define RX_EV_MCAST_PKT_WIDTH 1
++ #define RX_EV_Q_LABEL_LBN 32
++ #define RX_EV_Q_LABEL_WIDTH 5
++ #define RX_JUMBO_CONT_LBN 31
++ #define RX_JUMBO_CONT_WIDTH 1
++ #define RX_SOP_LBN 15
++ #define RX_SOP_WIDTH 1
++ #define RX_PORT_LBN 30
++ #define RX_PORT_WIDTH 1
++ #define RX_EV_BYTE_CNT_LBN 16
++ #define RX_EV_BYTE_CNT_WIDTH 14
++ #define RX_iSCSI_PKT_OK_LBN 14
++ #define RX_iSCSI_PKT_OK_WIDTH 1
++ #define RX_ISCSI_DDIG_ERR_LBN 13
++ #define RX_ISCSI_DDIG_ERR_WIDTH 1
++ #define RX_ISCSI_HDIG_ERR_LBN 12
++ #define RX_ISCSI_HDIG_ERR_WIDTH 1
++ #define RX_EV_DESC_PTR_LBN 0
++ #define RX_EV_DESC_PTR_WIDTH 12
++/******---- Transmit IP events for both Kernel & User event queues ----******/
++ #define TX_EV_PKT_ERR_LBN 38
++ #define TX_EV_PKT_ERR_WIDTH 1
++ #define TX_EV_PKT_TOO_BIG_LBN 37
++ #define TX_EV_PKT_TOO_BIG_WIDTH 1
++ #define TX_EV_Q_LABEL_LBN 32
++ #define TX_EV_Q_LABEL_WIDTH 5
++ #define TX_EV_PORT_LBN 16
++ #define TX_EV_PORT_WIDTH 1
++ #define TX_EV_WQ_FF_FULL_LBN 15
++ #define TX_EV_WQ_FF_FULL_WIDTH 1
++ #define TX_EV_BUF_OWNER_ID_ERR_LBN 14
++ #define TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++ #define TX_EV_COMP_LBN 12
++ #define TX_EV_COMP_WIDTH 1
++ #define TX_EV_DESC_PTR_LBN 0
++ #define TX_EV_DESC_PTR_WIDTH 12
++/*************---- Char or Kernel driver events ----*************/
++ #define DRIVER_EV_SUB_CODE_LBN 56
++ #define DRIVER_EV_SUB_CODE_WIDTH 4
++ #define TX_DESCQ_FLS_DONE_EV_DECODE 0x0
++ #define RX_DESCQ_FLS_DONE_EV_DECODE 0x1
++ #define EVQ_INIT_DONE_EV_DECODE 0x2
++ #define EVQ_NOT_EN_EV_DECODE 0x3
++ #define RX_DESCQ_FLSFF_OVFL_EV_DECODE 0x4
++ #define SRM_UPD_DONE_EV_DECODE 0x5
++ #define WAKE_UP_EV_DECODE 0x6
++ #define TX_PKT_NON_TCP_UDP_DECODE 0x9
++ #define TIMER_EV_DECODE 0xA
++ #define RX_DSC_ERROR_EV_DECODE 0xE
++ #define DRIVER_EV_TX_DESCQ_ID_LBN 0
++ #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
++ #define DRIVER_EV_RX_DESCQ_ID_LBN 0
++ #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
++ #define DRIVER_EV_EVQ_ID_LBN 0
++ #define DRIVER_EV_EVQ_ID_WIDTH 12
++ #define DRIVER_TMR_ID_LBN 0
++ #define DRIVER_TMR_ID_WIDTH 12
++ #define DRIVER_EV_SRM_UPD_LBN 0
++ #define DRIVER_EV_SRM_UPD_WIDTH 2
++ #define SRM_CLR_EV_DECODE 0
++ #define SRM_UPD_EV_DECODE 1
++ #define SRM_ILLCLR_EV_DECODE 2
++/********---- Global events. Sent to both event queues 0 and 4. ----********/
++ #define XFP_PHY_INTR_LBN 10
++ #define XFP_PHY_INTR_WIDTH 1
++ #define XG_PHY_INTR_LBN 9
++ #define XG_PHY_INTR_WIDTH 1
++ #define G_PHY1_INTR_LBN 8
++ #define G_PHY1_INTR_WIDTH 1
++ #define G_PHY0_INTR_LBN 7
++ #define G_PHY0_INTR_WIDTH 1
++/*************---- Driver generated events ----*************/
++ #define DRV_GEN_EV_CODE_LBN 60
++ #define DRV_GEN_EV_CODE_WIDTH 4
++ #define DRV_GEN_EV_DATA_LBN 0
++ #define DRV_GEN_EV_DATA_WIDTH 60
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_grmon.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_grmon.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_grmon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_grmon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,129 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) 1G MAC
++ * counters.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*************---- 1G MAC Statistical Counters C Header ----*************/
++#define GRxGoodOct_offset 0x0
++ #define GRxGoodOct_WIDTH 48
++#define GRxBadOct_offset 0x8
++ #define GRxBadOct_WIDTH 48
++#define GRxMissPkt_offset 0x10
++ #define GRxMissPkt_WIDTH 32
++#define GRxFalseCRS_offset 0x14
++ #define GRxFalseCRS_WIDTH 32
++#define GRxPausePkt_offset 0x18
++ #define GRxPausePkt_WIDTH 32
++#define GRxBadPkt_offset 0x1C
++ #define GRxBadPkt_WIDTH 32
++#define GRxUcastPkt_offset 0x20
++ #define GRxUcastPkt_WIDTH 32
++#define GRxMcastPkt_offset 0x24
++ #define GRxMcastPkt_WIDTH 32
++#define GRxBcastPkt_offset 0x28
++ #define GRxBcastPkt_WIDTH 32
++#define GRxGoodLt64Pkt_offset 0x2C
++ #define GRxGoodLt64Pkt_WIDTH 32
++#define GRxBadLt64Pkt_offset 0x30
++ #define GRxBadLt64Pkt_WIDTH 32
++#define GRx64Pkt_offset 0x34
++ #define GRx64Pkt_WIDTH 32
++#define GRx65to127Pkt_offset 0x38
++ #define GRx65to127Pkt_WIDTH 32
++#define GRx128to255Pkt_offset 0x3C
++ #define GRx128to255Pkt_WIDTH 32
++#define GRx256to511Pkt_offset 0x40
++ #define GRx256to511Pkt_WIDTH 32
++#define GRx512to1023Pkt_offset 0x44
++ #define GRx512to1023Pkt_WIDTH 32
++#define GRx1024to15xxPkt_offset 0x48
++ #define GRx1024to15xxPkt_WIDTH 32
++#define GRx15xxtoJumboPkt_offset 0x4C
++ #define GRx15xxtoJumboPkt_WIDTH 32
++#define GRxGtJumboPkt_offset 0x50
++ #define GRxGtJumboPkt_WIDTH 32
++#define GRxFcsErr64to15xxPkt_offset 0x54
++ #define GRxFcsErr64to15xxPkt_WIDTH 32
++#define GRxFcsErr15xxtoJumboPkt_offset 0x58
++ #define GRxFcsErr15xxtoJumboPkt_WIDTH 32
++#define GRxFcsErrGtJumboPkt_offset 0x5C
++ #define GRxFcsErrGtJumboPkt_WIDTH 32
++#define GTxGoodBadOct_offset 0x80
++ #define GTxGoodBadOct_WIDTH 48
++#define GTxGoodOct_offset 0x88
++ #define GTxGoodOct_WIDTH 48
++#define GTxSglColPkt_offset 0x90
++ #define GTxSglColPkt_WIDTH 32
++#define GTxMultColPkt_offset 0x94
++ #define GTxMultColPkt_WIDTH 32
++#define GTxExColPkt_offset 0x98
++ #define GTxExColPkt_WIDTH 32
++#define GTxDefPkt_offset 0x9C
++ #define GTxDefPkt_WIDTH 32
++#define GTxLateCol_offset 0xA0
++ #define GTxLateCol_WIDTH 32
++#define GTxExDefPkt_offset 0xA4
++ #define GTxExDefPkt_WIDTH 32
++#define GTxPausePkt_offset 0xA8
++ #define GTxPausePkt_WIDTH 32
++#define GTxBadPkt_offset 0xAC
++ #define GTxBadPkt_WIDTH 32
++#define GTxUcastPkt_offset 0xB0
++ #define GTxUcastPkt_WIDTH 32
++#define GTxMcastPkt_offset 0xB4
++ #define GTxMcastPkt_WIDTH 32
++#define GTxBcastPkt_offset 0xB8
++ #define GTxBcastPkt_WIDTH 32
++#define GTxLt64Pkt_offset 0xBC
++ #define GTxLt64Pkt_WIDTH 32
++#define GTx64Pkt_offset 0xC0
++ #define GTx64Pkt_WIDTH 32
++#define GTx65to127Pkt_offset 0xC4
++ #define GTx65to127Pkt_WIDTH 32
++#define GTx128to255Pkt_offset 0xC8
++ #define GTx128to255Pkt_WIDTH 32
++#define GTx256to511Pkt_offset 0xCC
++ #define GTx256to511Pkt_WIDTH 32
++#define GTx512to1023Pkt_offset 0xD0
++ #define GTx512to1023Pkt_WIDTH 32
++#define GTx1024to15xxPkt_offset 0xD4
++ #define GTx1024to15xxPkt_WIDTH 32
++#define GTx15xxtoJumboPkt_offset 0xD8
++ #define GTx15xxtoJumboPkt_WIDTH 32
++#define GTxGtJumboPkt_offset 0xDC
++ #define GTxGtJumboPkt_WIDTH 32
++#define GTxNonTcpUdpPkt_offset 0xE0
++ #define GTxNonTcpUdpPkt_WIDTH 16
++#define GTxMacSrcErrPkt_offset 0xE4
++ #define GTxMacSrcErrPkt_WIDTH 16
++#define GTxIpSrcErrPkt_offset 0xE8
++ #define GTxIpSrcErrPkt_WIDTH 16
++#define GDmaDone_offset 0xEC
++ #define GDmaDone_WIDTH 32
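++/* Illustrative sketch (not part of the original header): after the MAC
++ * has DMAd a statistics block to host memory (see the MAC0_STAT_DMA
++ * registers in falcon_mac.h), each counter lies at its _offset and
++ * occupies _WIDTH bits.  Assuming a little-endian host and a
++ * hypothetical stats_buf pointer:
++ *
++ *   uint64_t rx_good_octets = 0;
++ *   memcpy(&rx_good_octets, stats_buf + GRxGoodOct_offset,
++ *          GRxGoodOct_WIDTH / 8);
++ */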
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,44 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) interrupt
++ * vector definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*************---- Interrupt Vector Format C Header ----*************/
++#define DW0_OFST 0x0 /* Double-word 0: Event queue FIFO interrupts */
++ #define EVQ_FIFO_HF_LBN 1
++ #define EVQ_FIFO_HF_WIDTH 1
++ #define EVQ_FIFO_AF_LBN 0
++ #define EVQ_FIFO_AF_WIDTH 1
++#define DW1_OFST 0x4 /* Double-word 1: Interrupt indicator */
++ #define INT_FLAG_LBN 0
++ #define INT_FLAG_WIDTH 1
++#define DW2_OFST 0x8 /* Double-word 2: Fatal interrupts */
++ #define FATAL_INT_LBN 0
++ #define FATAL_INT_WIDTH 1
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_mac.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_mac.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_mac.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_mac.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,711 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) MAC register
++ * definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*********---- 1G/10G Ethernet MAC Wrapper Registers C Header ----*********/
++#define MD_TXD_REG_KER_OFST 0xC00 /* PHY management transmit data register */
++#define MD_TXD_REG_OFST 0xC00 /* PHY management transmit data register */
++ #define MD_TXD_LBN 0
++ #define MD_TXD_WIDTH 16
++#define MD_RXD_REG_KER_OFST 0xC10 /* PHY management receive data register */
++#define MD_RXD_REG_OFST 0xC10 /* PHY management receive data register */
++ #define MD_RXD_LBN 0
++ #define MD_RXD_WIDTH 16
++#define MD_CS_REG_KER_OFST 0xC20 /* PHY management configuration &
++ status register */
++#define MD_CS_REG_OFST 0xC20 /* PHY management configuration &
++ status register */
++ #define MD_PT_LBN 7
++ #define MD_PT_WIDTH 3
++ #define MD_PL_LBN 6
++ #define MD_PL_WIDTH 1
++ #define MD_INT_CLR_LBN 5
++ #define MD_INT_CLR_WIDTH 1
++ #define MD_GC_LBN 4
++ #define MD_GC_WIDTH 1
++ #define MD_PRSP_LBN 3
++ #define MD_PRSP_WIDTH 1
++ #define MD_RIC_LBN 2
++ #define MD_RIC_WIDTH 1
++ #define MD_RDC_LBN 1
++ #define MD_RDC_WIDTH 1
++ #define MD_WRC_LBN 0
++ #define MD_WRC_WIDTH 1
++#define MD_PHY_ADR_REG_KER_OFST 0xC30 /* PHY management PHY address register */
++#define MD_PHY_ADR_REG_OFST 0xC30 /* PHY management PHY address register */
++ #define MD_PHY_ADR_LBN 0
++ #define MD_PHY_ADR_WIDTH 16
++#define MD_ID_REG_KER_OFST 0xC40 /* PHY management ID register */
++#define MD_ID_REG_OFST 0xC40 /* PHY management ID register */
++ #define MD_PRT_ADR_LBN 11
++ #define MD_PRT_ADR_WIDTH 5
++ #define MD_DEV_ADR_LBN 6
++ #define MD_DEV_ADR_WIDTH 5
++#define MD_STAT_REG_KER_OFST 0xC50 /* PHY management status & mask register */
++#define MD_STAT_REG_OFST 0xC50 /* PHY management status & mask register */
++ #define MD_PINT_LBN 4
++ #define MD_PINT_WIDTH 1
++ #define MD_DONE_LBN 3
++ #define MD_DONE_WIDTH 1
++ #define MD_BSERR_LBN 2
++ #define MD_BSERR_WIDTH 1
++ #define MD_LNFL_LBN 1
++ #define MD_LNFL_WIDTH 1
++ #define MD_BSY_LBN 0
++ #define MD_BSY_WIDTH 1
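++/* Illustrative sketch (not part of the original header): a typical
++ * MDIO read through these registers writes the register address to
++ * MD_PHY_ADR, the port/device addresses to MD_ID, sets MD_RIC in
++ * MD_CS (assumed here to initiate the read), then polls MD_BSY in
++ * MD_STAT before reading MD_RXD.  readl/writel and base are
++ * hypothetical MMIO accessors; the exact sequence and error handling
++ * are driver-specific:
++ *
++ *   writel(regnum, base + MD_PHY_ADR_REG_OFST);
++ *   writel((port << MD_PRT_ADR_LBN) | (dev << MD_DEV_ADR_LBN),
++ *          base + MD_ID_REG_OFST);
++ *   writel(1u << MD_RIC_LBN, base + MD_CS_REG_OFST);
++ *   while (readl(base + MD_STAT_REG_OFST) & (1u << MD_BSY_LBN))
++ *           ;
++ *   data = readl(base + MD_RXD_REG_OFST) & 0xffff;
++ */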
++#define MAC0_STAT_DMA_REG_KER_OFST 0xC60 /* Port 0 MAC statistical counter
++ DMA register */
++#define MAC0_STAT_DMA_REG_OFST 0xC60 /* Port 0 MAC statistical counter
++ DMA register */
++ #define MAC0_STAT_DMA_CMD_LBN 48
++ #define MAC0_STAT_DMA_CMD_WIDTH 1
++ #define MAC0_STAT_DMA_ADR_LBN 0
++ #define MAC0_STAT_DMA_ADR_WIDTH 48
++#define MAC1_STAT_DMA_REG_KER_OFST 0xC70 /* Port 1 MAC statistical counter
++ DMA register */
++#define MAC1_STAT_DMA_REG_OFST 0xC70 /* Port 1 MAC statistical counter
++ DMA register */
++ #define MAC1_STAT_DMA_CMD_LBN 48
++ #define MAC1_STAT_DMA_CMD_WIDTH 1
++ #define MAC1_STAT_DMA_ADR_LBN 0
++ #define MAC1_STAT_DMA_ADR_WIDTH 48
++#define MAC0_CTRL_REG_KER_OFST 0xC80 /* Port 0 MAC control register */
++#define MAC0_CTRL_REG_OFST 0xC80 /* Port 0 MAC control register */
++ #define MAC0_XOFF_VAL_LBN 16
++ #define MAC0_XOFF_VAL_WIDTH 16
++ #define MAC0_BCAD_ACPT_LBN 4
++ #define MAC0_BCAD_ACPT_WIDTH 1
++ #define MAC0_UC_PROM_LBN 3
++ #define MAC0_UC_PROM_WIDTH 1
++ #define MAC0_LINK_STATUS_LBN 2
++ #define MAC0_LINK_STATUS_WIDTH 1
++ #define MAC0_SPEED_LBN 0
++ #define MAC0_SPEED_WIDTH 2
++#define MAC1_CTRL_REG_KER_OFST 0xC90 /* Port 1 MAC control register */
++#define MAC1_CTRL_REG_OFST 0xC90 /* Port 1 MAC control register */
++ #define MAC1_XOFF_VAL_LBN 16
++ #define MAC1_XOFF_VAL_WIDTH 16
++ #define MAC1_BCAD_ACPT_LBN 4
++ #define MAC1_BCAD_ACPT_WIDTH 1
++ #define MAC1_UC_PROM_LBN 3
++ #define MAC1_UC_PROM_WIDTH 1
++ #define MAC1_LINK_STATUS_LBN 2
++ #define MAC1_LINK_STATUS_WIDTH 1
++ #define MAC1_SPEED_LBN 0
++ #define MAC1_SPEED_WIDTH 2
++#define MAC_MC_HASH_REG0_KER_OFST 0xCA0 /* Multicast address hash table */
++#define MAC_MC_HASH_REG0_OFST 0xCA0 /* Multicast address hash table */
++ #define MAC_MCAST_HASH0_LBN 0
++ #define MAC_MCAST_HASH0_WIDTH 128
++#define MAC_MC_HASH_REG1_KER_OFST 0xCB0 /* Multicast address hash table */
++#define MAC_MC_HASH_REG1_OFST 0xCB0 /* Multicast address hash table */
++ #define MAC_MCAST_HASH1_LBN 0
++ #define MAC_MCAST_HASH1_WIDTH 128
++/*************---- 1G MAC Registers C Header ----*************/
++#define GM_P0_BASE 0xE00
++#define GM_P1_BASE 0x1000
++#define GM_CFG1_REG_KER_OFST 0x00 /* GMAC configuration register 1 */
++#define GM_CFG1_REG_OFST 0x00 /* GMAC configuration register 1 */
++ #define GM_SW_RST_LBN 31
++ #define GM_SW_RST_WIDTH 1
++ #define GM_SIM_RST_LBN 30
++ #define GM_SIM_RST_WIDTH 1
++ #define GM_RST_RX_MAC_CTL_LBN 19
++ #define GM_RST_RX_MAC_CTL_WIDTH 1
++ #define GM_RST_TX_MAC_CTL_LBN 18
++ #define GM_RST_TX_MAC_CTL_WIDTH 1
++ #define GM_RST_RX_FUNC_LBN 17
++ #define GM_RST_RX_FUNC_WIDTH 1
++ #define GM_RST_TX_FUNC_LBN 16
++ #define GM_RST_TX_FUNC_WIDTH 1
++ #define GM_LOOP_LBN 8
++ #define GM_LOOP_WIDTH 1
++ #define GM_RX_FC_EN_LBN 5
++ #define GM_RX_FC_EN_WIDTH 1
++ #define GM_TX_FC_EN_LBN 4
++ #define GM_TX_FC_EN_WIDTH 1
++ #define GM_SYNC_RXEN_LBN 3
++ #define GM_SYNC_RXEN_WIDTH 1
++ #define GM_RX_EN_LBN 2
++ #define GM_RX_EN_WIDTH 1
++ #define GM_SYNC_TXEN_LBN 1
++ #define GM_SYNC_TXEN_WIDTH 1
++ #define GM_TX_EN_LBN 0
++ #define GM_TX_EN_WIDTH 1
++#define GM_CFG2_REG_KER_OFST 0x10 /* GMAC configuration register 2 */
++#define GM_CFG2_REG_OFST 0x10 /* GMAC configuration register 2 */
++ #define GM_PAMBL_LEN_LBN 12
++ #define GM_PAMBL_LEN_WIDTH 4
++ #define GM_IF_MODE_LBN 8
++ #define GM_IF_MODE_WIDTH 2
++ #define GM_HUGE_FRM_EN_LBN 5
++ #define GM_HUGE_FRM_EN_WIDTH 1
++ #define GM_LEN_CHK_LBN 4
++ #define GM_LEN_CHK_WIDTH 1
++ #define GM_PAD_CRC_EN_LBN 2
++ #define GM_PAD_CRC_EN_WIDTH 1
++ #define GM_CRC_EN_LBN 1
++ #define GM_CRC_EN_WIDTH 1
++ #define GM_FD_LBN 0
++ #define GM_FD_WIDTH 1
++#define GM_IPG_REG_KER_OFST 0x20 /* GMAC IPG register */
++#define GM_IPG_REG_OFST 0x20 /* GMAC IPG register */
++ #define GM_NONB2B_IPG1_LBN 24
++ #define GM_NONB2B_IPG1_WIDTH 7
++ #define GM_NONB2B_IPG2_LBN 16
++ #define GM_NONB2B_IPG2_WIDTH 7
++ #define GM_MIN_IPG_ENF_LBN 8
++ #define GM_MIN_IPG_ENF_WIDTH 8
++ #define GM_B2B_IPG_LBN 0
++ #define GM_B2B_IPG_WIDTH 7
++#define GM_HD_REG_KER_OFST 0x30 /* GMAC half duplex register */
++#define GM_HD_REG_OFST 0x30 /* GMAC half duplex register */
++ #define GM_ALT_BOFF_VAL_LBN 20
++ #define GM_ALT_BOFF_VAL_WIDTH 4
++ #define GM_ALT_BOFF_EN_LBN 19
++ #define GM_ALT_BOFF_EN_WIDTH 1
++ #define GM_BP_NO_BOFF_LBN 18
++ #define GM_BP_NO_BOFF_WIDTH 1
++ #define GM_DIS_BOFF_LBN 17
++ #define GM_DIS_BOFF_WIDTH 1
++ #define GM_EXDEF_TX_EN_LBN 16
++ #define GM_EXDEF_TX_EN_WIDTH 1
++ #define GM_RTRY_LIMIT_LBN 12
++ #define GM_RTRY_LIMIT_WIDTH 4
++ #define GM_COL_WIN_LBN 0
++ #define GM_COL_WIN_WIDTH 10
++#define GM_MAX_FLEN_REG_KER_OFST 0x40 /* GMAC maximum frame length register */
++#define GM_MAX_FLEN_REG_OFST 0x40 /* GMAC maximum frame length register */
++ #define GM_MAX_FLEN_LBN 0
++ #define GM_MAX_FLEN_WIDTH 16
++#define GM_TEST_REG_KER_OFST 0x70 /* GMAC test register */
++#define GM_TEST_REG_OFST 0x70 /* GMAC test register */
++ #define GM_MAX_BOFF_LBN 3
++ #define GM_MAX_BOFF_WIDTH 1
++ #define GM_REG_TX_FLOW_EN_LBN 2
++ #define GM_REG_TX_FLOW_EN_WIDTH 1
++ #define GM_TEST_PAUSE_LBN 1
++ #define GM_TEST_PAUSE_WIDTH 1
++ #define GM_SHORT_SLOT_LBN 0
++ #define GM_SHORT_SLOT_WIDTH 1
++#define GM_ADR1_REG_KER_OFST 0x100 /* GMAC station address register 1 */
++#define GM_ADR1_REG_OFST 0x100 /* GMAC station address register 1 */
++ #define GM_ADR1_LBN 0
++ #define GM_ADR1_WIDTH 32
++#define GM_ADR2_REG_KER_OFST 0x110 /* GMAC station address register 2 */
++#define GM_ADR2_REG_OFST 0x110 /* GMAC station address register 2 */
++ #define GM_ADR2_LBN 16
++ #define GM_ADR2_WIDTH 16
++#define GMF_CFG0_REG_KER_OFST 0x120 /* GMAC FIFO configuration register 0 */
++#define GMF_CFG0_REG_OFST 0x120 /* GMAC FIFO configuration register 0 */
++ #define GMF_FTFENRPLY_LBN 20
++ #define GMF_FTFENRPLY_WIDTH 1
++ #define GMF_STFENRPLY_LBN 19
++ #define GMF_STFENRPLY_WIDTH 1
++ #define GMF_FRFENRPLY_LBN 18
++ #define GMF_FRFENRPLY_WIDTH 1
++ #define GMF_SRFENRPLY_LBN 17
++ #define GMF_SRFENRPLY_WIDTH 1
++ #define GMF_WTMENRPLY_LBN 16
++ #define GMF_WTMENRPLY_WIDTH 1
++ #define GMF_FTFENREQ_LBN 12
++ #define GMF_FTFENREQ_WIDTH 1
++ #define GMF_STFENREQ_LBN 11
++ #define GMF_STFENREQ_WIDTH 1
++ #define GMF_FRFENREQ_LBN 10
++ #define GMF_FRFENREQ_WIDTH 1
++ #define GMF_SRFENREQ_LBN 9
++ #define GMF_SRFENREQ_WIDTH 1
++ #define GMF_WTMENREQ_LBN 8
++ #define GMF_WTMENREQ_WIDTH 1
++ #define GMF_HSTRSTFT_LBN 4
++ #define GMF_HSTRSTFT_WIDTH 1
++ #define GMF_HSTRSTST_LBN 3
++ #define GMF_HSTRSTST_WIDTH 1
++ #define GMF_HSTRSTFR_LBN 2
++ #define GMF_HSTRSTFR_WIDTH 1
++ #define GMF_HSTRSTSR_LBN 1
++ #define GMF_HSTRSTSR_WIDTH 1
++ #define GMF_HSTRSTWT_LBN 0
++ #define GMF_HSTRSTWT_WIDTH 1
++#define GMF_CFG1_REG_KER_OFST 0x130 /* GMAC FIFO configuration register 1 */
++#define GMF_CFG1_REG_OFST 0x130 /* GMAC FIFO configuration register 1 */
++ #define GMF_CFGFRTH_LBN 16
++ #define GMF_CFGFRTH_WIDTH 5
++ #define GMF_CFGXOFFRTX_LBN 0
++ #define GMF_CFGXOFFRTX_WIDTH 16
++#define GMF_CFG2_REG_KER_OFST 0x140 /* GMAC FIFO configuration register 2 */
++#define GMF_CFG2_REG_OFST 0x140 /* GMAC FIFO configuration register 2 */
++ #define GMF_CFGHWM_LBN 16
++ #define GMF_CFGHWM_WIDTH 6
++ #define GMF_CFGLWM_LBN 0
++ #define GMF_CFGLWM_WIDTH 6
++#define GMF_CFG3_REG_KER_OFST 0x150 /* GMAC FIFO configuration register 3 */
++#define GMF_CFG3_REG_OFST 0x150 /* GMAC FIFO configuration register 3 */
++ #define GMF_CFGHWMFT_LBN 16
++ #define GMF_CFGHWMFT_WIDTH 6
++ #define GMF_CFGFTTH_LBN 0
++ #define GMF_CFGFTTH_WIDTH 6
++#define GMF_CFG4_REG_KER_OFST 0x160 /* GMAC FIFO configuration register 4 */
++#define GMF_CFG4_REG_OFST 0x160 /* GMAC FIFO configuration register 4 */
++ #define GMF_HSTFLTRFRM_LBN 0
++ #define GMF_HSTFLTRFRM_WIDTH 18
++#define GMF_CFG5_REG_KER_OFST 0x170 /* GMAC FIFO configuration register 5 */
++#define GMF_CFG5_REG_OFST 0x170 /* GMAC FIFO configuration register 5 */
++ #define GMF_CFGHDPLX_LBN 22
++ #define GMF_CFGHDPLX_WIDTH 1
++ #define GMF_SRFULL_LBN 21
++ #define GMF_SRFULL_WIDTH 1
++ #define GMF_HSTSRFULLCLR_LBN 20
++ #define GMF_HSTSRFULLCLR_WIDTH 1
++ #define GMF_CFGBYTMODE_LBN 19
++ #define GMF_CFGBYTMODE_WIDTH 1
++ #define GMF_HSTDRPLT64_LBN 18
++ #define GMF_HSTDRPLT64_WIDTH 1
++ #define GMF_HSTFLTRFRMDC_LBN 0
++ #define GMF_HSTFLTRFRMDC_WIDTH 18
++/*************---- 10G MAC Registers C Header ----*************/
++#define XM_ADR_LO_REG_KER_P0_OFST 0x1200 /* XGMAC address register low -
++ port 0 */
++#define XM_ADR_LO_REG_P0_OFST 0x1200 /* XGMAC address register low -
++ port 0 */
++ #define XM_ADR_LO_LBN 0
++ #define XM_ADR_LO_WIDTH 32
++#define XM_ADR_HI_REG_KER_P0_OFST 0x1210 /* XGMAC address register high -
++ port 0 */
++#define XM_ADR_HI_REG_P0_OFST 0x1210 /* XGMAC address register high -
++ port 0 */
++ #define XM_ADR_HI_LBN 0
++ #define XM_ADR_HI_WIDTH 16
++#define XM_GLB_CFG_REG_KER_P0_OFST 0x1220 /* XGMAC global configuration -
++ port 0 */
++#define XM_GLB_CFG_REG_P0_OFST 0x1220 /* XGMAC global configuration -
++ port 0 */
++ #define XM_LINE_LB_DEEP_RSVD_LBN 28
++ #define XM_LINE_LB_DEEP_RSVD_WIDTH 1
++ #define XM_RMTFLT_GEN_LBN 17
++ #define XM_RMTFLT_GEN_WIDTH 1
++ #define XM_DEBUG_MODE_LBN 16
++ #define XM_DEBUG_MODE_WIDTH 1
++ #define XM_RX_STAT_EN_LBN 11
++ #define XM_RX_STAT_EN_WIDTH 1
++ #define XM_TX_STAT_EN_LBN 10
++ #define XM_TX_STAT_EN_WIDTH 1
++ #define XM_CUT_THRU_MODE_LBN 7
++ #define XM_CUT_THRU_MODE_WIDTH 1
++ #define XM_RX_JUMBO_MODE_LBN 6
++ #define XM_RX_JUMBO_MODE_WIDTH 1
++ #define XM_WAN_MODE_LBN 5
++ #define XM_WAN_MODE_WIDTH 1
++ #define XM_AUTOCLR_MODE_LBN 4
++ #define XM_AUTOCLR_MODE_WIDTH 1
++ #define XM_INTCLR_MODE_LBN 3
++ #define XM_INTCLR_MODE_WIDTH 1
++ #define XM_CORE_RST_LBN 0
++ #define XM_CORE_RST_WIDTH 1
++#define XM_TX_CFG_REG_KER_P0_OFST 0x1230 /* XGMAC transmit configuration -
++ port 0 */
++#define XM_TX_CFG_REG_P0_OFST 0x1230 /* XGMAC transmit configuration -
++ port 0 */
++ #define XM_TX_PROG_LBN 24
++ #define XM_TX_PROG_WIDTH 1
++ #define XM_IPG_LBN 16
++ #define XM_IPG_WIDTH 4
++ #define XM_FCNTL_LBN 10
++ #define XM_FCNTL_WIDTH 1
++ #define XM_TXCRC_LBN 8
++ #define XM_TXCRC_WIDTH 1
++ #define XM_EDRC_LBN 6
++ #define XM_EDRC_WIDTH 1
++ #define XM_AUTO_PAD_LBN 5
++ #define XM_AUTO_PAD_WIDTH 1
++ #define XM_TX_PRMBL_LBN 2
++ #define XM_TX_PRMBL_WIDTH 1
++ #define XM_TXEN_LBN 1
++ #define XM_TXEN_WIDTH 1
++ #define XM_TX_RST_LBN 0
++ #define XM_TX_RST_WIDTH 1
++#define XM_RX_CFG_REG_KER_P0_OFST 0x1240 /* XGMAC receive configuration -
++ port 0 */
++#define XM_RX_CFG_REG_P0_OFST 0x1240 /* XGMAC receive configuration -
++ port 0 */
++ #define XM_PASS_LENERR_LBN 26
++ #define XM_PASS_LENERR_WIDTH 1
++ #define XM_PASS_CRC_ERR_LBN 25
++ #define XM_PASS_CRC_ERR_WIDTH 1
++ #define XM_PASS_PRMBLE_ERR_LBN 24
++ #define XM_PASS_PRMBLE_ERR_WIDTH 1
++ #define XM_REJ_UCAST_LBN 18
++ #define XM_REJ_UCAST_WIDTH 1
++ #define XM_BSC_EN_LBN 17
++ #define XM_BSC_EN_WIDTH 1
++ #define XM_ACPT_ALL_MCAST_LBN 11
++ #define XM_ACPT_ALL_MCAST_WIDTH 1
++ #define XM_PASS_SAP_LBN 10
++ #define XM_PASS_SAP_WIDTH 1
++ #define XM_ACPT_ALL_UCAST_LBN 9
++ #define XM_ACPT_ALL_UCAST_WIDTH 1
++ #define XM_AUTO_DEPAD_LBN 8
++ #define XM_AUTO_DEPAD_WIDTH 1
++ #define XM_RXCRC_LBN 3
++ #define XM_RXCRC_WIDTH 1
++ #define XM_RX_PRMBL_LBN 2
++ #define XM_RX_PRMBL_WIDTH 1
++ #define XM_RXEN_LBN 1
++ #define XM_RXEN_WIDTH 1
++ #define XM_RX_RST_LBN 0
++ #define XM_RX_RST_WIDTH 1
++#define XM_FC_REG_KER_P0_OFST 0x1270 /* XGMAC flow control register -
++ port 0 */
++#define XM_FC_REG_P0_OFST 0x1270 /* XGMAC flow control register -
++ port 0 */
++ #define XM_PAUSE_TIME_LBN 16
++ #define XM_PAUSE_TIME_WIDTH 16
++ #define XM_RX_MAC_STAT_LBN 11
++ #define XM_RX_MAC_STAT_WIDTH 1
++ #define XM_TX_MAC_STAT_LBN 10
++ #define XM_TX_MAC_STAT_WIDTH 1
++ #define XM_MCNTL_PASS_LBN 8
++ #define XM_MCNTL_PASS_WIDTH 2
++ #define XM_REJ_CNTL_UCAST_LBN 6
++ #define XM_REJ_CNTL_UCAST_WIDTH 1
++ #define XM_REJ_CNTL_MCAST_LBN 5
++ #define XM_REJ_CNTL_MCAST_WIDTH 1
++ #define XM_AUTO_XMIT_ZPAUSE_LBN 4
++ #define XM_AUTO_XMIT_ZPAUSE_WIDTH 1
++ #define XM_AUTO_XMIT_PAUSE_LBN 3
++ #define XM_AUTO_XMIT_PAUSE_WIDTH 1
++ #define XM_ZPAUSE_LBN 2
++ #define XM_ZPAUSE_WIDTH 1
++ #define XM_XMIT_PAUSE_LBN 1
++ #define XM_XMIT_PAUSE_WIDTH 1
++ #define XM_DIS_FCNTL_LBN 0
++ #define XM_DIS_FCNTL_WIDTH 1
++#define XM_PAUSE_TIME_REG_KER_P0_OFST 0x1290 /* XGMAC pause time register -
++ port 0 */
++#define XM_PAUSE_TIME_REG_P0_OFST 0x1290 /* XGMAC pause time register -
++ port 0 */
++ #define XM_TX_PAUSE_CNT_LBN 16
++ #define XM_TX_PAUSE_CNT_WIDTH 16
++ #define XM_RX_PAUSE_CNT_LBN 0
++ #define XM_RX_PAUSE_CNT_WIDTH 16
++#define XM_TX_PARAM_REG_KER_P0_OFST 0x12D0 /* XGMAC transmit parameter
++ register - port 0 */
++#define XM_TX_PARAM_REG_P0_OFST 0x12D0 /* XGMAC transmit parameter register -
++ port 0 */
++ #define XM_TX_JUMBO_MODE_LBN 31
++ #define XM_TX_JUMBO_MODE_WIDTH 1
++ #define XM_MAX_TX_FRM_SIZE_LBN 16
++ #define XM_MAX_TX_FRM_SIZE_WIDTH 14
++ #define XM_PAD_CHAR_LBN 0
++ #define XM_PAD_CHAR_WIDTH 8
++#define XM_RX_PARAM_REG_KER_P0_OFST 0x12E0 /* XGMAC receive parameter
++ register - port 0 */
++#define XM_RX_PARAM_REG_P0_OFST 0x12E0 /* XGMAC receive parameter register -
++ port 0 */
++ #define XM_MAX_RX_FRM_SIZE_LBN 0
++ #define XM_MAX_RX_FRM_SIZE_WIDTH 14
++#define XX_PWR_RST_REG_KER_P0_OFST 0x1300 /* XGXS/XAUI powerdown/reset
++ register */
++#define XX_PWR_RST_REG_P0_OFST 0x1300 /* XGXS/XAUI powerdown/reset register */
++ #define XX_PWRDND_SIG_LBN 31
++ #define XX_PWRDND_SIG_WIDTH 1
++ #define XX_PWRDNC_SIG_LBN 30
++ #define XX_PWRDNC_SIG_WIDTH 1
++ #define XX_PWRDNB_SIG_LBN 29
++ #define XX_PWRDNB_SIG_WIDTH 1
++ #define XX_PWRDNA_SIG_LBN 28
++ #define XX_PWRDNA_SIG_WIDTH 1
++ #define XX_SIM_MODE_LBN 27
++ #define XX_SIM_MODE_WIDTH 1
++ #define XX_RSTPLLCD_SIG_LBN 25
++ #define XX_RSTPLLCD_SIG_WIDTH 1
++ #define XX_RSTPLLAB_SIG_LBN 24
++ #define XX_RSTPLLAB_SIG_WIDTH 1
++ #define XX_RESETD_SIG_LBN 23
++ #define XX_RESETD_SIG_WIDTH 1
++ #define XX_RESETC_SIG_LBN 22
++ #define XX_RESETC_SIG_WIDTH 1
++ #define XX_RESETB_SIG_LBN 21
++ #define XX_RESETB_SIG_WIDTH 1
++ #define XX_RESETA_SIG_LBN 20
++ #define XX_RESETA_SIG_WIDTH 1
++ #define XX_RSTXGXSTX_SIG_LBN 18
++ #define XX_RSTXGXSTX_SIG_WIDTH 1
++ #define XX_RSTXGXSRX_SIG_LBN 17
++ #define XX_RSTXGXSRX_SIG_WIDTH 1
++ #define XX_SD_RST_ACT_LBN 16
++ #define XX_SD_RST_ACT_WIDTH 1
++ #define XX_PWRDND_EN_LBN 15
++ #define XX_PWRDND_EN_WIDTH 1
++ #define XX_PWRDNC_EN_LBN 14
++ #define XX_PWRDNC_EN_WIDTH 1
++ #define XX_PWRDNB_EN_LBN 13
++ #define XX_PWRDNB_EN_WIDTH 1
++ #define XX_PWRDNA_EN_LBN 12
++ #define XX_PWRDNA_EN_WIDTH 1
++ #define XX_RSTPLLCD_EN_LBN 9
++ #define XX_RSTPLLCD_EN_WIDTH 1
++ #define XX_RSTPLLAB_EN_LBN 8
++ #define XX_RSTPLLAB_EN_WIDTH 1
++ #define XX_RESETD_EN_LBN 7
++ #define XX_RESETD_EN_WIDTH 1
++ #define XX_RESETC_EN_LBN 6
++ #define XX_RESETC_EN_WIDTH 1
++ #define XX_RESETB_EN_LBN 5
++ #define XX_RESETB_EN_WIDTH 1
++ #define XX_RESETA_EN_LBN 4
++ #define XX_RESETA_EN_WIDTH 1
++ #define XX_RSTXGXSTX_EN_LBN 2
++ #define XX_RSTXGXSTX_EN_WIDTH 1
++ #define XX_RSTXGXSRX_EN_LBN 1
++ #define XX_RSTXGXSRX_EN_WIDTH 1
++ #define XX_RST_XX_EN_LBN 0
++ #define XX_RST_XX_EN_WIDTH 1
++#define XX_SD_CTL_REG_KER_P0_OFST 0x1310 /* XGXS/XAUI powerdown/reset control
++ register */
++#define XX_SD_CTL_REG_P0_OFST 0x1310 /* XGXS/XAUI powerdown/reset control
++ register */
++ #define XX_TERMADJ1_LBN 17
++ #define XX_TERMADJ1_WIDTH 1
++ #define XX_TERMADJ0_LBN 16
++ #define XX_TERMADJ0_WIDTH 1
++ #define XX_HIDRVD_LBN 15
++ #define XX_HIDRVD_WIDTH 1
++ #define XX_LODRVD_LBN 14
++ #define XX_LODRVD_WIDTH 1
++ #define XX_HIDRVC_LBN 13
++ #define XX_HIDRVC_WIDTH 1
++ #define XX_LODRVC_LBN 12
++ #define XX_LODRVC_WIDTH 1
++ #define XX_HIDRVB_LBN 11
++ #define XX_HIDRVB_WIDTH 1
++ #define XX_LODRVB_LBN 10
++ #define XX_LODRVB_WIDTH 1
++ #define XX_HIDRVA_LBN 9
++ #define XX_HIDRVA_WIDTH 1
++ #define XX_LODRVA_LBN 8
++ #define XX_LODRVA_WIDTH 1
++ #define XX_LPBKD_LBN 3
++ #define XX_LPBKD_WIDTH 1
++ #define XX_LPBKC_LBN 2
++ #define XX_LPBKC_WIDTH 1
++ #define XX_LPBKB_LBN 1
++ #define XX_LPBKB_WIDTH 1
++ #define XX_LPBKA_LBN 0
++ #define XX_LPBKA_WIDTH 1
++#define XX_TXDRV_CTL_REG_KER_P0_OFST 0x1320 /* XAUI SerDes transmit drive
++ control register */
++#define XX_TXDRV_CTL_REG_P0_OFST 0x1320 /* XAUI SerDes transmit drive
++ control register */
++ #define XX_DEQD_LBN 28
++ #define XX_DEQD_WIDTH 4
++ #define XX_DEQC_LBN 24
++ #define XX_DEQC_WIDTH 4
++ #define XX_DEQB_LBN 20
++ #define XX_DEQB_WIDTH 4
++ #define XX_DEQA_LBN 16
++ #define XX_DEQA_WIDTH 4
++ #define XX_DTXD_LBN 12
++ #define XX_DTXD_WIDTH 4
++ #define XX_DTXC_LBN 8
++ #define XX_DTXC_WIDTH 4
++ #define XX_DTXB_LBN 4
++ #define XX_DTXB_WIDTH 4
++ #define XX_DTXA_LBN 0
++ #define XX_DTXA_WIDTH 4
++#define XX_PRBS_CTL_REG_KER_P0_OFST 0x1330 /* XAUI PRBS control register */
++#define XX_PRBS_CTL_REG_P0_OFST 0x1330 /* XAUI PRBS control register */
++ #define XX_CH3_RX_PRBS_SEL_LBN 30
++ #define XX_CH3_RX_PRBS_SEL_WIDTH 2
++ #define XX_CH3_RX_PRBS_INV_LBN 29
++ #define XX_CH3_RX_PRBS_INV_WIDTH 1
++ #define XX_CH3_RX_PRBS_CHKEN_LBN 28
++ #define XX_CH3_RX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH2_RX_PRBS_SEL_LBN 26
++ #define XX_CH2_RX_PRBS_SEL_WIDTH 2
++ #define XX_CH2_RX_PRBS_INV_LBN 25
++ #define XX_CH2_RX_PRBS_INV_WIDTH 1
++ #define XX_CH2_RX_PRBS_CHKEN_LBN 24
++ #define XX_CH2_RX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH1_RX_PRBS_SEL_LBN 22
++ #define XX_CH1_RX_PRBS_SEL_WIDTH 2
++ #define XX_CH1_RX_PRBS_INV_LBN 21
++ #define XX_CH1_RX_PRBS_INV_WIDTH 1
++ #define XX_CH1_RX_PRBS_CHKEN_LBN 20
++ #define XX_CH1_RX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH0_RX_PRBS_SEL_LBN 18
++ #define XX_CH0_RX_PRBS_SEL_WIDTH 2
++ #define XX_CH0_RX_PRBS_INV_LBN 17
++ #define XX_CH0_RX_PRBS_INV_WIDTH 1
++ #define XX_CH0_RX_PRBS_CHKEN_LBN 16
++ #define XX_CH0_RX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH3_TX_PRBS_SEL_LBN 14
++ #define XX_CH3_TX_PRBS_SEL_WIDTH 2
++ #define XX_CH3_TX_PRBS_INV_LBN 13
++ #define XX_CH3_TX_PRBS_INV_WIDTH 1
++ #define XX_CH3_TX_PRBS_CHKEN_LBN 12
++ #define XX_CH3_TX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH2_TX_PRBS_SEL_LBN 10
++ #define XX_CH2_TX_PRBS_SEL_WIDTH 2
++ #define XX_CH2_TX_PRBS_INV_LBN 9
++ #define XX_CH2_TX_PRBS_INV_WIDTH 1
++ #define XX_CH2_TX_PRBS_CHKEN_LBN 8
++ #define XX_CH2_TX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH1_TX_PRBS_SEL_LBN 6
++ #define XX_CH1_TX_PRBS_SEL_WIDTH 2
++ #define XX_CH1_TX_PRBS_INV_LBN 5
++ #define XX_CH1_TX_PRBS_INV_WIDTH 1
++ #define XX_CH1_TX_PRBS_CHKEN_LBN 4
++ #define XX_CH1_TX_PRBS_CHKEN_WIDTH 1
++ #define XX_CH0_TX_PRBS_SEL_LBN 2
++ #define XX_CH0_TX_PRBS_SEL_WIDTH 2
++ #define XX_CH0_TX_PRBS_INV_LBN 1
++ #define XX_CH0_TX_PRBS_INV_WIDTH 1
++ #define XX_CH0_TX_PRBS_CHKEN_LBN 0
++ #define XX_CH0_TX_PRBS_CHKEN_WIDTH 1
++#define XX_PRBS_CHK_REG_KER_P0_OFST 0x1340 /* XAUI PRBS checker control
++ register */
++#define XX_PRBS_CHK_REG_P0_OFST 0x1340 /* XAUI PRBS checker control
++ register */
++ #define XX_REV_LB_EN_LBN 16
++ #define XX_REV_LB_EN_WIDTH 1
++ #define XX_CH3_DEG_DET_LBN 15
++ #define XX_CH3_DEG_DET_WIDTH 1
++ #define XX_CH3_LFSR_LOCK_IND_LBN 14
++ #define XX_CH3_LFSR_LOCK_IND_WIDTH 1
++ #define XX_CH3_PRBS_FRUN_LBN 13
++ #define XX_CH3_PRBS_FRUN_WIDTH 1
++ #define XX_CH3_ERR_CHK_LBN 12
++ #define XX_CH3_ERR_CHK_WIDTH 1
++ #define XX_CH2_DEG_DET_LBN 11
++ #define XX_CH2_DEG_DET_WIDTH 1
++ #define XX_CH2_LFSR_LOCK_IND_LBN 10
++ #define XX_CH2_LFSR_LOCK_IND_WIDTH 1
++ #define XX_CH2_PRBS_FRUN_LBN 9
++ #define XX_CH2_PRBS_FRUN_WIDTH 1
++ #define XX_CH2_ERR_CHK_LBN 8
++ #define XX_CH2_ERR_CHK_WIDTH 1
++ #define XX_CH1_DEG_DET_LBN 7
++ #define XX_CH1_DEG_DET_WIDTH 1
++ #define XX_CH1_LFSR_LOCK_IND_LBN 6
++ #define XX_CH1_LFSR_LOCK_IND_WIDTH 1
++ #define XX_CH1_PRBS_FRUN_LBN 5
++ #define XX_CH1_PRBS_FRUN_WIDTH 1
++ #define XX_CH1_ERR_CHK_LBN 4
++ #define XX_CH1_ERR_CHK_WIDTH 1
++ #define XX_CH0_DEG_DET_LBN 3
++ #define XX_CH0_DEG_DET_WIDTH 1
++ #define XX_CH0_LFSR_LOCK_IND_LBN 2
++ #define XX_CH0_LFSR_LOCK_IND_WIDTH 1
++ #define XX_CH0_PRBS_FRUN_LBN 1
++ #define XX_CH0_PRBS_FRUN_WIDTH 1
++ #define XX_CH0_ERR_CHK_LBN 0
++ #define XX_CH0_ERR_CHK_WIDTH 1
++#define XX_PRBS_ERR_REG_KER_P0_OFST 0x1350 /* XAUI PRBS checker error
++ count register */
++#define XX_PRBS_ERR_REG_P0_OFST 0x1350 /* XAUI PRBS checker error count
++ register */
++ #define XX_CH3_PRBS_ERR_CNT_LBN 24
++ #define XX_CH3_PRBS_ERR_CNT_WIDTH 8
++ #define XX_CH2_PRBS_ERR_CNT_LBN 16
++ #define XX_CH2_PRBS_ERR_CNT_WIDTH 8
++ #define XX_CH1_PRBS_ERR_CNT_LBN 8
++ #define XX_CH1_PRBS_ERR_CNT_WIDTH 8
++ #define XX_CH0_PRBS_ERR_CNT_LBN 0
++ #define XX_CH0_PRBS_ERR_CNT_WIDTH 8
++#define XX_CORE_STAT_REG_KER_P0_OFST 0x1360 /* XAUI XGXS core status
++ register */
++#define XX_CORE_STAT_REG_P0_OFST 0x1360 /* XAUI XGXS core status register */
++ #define XX_FORCE_SIG3_LBN 31
++ #define XX_FORCE_SIG3_WIDTH 1
++ #define XX_FORCE_SIG3_VAL_LBN 30
++ #define XX_FORCE_SIG3_VAL_WIDTH 1
++ #define XX_FORCE_SIG2_LBN 29
++ #define XX_FORCE_SIG2_WIDTH 1
++ #define XX_FORCE_SIG2_VAL_LBN 28
++ #define XX_FORCE_SIG2_VAL_WIDTH 1
++ #define XX_FORCE_SIG1_LBN 27
++ #define XX_FORCE_SIG1_WIDTH 1
++ #define XX_FORCE_SIG1_VAL_LBN 26
++ #define XX_FORCE_SIG1_VAL_WIDTH 1
++ #define XX_FORCE_SIG0_LBN 25
++ #define XX_FORCE_SIG0_WIDTH 1
++ #define XX_FORCE_SIG0_VAL_LBN 24
++ #define XX_FORCE_SIG0_VAL_WIDTH 1
++ #define XX_XGXS_LB_EN_LBN 23
++ #define XX_XGXS_LB_EN_WIDTH 1
++ #define XX_XGMII_LB_EN_LBN 22
++ #define XX_XGMII_LB_EN_WIDTH 1
++ #define XX_MATCH_FAULT_LBN 21
++ #define XX_MATCH_FAULT_WIDTH 1
++ #define XX_ALIGN_DONE_LBN 20
++ #define XX_ALIGN_DONE_WIDTH 1
++ #define XX_SYNC_STAT3_LBN 19
++ #define XX_SYNC_STAT3_WIDTH 1
++ #define XX_SYNC_STAT2_LBN 18
++ #define XX_SYNC_STAT2_WIDTH 1
++ #define XX_SYNC_STAT1_LBN 17
++ #define XX_SYNC_STAT1_WIDTH 1
++ #define XX_SYNC_STAT0_LBN 16
++ #define XX_SYNC_STAT0_WIDTH 1
++ #define XX_COMMA_DET_CH3_LBN 15
++ #define XX_COMMA_DET_CH3_WIDTH 1
++ #define XX_COMMA_DET_CH2_LBN 14
++ #define XX_COMMA_DET_CH2_WIDTH 1
++ #define XX_COMMA_DET_CH1_LBN 13
++ #define XX_COMMA_DET_CH1_WIDTH 1
++ #define XX_COMMA_DET_CH0_LBN 12
++ #define XX_COMMA_DET_CH0_WIDTH 1
++ #define XX_CGRP_ALIGN_CH3_LBN 11
++ #define XX_CGRP_ALIGN_CH3_WIDTH 1
++ #define XX_CGRP_ALIGN_CH2_LBN 10
++ #define XX_CGRP_ALIGN_CH2_WIDTH 1
++ #define XX_CGRP_ALIGN_CH1_LBN 9
++ #define XX_CGRP_ALIGN_CH1_WIDTH 1
++ #define XX_CGRP_ALIGN_CH0_LBN 8
++ #define XX_CGRP_ALIGN_CH0_WIDTH 1
++ #define XX_CHAR_ERR_CH3_LBN 7
++ #define XX_CHAR_ERR_CH3_WIDTH 1
++ #define XX_CHAR_ERR_CH2_LBN 6
++ #define XX_CHAR_ERR_CH2_WIDTH 1
++ #define XX_CHAR_ERR_CH1_LBN 5
++ #define XX_CHAR_ERR_CH1_WIDTH 1
++ #define XX_CHAR_ERR_CH0_LBN 4
++ #define XX_CHAR_ERR_CH0_WIDTH 1
++ #define XX_DISPERR_CH3_LBN 3
++ #define XX_DISPERR_CH3_WIDTH 1
++ #define XX_DISPERR_CH2_LBN 2
++ #define XX_DISPERR_CH2_WIDTH 1
++ #define XX_DISPERR_CH1_LBN 1
++ #define XX_DISPERR_CH1_WIDTH 1
++ #define XX_DISPERR_CH0_LBN 0
++ #define XX_DISPERR_CH0_WIDTH 1
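Every register field above is described by an _LBN (low bit number) / _WIDTH pair. As a minimal sketch of how such a pair is consumed (the helper name is hypothetical, not part of this patch):

    #include <stdint.h>

    /* Hypothetical helper illustrating the _LBN/_WIDTH convention: LBN is
     * the low bit number of the field, WIDTH its size in bits. */
    static inline uint32_t falcon_field_get(uint32_t reg, unsigned lbn,
                                            unsigned width)
    {
            return (uint32_t)((reg >> lbn) & (((uint64_t)1 << width) - 1));
    }

    /* e.g. the 4-bit inter-packet gap from an XM_TX_CFG_REG_P0 value:
     *   ipg = falcon_field_get(val, XM_IPG_LBN, XM_IPG_WIDTH);
     */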
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_xgrmon.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_xgrmon.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_xgrmon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_xgrmon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,125 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) 10G MAC
++ * statistics register definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*************---- 10G MAC Statistical Counters C Header ----*************/
++#define XgRxOctets_offset 0x0
++ #define XgRxOctets_WIDTH 48
++#define XgRxOctetsOK_offset 0x8
++ #define XgRxOctetsOK_WIDTH 48
++#define XgRxPkts_offset 0x10
++ #define XgRxPkts_WIDTH 32
++#define XgRxPktsOK_offset 0x14
++ #define XgRxPktsOK_WIDTH 32
++#define XgRxBroadcastPkts_offset 0x18
++ #define XgRxBroadcastPkts_WIDTH 32
++#define XgRxMulticastPkts_offset 0x1C
++ #define XgRxMulticastPkts_WIDTH 32
++#define XgRxUnicastPkts_offset 0x20
++ #define XgRxUnicastPkts_WIDTH 32
++#define XgRxUndersizePkts_offset 0x24
++ #define XgRxUndersizePkts_WIDTH 32
++#define XgRxOversizePkts_offset 0x28
++ #define XgRxOversizePkts_WIDTH 32
++#define XgRxJabberPkts_offset 0x2C
++ #define XgRxJabberPkts_WIDTH 32
++#define XgRxUndersizeFCSerrorPkts_offset 0x30
++ #define XgRxUndersizeFCSerrorPkts_WIDTH 32
++#define XgRxDropEvents_offset 0x34
++ #define XgRxDropEvents_WIDTH 32
++#define XgRxFCSerrorPkts_offset 0x38
++ #define XgRxFCSerrorPkts_WIDTH 32
++#define XgRxAlignError_offset 0x3C
++ #define XgRxAlignError_WIDTH 32
++#define XgRxSymbolError_offset 0x40
++ #define XgRxSymbolError_WIDTH 32
++#define XgRxInternalMACError_offset 0x44
++ #define XgRxInternalMACError_WIDTH 32
++#define XgRxControlPkts_offset 0x48
++ #define XgRxControlPkts_WIDTH 32
++#define XgRxPausePkts_offset 0x4C
++ #define XgRxPausePkts_WIDTH 32
++#define XgRxPkts64Octets_offset 0x50
++ #define XgRxPkts64Octets_WIDTH 32
++#define XgRxPkts65to127Octets_offset 0x54
++ #define XgRxPkts65to127Octets_WIDTH 32
++#define XgRxPkts128to255Octets_offset 0x58
++ #define XgRxPkts128to255Octets_WIDTH 32
++#define XgRxPkts256to511Octets_offset 0x5C
++ #define XgRxPkts256to511Octets_WIDTH 32
++#define XgRxPkts512to1023Octets_offset 0x60
++ #define XgRxPkts512to1023Octets_WIDTH 32
++#define XgRxPkts1024to15xxOctets_offset 0x64
++ #define XgRxPkts1024to15xxOctets_WIDTH 32
++#define XgRxPkts15xxtoMaxOctets_offset 0x68
++ #define XgRxPkts15xxtoMaxOctets_WIDTH 32
++#define XgRxLengthError_offset 0x6C
++ #define XgRxLengthError_WIDTH 32
++#define XgTxPkts_offset 0x80
++ #define XgTxPkts_WIDTH 32
++#define XgTxOctets_offset 0x88
++ #define XgTxOctets_WIDTH 48
++#define XgTxMulticastPkts_offset 0x90
++ #define XgTxMulticastPkts_WIDTH 32
++#define XgTxBroadcastPkts_offset 0x94
++ #define XgTxBroadcastPkts_WIDTH 32
++#define XgTxUnicastPkts_offset 0x98
++ #define XgTxUnicastPkts_WIDTH 32
++#define XgTxControlPkts_offset 0x9C
++ #define XgTxControlPkts_WIDTH 32
++#define XgTxPausePkts_offset 0xA0
++ #define XgTxPausePkts_WIDTH 32
++#define XgTxPkts64Octets_offset 0xA4
++ #define XgTxPkts64Octets_WIDTH 32
++#define XgTxPkts65to127Octets_offset 0xA8
++ #define XgTxPkts65to127Octets_WIDTH 32
++#define XgTxPkts128to255Octets_offset 0xAC
++ #define XgTxPkts128to255Octets_WIDTH 32
++#define XgTxPkts256to511Octets_offset 0xB0
++ #define XgTxPkts256to511Octets_WIDTH 32
++#define XgTxPkts512to1023Octets_offset 0xB4
++ #define XgTxPkts512to1023Octets_WIDTH 32
++#define XgTxPkts1024to15xxOctets_offset 0xB8
++ #define XgTxPkts1024to15xxOctets_WIDTH 32
++#define XgTxPkts1519toMaxOctets_offset 0xBC
++ #define XgTxPkts1519toMaxOctets_WIDTH 32
++#define XgTxUndersizePkts_offset 0xC0
++ #define XgTxUndersizePkts_WIDTH 32
++#define XgTxOversizePkts_offset 0xC4
++ #define XgTxOversizePkts_WIDTH 32
++#define xGTxNonTcpUdpPkt_offset 0xC8
++ #define xGTxNonTcpUdpPkt_WIDTH 16
++#define xGTxMacSrcErrPkt_offset 0xCC
++ #define xGTxMacSrcErrPkt_WIDTH 16
++#define xGTxIpSrcErrPkt_offset 0xD0
++ #define xGTxIpSrcErrPkt_WIDTH 16
++#define XgDmaDone_offset 0xD4
++ #define XgDmaDone_WIDTH 32
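The counters above form a statistics block laid out at the given byte offsets; most are 32 bits wide, a few (e.g. XgRxOctets) 48 bits. A hedged sketch of pulling one counter out of such a snapshot in host memory (the function is illustrative only and assumes a little-endian host):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical: extract one counter from a statistics snapshot; offset
     * and width come from the XgXxx_offset / XgXxx_WIDTH pairs above. */
    static uint64_t xgmac_stat_read(const uint8_t *snapshot, unsigned offset,
                                    unsigned width)
    {
            uint64_t v = 0;
            memcpy(&v, snapshot + offset, (width + 7) / 8); /* LE host assumed */
            return v;
    }

    /* e.g. xgmac_stat_read(buf, XgRxOctetsOK_offset, XgRxOctetsOK_WIDTH) */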
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,420 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC - EFXXXX (aka Falcon) specific
++ * definitions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_EFAB_HARDWARE_FALCON_H__
++#define __CI_DRIVER_EFAB_HARDWARE_FALCON_H__
++
++/*----------------------------------------------------------------------------
++ * Compile options
++ *---------------------------------------------------------------------------*/
++
++/* Falcon has an 8K maximum page size. */
++#define FALCON_MAX_PAGE_SIZE EFHW_8K
++
++/* include the register definitions */
++#include <ci/driver/efab/hardware/falcon/falcon_core.h>
++#include <ci/driver/efab/hardware/falcon/falcon_desc.h>
++#include <ci/driver/efab/hardware/falcon/falcon_event.h>
++#include <ci/driver/efab/hardware/falcon/falcon_mac.h>
++#include <ci/driver/efab/hardware/falcon/falcon_grmon.h>
++#include <ci/driver/efab/hardware/falcon/falcon_xgrmon.h>
++#include <ci/driver/efab/hardware/falcon/falcon_intr_vec.h>
++
++#define FALCON_DMA_TX_DESC_BYTES 8
++#define FALCON_DMA_RX_PHYS_DESC_BYTES 8
++#define FALCON_DMA_RX_BUF_DESC_BYTES 4
++
++
++/* ---- efhw_event_t helpers --- */
++
++#ifndef EFHW_IS_LITTLE_ENDIAN
++#error This needs lots of cpu_to_le64s() in
++#endif
++
++/*!\ TODO look at whether there is an efficiency gain to be had by
++ treating the event codes as 32-bit masks, as is done for EF1
++
++ These masks apply to the full 64 bits of the event to extract the
++ event code - followed by the common event codes to expect
++ */
++#define __FALCON_OPEN_MASK(WIDTH) ((((uint64_t)1) << (WIDTH)) - 1)
++#define FALCON_EVENT_CODE_MASK \
++ (__FALCON_OPEN_MASK(EV_CODE_WIDTH) << EV_CODE_LBN)
++#define FALCON_EVENT_EV_Q_ID_MASK \
++ (__FALCON_OPEN_MASK(DRIVER_EV_EVQ_ID_WIDTH) << DRIVER_EV_EVQ_ID_LBN)
++#define FALCON_EVENT_TX_FLUSH_Q_ID_MASK \
++ (__FALCON_OPEN_MASK(DRIVER_EV_TX_DESCQ_ID_WIDTH) << \
++ DRIVER_EV_TX_DESCQ_ID_LBN)
++#define FALCON_EVENT_RX_FLUSH_Q_ID_MASK \
++ (__FALCON_OPEN_MASK(DRIVER_EV_RX_DESCQ_ID_WIDTH) << \
++ DRIVER_EV_RX_DESCQ_ID_LBN)
++#define FALCON_EVENT_DRV_SUBCODE_MASK \
++ (__FALCON_OPEN_MASK(DRIVER_EV_SUB_CODE_WIDTH) << \
++ DRIVER_EV_SUB_CODE_LBN)
++
++#define FALCON_EVENT_FMT "[ev:%x:%08x:%08x]"
++#define FALCON_EVENT_PRI_ARG(e) \
++ ((unsigned)(((e).u64 & FALCON_EVENT_CODE_MASK) >> EV_CODE_LBN)), \
++ ((unsigned)((e).u64 >> 32)), ((unsigned)((e).u64 & 0xFFFFFFFF))
++
++#define FALCON_EVENT_CODE(evp) ((evp)->u64 & FALCON_EVENT_CODE_MASK)
++#define FALCON_EVENT_WAKE_EVQ_ID(evp) \
++ (((evp)->u64 & FALCON_EVENT_EV_Q_ID_MASK) >> DRIVER_EV_EVQ_ID_LBN)
++#define FALCON_EVENT_TX_FLUSH_Q_ID(evp) \
++ (((evp)->u64 & FALCON_EVENT_TX_FLUSH_Q_ID_MASK) >> \
++ DRIVER_EV_TX_DESCQ_ID_LBN)
++#define FALCON_EVENT_RX_FLUSH_Q_ID(evp) \
++ (((evp)->u64 & FALCON_EVENT_RX_FLUSH_Q_ID_MASK) >> \
++ DRIVER_EV_RX_DESCQ_ID_LBN)
++#define FALCON_EVENT_DRIVER_SUBCODE(evp) \
++ (((evp)->u64 & FALCON_EVENT_DRV_SUBCODE_MASK) >> \
++ DRIVER_EV_SUB_CODE_LBN)
++
++#define FALCON_EVENT_CODE_CHAR ((uint64_t)DRIVER_EV_DECODE << EV_CODE_LBN)
++#define FALCON_EVENT_CODE_SW ((uint64_t)DRV_GEN_EV_DECODE << EV_CODE_LBN)
++
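++A short usage sketch of the decode macros above; efhw_event_t is defined
++elsewhere in the driver headers and the handler shown is hypothetical:
++
++	static void example_handle_event(efhw_event_t *evp)
++	{
++		if (FALCON_EVENT_CODE(evp) == FALCON_EVENT_CODE_CHAR) {
++			/* driver-generated event: demux on the subcode */
++			switch (FALCON_EVENT_DRIVER_SUBCODE(evp)) {
++			/* ... e.g. TX/RX flush-done handling ... */
++			default:
++				break;
++			}
++		} else if (FALCON_EVENT_CODE(evp) == FALCON_EVENT_CODE_SW) {
++			/* software event generated by the driver itself */
++		}
++	}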
++
++/* so this is the size in bytes of an awful lot of things */
++#define FALCON_REGISTER128 (16)
++
++/* we define some unique dummy values as a debug aid */
++#ifdef _WIN32
++#define FALCON_ATOMIC_BASE 0xdeadbeef00000000ui64
++#else
++#define FALCON_ATOMIC_BASE 0xdeadbeef00000000ULL
++#endif
++#define FALCON_ATOMIC_UPD_REG (FALCON_ATOMIC_BASE | 0x1)
++#define FALCON_ATOMIC_PTR_TBL_REG (FALCON_ATOMIC_BASE | 0x2)
++#define FALCON_ATOMIC_SRPM_UDP_EVQ_REG (FALCON_ATOMIC_BASE | 0x3)
++#define FALCON_ATOMIC_RX_FLUSH_DESCQ (FALCON_ATOMIC_BASE | 0x4)
++#define FALCON_ATOMIC_TX_FLUSH_DESCQ (FALCON_ATOMIC_BASE | 0x5)
++#define FALCON_ATOMIC_INT_EN_REG (FALCON_ATOMIC_BASE | 0x6)
++#define FALCON_ATOMIC_TIMER_CMD_REG (FALCON_ATOMIC_BASE | 0x7)
++#define FALCON_ATOMIC_PACE_REG (FALCON_ATOMIC_BASE | 0x8)
++#define FALCON_ATOMIC_INT_ACK_REG (FALCON_ATOMIC_BASE | 0x9)
++/* XXX It crashed with odd value in FALCON_ATOMIC_INT_ADR_REG */
++#define FALCON_ATOMIC_INT_ADR_REG (FALCON_ATOMIC_BASE | 0xa)
++
++/*----------------------------------------------------------------------------
++ *
++ * PCI control blocks for Falcon -
++ * (P) primary is for NET
++ * (S) secondary is for CHAR
++ *
++ *---------------------------------------------------------------------------*/
++
++#define FALCON_P_CTR_AP_BAR 2
++#define FALCON_S_CTR_AP_BAR 0
++#define FALCON_S_DEVID 0x6703
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Falcon constants
++ *
++ *---------------------------------------------------------------------------*/
++
++#define FALCON_DMAQ_NUM (EFHW_4K)
++#define FALCON_EVQ_TBL_NUM (EFHW_4K)
++#define FALCON_TIMERS_NUM (EFHW_4K)
++
++/* This value is an upper limit on the total number of filter table
++ * entries, including odd and even banks. The actual size of filter table
++ * is determined at runtime, as it can vary.
++ */
++#define FALCON_FILTER_TBL_NUM (EFHW_16K)
++
++/* max number of buffers which can be pushed before committing */
++#define FALCON_BUFFER_UPD_MAX (128)
++
++/* We can tell falcon to write its RX buffers in 32-byte quanta,
++ and since we pad packets 2 bytes to the right we can't use
++ a full page (not unless we use jumbo mode for all queues)
++
++ NOTE: tests/nic/dma.c assumes that the value here is the real NIC
++ value, so we explicitly round it down to the nearest 32 bytes */
++
++/* #define FALCON_RX_USR_BUF_SIZE round_down(4096-2,32) */
++#define FALCON_RX_USR_BUF_SIZE 4064
++
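++The rounding referred to in the commented-out round_down() works out as
++4096 - 2 = 4094, and 4094 & ~31 = 4064.  A compile-time check one could
++add (ROUND_DOWN32 and the array-typedef assertion are illustrative only):
++
++	#define ROUND_DOWN32(x) ((x) & ~31u)
++	typedef char falcon_rx_buf_size_check
++		[ROUND_DOWN32(4096 - 2) == FALCON_RX_USR_BUF_SIZE ? 1 : -1];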
++#define FALCON_EVQ_RPTR_REG_P0 0x400
++
++/*----------------------------------------------------------------------------
++ *
++ * Falcon requires user-space descriptor pushes to be:
++ * dword[0-2]; wiob(); dword[3]
++ *
++ * Driver register access must be locked against other threads from
++ * the same driver but can be in any order: i.e dword[0-3]; wiob()
++ *
++ * The following helpers ensure that valid dword orderings are exercised
++ *
++ *---------------------------------------------------------------------------*/
++
++/* A union to allow writing 64bit values as 32bit values, without
++ * hitting the compiler's aliasing rules. We hope the compiler optimises
++ * away the copies anyway */
++union __u64to32 {
++ uint64_t u64;
++ struct {
++#ifdef EFHW_IS_LITTLE_ENDIAN
++ uint32_t a;
++ uint32_t b;
++#else
++ uint32_t b;
++ uint32_t a;
++#endif
++ } s;
++};
++
++static inline void
++falcon_write_ddd_d(efhw_ioaddr_t kva,
++ uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3)
++{
++ writel(d0, kva + 0);
++ writel(d1, kva + 4);
++ writel(d2, kva + 8);
++ mmiowb();
++ writel(d3, kva + 12);
++}
++
++static inline void falcon_write_q(efhw_ioaddr_t kva, uint64_t q)
++{
++ union __u64to32 u;
++ u.u64 = q;
++
++ writel(u.s.a, kva);
++ mmiowb();
++ writel(u.s.b, kva + 4);
++}
++
++static inline void falcon_read_q(efhw_ioaddr_t addr, uint64_t *q0)
++{
++ /* It is essential that we read dword0 first, so that
++ * the shadow register is updated with the latest value
++	 * and we get a self-consistent value.
++ */
++ union __u64to32 u;
++ u.s.a = readl(addr);
++ rmb();
++ u.s.b = readl(addr + 4);
++
++ *q0 = u.u64;
++}
++
++static inline void
++falcon_write_qq(efhw_ioaddr_t kva, uint64_t q0, uint64_t q1)
++{
++ writeq(q0, kva + 0);
++ falcon_write_q(kva + 8, q1);
++}
++
++static inline void
++falcon_read_qq(efhw_ioaddr_t addr, uint64_t *q0, uint64_t *q1)
++{
++ falcon_read_q(addr, q0);
++ *q1 = readq(addr + 8);
++}
++
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Buffer virtual addresses (4K buffers)
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Form a buffer virtual address from buffer ID and offset. If the offset
++** is larger than the buffer size, then the buffer indexed will be
++** calculated appropriately. It is the responsibility of the caller to
++** ensure that they have valid buffers programmed at that address.
++*/
++#define FALCON_VADDR_8K_S (13)
++#define FALCON_VADDR_4K_S (12)
++#define FALCON_VADDR_M 0xfffff /* post shift mask */
++
++#define FALCON_BUFFER_8K_ADDR(id, off) (((id) << FALCON_VADDR_8K_S) + (off))
++#define FALCON_BUFFER_8K_PAGE(vaddr) \
++ (((vaddr) >> FALCON_VADDR_8K_S) & FALCON_VADDR_M)
++#define FALCON_BUFFER_8K_OFF(vaddr) \
++ ((vaddr) & __FALCON_MASK32(FALCON_VADDR_8K_S))
++
++#define FALCON_BUFFER_4K_ADDR(id, off) (((id) << FALCON_VADDR_4K_S) + (off))
++#define FALCON_BUFFER_4K_PAGE(vaddr) \
++ (((vaddr) >> FALCON_VADDR_4K_S) & FALCON_VADDR_M)
++#define FALCON_BUFFER_4K_OFF(vaddr) \
++ ((vaddr) & __FALCON_MASK32(FALCON_VADDR_4K_S))
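++A worked round-trip through the 4K variant (values computed by hand;
++__FALCON_MASK32() is defined elsewhere in the driver headers):
++
++	static inline void falcon_buffer_addr_example(void)
++	{
++		unsigned vaddr = FALCON_BUFFER_4K_ADDR(5, 0x123); /* 0x5123 */
++		unsigned id    = FALCON_BUFFER_4K_PAGE(vaddr);    /* 5      */
++		unsigned off   = FALCON_BUFFER_4K_OFF(vaddr);     /* 0x123  */
++		(void)vaddr; (void)id; (void)off;
++	}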
++
++/*----------------------------------------------------------------------------
++ *
++ * Timer helpers
++ *
++ *---------------------------------------------------------------------------*/
++
++static inline int falcon_timer_page_addr(uint idx)
++{
++
++ EFHW_ASSERT(TIMER_CMD_REG_KER_OFST ==
++ (TIMER_CMD_REG_PAGE4_OFST - 4 * EFHW_8K));
++
++ EFHW_ASSERT(idx < FALCON_TIMERS_NUM);
++
++ if (idx < 4)
++ return TIMER_CMD_REG_KER_OFST + (idx * EFHW_8K);
++ else if (idx < 1024)
++ return TIMER_CMD_REG_PAGE4_OFST + ((idx - 4) * EFHW_8K);
++ else
++ return TIMER_CMD_REG_PAGE123K_OFST + ((idx - 1024) * EFHW_8K);
++}
++
++#define FALCON_TIMER_PAGE_MASK (EFHW_8K-1)
++
++static inline int falcon_timer_page_offset(uint idx)
++{
++ return falcon_timer_page_addr(idx) & FALCON_TIMER_PAGE_MASK;
++}
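++Concretely, falcon_timer_page_addr(0) returns TIMER_CMD_REG_KER_OFST,
++falcon_timer_page_addr(4) returns TIMER_CMD_REG_PAGE4_OFST, and
++falcon_timer_page_addr(1025) returns TIMER_CMD_REG_PAGE123K_OFST + EFHW_8K:
++the first four timers sit in kernel-only pages, and every timer after that
++gets its own 8K page in one of the two mappable regions.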
++
++/*----------------------------------------------------------------------------
++ *
++ * DMA Queue helpers
++ *
++ *---------------------------------------------------------------------------*/
++
++/* iSCSI queue for A1; see bug 5427 for more details. */
++#define FALCON_A1_ISCSI_DMAQ 4
++
++/*! returns an address within a bar of the TX DMA doorbell */
++static inline uint falcon_tx_dma_page_addr(uint dmaq_idx)
++{
++ uint page;
++
++ EFHW_ASSERT((((TX_DESC_UPD_REG_PAGE123K_OFST) & (EFHW_8K - 1)) ==
++ (((TX_DESC_UPD_REG_PAGE4_OFST) & (EFHW_8K - 1)))));
++
++ EFHW_ASSERT(dmaq_idx < FALCON_DMAQ_NUM);
++
++ if (dmaq_idx < 1024)
++ page = TX_DESC_UPD_REG_PAGE4_OFST + ((dmaq_idx - 4) * EFHW_8K);
++ else
++ page =
++ TX_DESC_UPD_REG_PAGE123K_OFST +
++ ((dmaq_idx - 1024) * EFHW_8K);
++
++ return page;
++}
++
++/*! returns an address within a bar of the RX DMA doorbell */
++static inline uint falcon_rx_dma_page_addr(uint dmaq_idx)
++{
++ uint page;
++
++ EFHW_ASSERT((((RX_DESC_UPD_REG_PAGE123K_OFST) & (EFHW_8K - 1)) ==
++ ((RX_DESC_UPD_REG_PAGE4_OFST) & (EFHW_8K - 1))));
++
++ EFHW_ASSERT(dmaq_idx < FALCON_DMAQ_NUM);
++
++ if (dmaq_idx < 1024)
++ page = RX_DESC_UPD_REG_PAGE4_OFST + ((dmaq_idx - 4) * EFHW_8K);
++ else
++ page =
++ RX_DESC_UPD_REG_PAGE123K_OFST +
++ ((dmaq_idx - 1024) * EFHW_8K);
++
++ return page;
++}
++
++/*! "page"=NIC-dependent register set size */
++#define FALCON_DMA_PAGE_MASK (EFHW_8K-1)
++
++/*! returns an address within a bar of the start of the "page"
++ containing the TX DMA doorbell */
++static inline int falcon_tx_dma_page_base(uint dma_idx)
++{
++ return falcon_tx_dma_page_addr(dma_idx) & ~FALCON_DMA_PAGE_MASK;
++}
++
++/*! returns an address within a bar of the start of the "page"
++ containing the RX DMA doorbell */
++static inline int falcon_rx_dma_page_base(uint dma_idx)
++{
++ return falcon_rx_dma_page_addr(dma_idx) & ~FALCON_DMA_PAGE_MASK;
++}
++
++/*! returns an offset within a "page" of the TX DMA doorbell */
++static inline int falcon_tx_dma_page_offset(uint dma_idx)
++{
++ return falcon_tx_dma_page_addr(dma_idx) & FALCON_DMA_PAGE_MASK;
++}
++
++/*! returns an offset within a "page" of the RX DMA doorbell */
++static inline int falcon_rx_dma_page_offset(uint dma_idx)
++{
++ return falcon_rx_dma_page_addr(dma_idx) & FALCON_DMA_PAGE_MASK;
++}
++
++/*----------------------------------------------------------------------------
++ *
++ * Events
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Falcon nails down the event queue mappings */
++#define FALCON_EVQ_KERNEL0 (0) /* hardwired for net driver */
++#define FALCON_EVQ_CHAR (4) /* char driver's event queue */
++#define FALCON_EVQ_NONIRQ    (5)  /* char driver's non-interrupting
++ queue. Subsequent queues are
++ available for user apps */
++
++/* reserved by the drivers */
++#define FALCON_EVQ_TBL_RESERVED (8)
++
++/* default DMA-Q sizes */
++#define FALCON_DMA_Q_DEFAULT_TX_SIZE 512
++
++#define FALCON_DMA_Q_DEFAULT_RX_SIZE 512
++
++#define FALCON_DMA_Q_DEFAULT_MMAP \
++ (FALCON_DMA_Q_DEFAULT_TX_SIZE * (FALCON_DMA_TX_DESC_BYTES * 2))
++
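++With the defaults above, FALCON_DMA_Q_DEFAULT_MMAP evaluates to
++512 * (8 * 2) = 8192 bytes, i.e. exactly one Falcon 8K page.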
++/*----------------------------------------------------------------------------
++ *
++ * DEBUG - Analyser trigger
++ *
++ *---------------------------------------------------------------------------*/
++
++static inline void falcon_deadbeef(efhw_ioaddr_t efhw_kva, unsigned what)
++{
++ writel(what, efhw_kva + 0x300);
++ mmiowb();
++}
++#endif /* __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,75 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides workaround settings for EtherFabric NICs.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_EFAB_WORKAROUNDS_H__
++#define __CI_DRIVER_EFAB_WORKAROUNDS_H__
++
++/*----------------------------------------------------------------------------
++ *
++ * Hardware workarounds which have global scope
++ *
++ *---------------------------------------------------------------------------*/
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON_B0__)
++/*------------------------------- B0 ---------------------------------------*/
++
++#define BUG2175_WORKAROUND 0 /* TX event batching for dual port operation.
++ This removes the effect (dup TX events)
++ of the fix
++ (TX event per packet + batch events) */
++#define BUG5302_WORKAROUND 0 /* unstick TX DMAQ after out-of-range wr ptr */
++#define BUG5475_WORKAROUND 1 /* 10G SNAP encapsulation broken */
++#define BUG5762_WORKAROUND 0 /* Set all queues to jumbo mode */
++#define BUG5391_WORKAROUND 0 /* Misaligned TX can't span 512-byte boundary */
++#define BUG7916_WORKAROUND 0 /* RX flush gets lost */
++
++#else
++/*------------------------------- A0/A1 ------------------------------------*/
++
++#define BUG2175_WORKAROUND 1 /* TX event batching for dual port operation.
++ This removes the effect (dup TX events)
++ of the fix
++ (TX event per packet + batch events) */
++#define BUG5302_WORKAROUND 1 /* unstick TX DMAQ after out-of-range wr ptr */
++#define BUG5475_WORKAROUND 1 /* 10G SNAP encapsulation broken */
++#define BUG5762_WORKAROUND 1 /* Set all queues to jumbo mode */
++#define BUG5391_WORKAROUND 1 /* Misaligned TX can't span 512-byte boundary */
++#define BUG7916_WORKAROUND 1 /* RX flush gets lost */
++
++#endif /* B0/A01 */
++
++#else
++# error Need hw support.
++#endif
++
++#endif /* __CI_DRIVER_EFAB_WORKAROUNDS_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,199 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides EtherFabric NIC hardware interface.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_EFAB_HARDWARE_H__
++#define __CI_DRIVER_EFAB_HARDWARE_H__
++
++#include "ci/driver/efab/hardware/workarounds.h"
++#include <ci/efhw/hardware_sysdep.h>
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Common EtherFabric definitions
++ *
++ *---------------------------------------------------------------------------*/
++
++#include <ci/efhw/debug.h>
++#include <ci/efhw/common.h>
++#include <ci/driver/efab/hardware/common.h>
++
++/*----------------------------------------------------------------------------
++ *
++ * EtherFabric variants
++ *
++ *---------------------------------------------------------------------------*/
++
++#include <ci/driver/efab/hardware/falcon.h>
++
++/*----------------------------------------------------------------------------
++ *
++ * EtherFabric Portable Hardware Layer defines
++ *
++ *---------------------------------------------------------------------------*/
++
++/*-------------- Initialisation ------------ */
++#define efhw_nic_close_hardware(nic) \
++ ((nic)->efhw_func->close_hardware(nic))
++
++#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr) \
++ ((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr)))
++
++/*-------------- Interrupt support ------------ */
++/** Handle interrupt. Return 0 if not handled, 1 if handled. */
++#define efhw_nic_interrupt(nic) \
++ ((nic)->efhw_func->interrupt(nic))
++
++#define efhw_nic_interrupt_enable(nic, index) \
++ ((nic)->efhw_func->interrupt_enable(nic, index))
++
++#define efhw_nic_interrupt_disable(nic, index) \
++ ((nic)->efhw_func->interrupt_disable(nic, index))
++
++#define efhw_nic_set_interrupt_moderation(nic, index, val) \
++ ((nic)->efhw_func->set_interrupt_moderation(nic, index, val))
++
++/*-------------- Event support ------------ */
++
++#define efhw_nic_event_queue_enable(nic, evq, size, q_base, buf_base) \
++ ((nic)->efhw_func->event_queue_enable(nic, evq, size, q_base, \
++ buf_base))
++
++#define efhw_nic_event_queue_disable(nic, evq, timer_only) \
++ ((nic)->efhw_func->event_queue_disable(nic, evq, timer_only))
++
++#define efhw_nic_wakeup_request(nic, q_base, index, evq) \
++ ((nic)->efhw_func->wakeup_request(nic, q_base, index, evq))
++
++#define efhw_nic_sw_event(nic, data, ev) \
++ ((nic)->efhw_func->sw_event(nic, data, ev))
++
++/*-------------- Filter support ------------ */
++#define efhw_nic_ipfilter_set(nic, type, index, dmaq, \
++ saddr, sport, daddr, dport) \
++ ((nic)->efhw_func->ipfilter_set(nic, type, index, dmaq, \
++ saddr, sport, daddr, dport))
++
++#define efhw_nic_ipfilter_attach(nic, index, dmaq) \
++ ((nic)->efhw_func->ipfilter_attach(nic, index, dmaq))
++
++#define efhw_nic_ipfilter_detach(nic, index) \
++ ((nic)->efhw_func->ipfilter_detach(nic, index))
++
++#define efhw_nic_ipfilter_clear(nic, index) \
++ ((nic)->efhw_func->ipfilter_clear(nic, index))
++
++/*-------------- DMA support ------------ */
++#define efhw_nic_dmaq_tx_q_init(nic, dmaq, evq, owner, tag, \
++ dmaq_size, index, flags) \
++ ((nic)->efhw_func->dmaq_tx_q_init(nic, dmaq, evq, owner, tag, \
++ dmaq_size, index, flags))
++
++#define efhw_nic_dmaq_rx_q_init(nic, dmaq, evq, owner, tag, \
++ dmaq_size, index, flags) \
++ ((nic)->efhw_func->dmaq_rx_q_init(nic, dmaq, evq, owner, tag, \
++ dmaq_size, index, flags))
++
++#define efhw_nic_dmaq_tx_q_disable(nic, dmaq) \
++ ((nic)->efhw_func->dmaq_tx_q_disable(nic, dmaq))
++
++#define efhw_nic_dmaq_rx_q_disable(nic, dmaq) \
++ ((nic)->efhw_func->dmaq_rx_q_disable(nic, dmaq))
++
++#define efhw_nic_flush_tx_dma_channel(nic, dmaq) \
++ ((nic)->efhw_func->flush_tx_dma_channel(nic, dmaq))
++
++#define efhw_nic_flush_rx_dma_channel(nic, dmaq) \
++ ((nic)->efhw_func->flush_rx_dma_channel(nic, dmaq))
++
++/*-------------- MAC Low level interface ---- */
++#define efhw_gmac_get_mac_addr(nic) \
++ ((nic)->gmac->get_mac_addr((nic)->gmac))
++
++/*-------------- Buffer table -------------- */
++#define efhw_nic_buffer_table_set(nic, addr, bufsz, region, \
++ own_id, buf_id) \
++ ((nic)->efhw_func->buffer_table_set(nic, addr, bufsz, region, \
++ own_id, buf_id))
++
++#define efhw_nic_buffer_table_set_n(nic, buf_id, addr, bufsz, \
++ region, n_pages, own_id) \
++ ((nic)->efhw_func->buffer_table_set_n(nic, buf_id, addr, bufsz, \
++ region, n_pages, own_id))
++
++#define efhw_nic_buffer_table_clear(nic, id, num) \
++ ((nic)->efhw_func->buffer_table_clear(nic, id, num))
++
++#define efhw_nic_buffer_table_commit(nic) \
++ ((nic)->efhw_func->buffer_table_commit(nic))
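++All of the efhw_nic_*() wrappers above dispatch through a per-NIC function
++table.  A minimal sketch of the shape these macros imply (the real struct
++lives in ci/efhw/efhw_types.h; the name and members below are inferred
++from the macro bodies, not taken from that header):
++
++	/* Inferred shape only -- see ci/efhw/efhw_types.h for the real thing. */
++	struct example_efhw_func_ops {
++		void (*close_hardware)(struct efhw_nic *nic);
++		int  (*interrupt)(struct efhw_nic *nic);
++		void (*interrupt_enable)(struct efhw_nic *nic, unsigned index);
++		/* ... one member per efhw_nic_*() wrapper above ... */
++	};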
++
++/*----------------------------------------------------------------------------
++ * Hardware specific portability macros for performance critical code.
++ *
++ * Warning: any driver code which uses these defines is not
++ * capable of supporting multiple NIC variants and should be built and
++ * marked appropriately
++ *
++ *---------------------------------------------------------------------------*/
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++
++/* --- DMA --- */
++#define EFHW_DMA_ADDRMASK (0xffffffffffffffffULL)
++
++/* --- Buffers --- */
++#define EFHW_BUFFER_ADDR FALCON_BUFFER_4K_ADDR
++#define EFHW_BUFFER_PAGE FALCON_BUFFER_4K_PAGE
++#define EFHW_BUFFER_OFF FALCON_BUFFER_4K_OFF
++
++/* --- Filters --- */
++#define EFHW_IP_FILTER_NUM FALCON_FILTER_TBL_NUM
++
++#define EFHW_MAX_PAGE_SIZE FALCON_MAX_PAGE_SIZE
++
++#else
++# error no hardware definition found
++#endif
++
++#if PAGE_SIZE <= EFHW_MAX_PAGE_SIZE
++#define EFHW_NIC_PAGE_SIZE PAGE_SIZE
++#else
++#define EFHW_NIC_PAGE_SIZE EFHW_MAX_PAGE_SIZE
++#endif
++#define EFHW_NIC_PAGE_MASK (~(EFHW_NIC_PAGE_SIZE-1))
++
++#endif /* __CI_DRIVER_EFAB_HARDWARE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,276 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains public EFX VI API to Solarflare resource manager.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_RESOURCE_EFX_VI_H__
++#define __CI_DRIVER_RESOURCE_EFX_VI_H__
++
++/* Default size of event queue in the efx_vi resource. Copied from
++ * CI_CFG_NETIF_EVENTQ_SIZE */
++#define EFX_VI_EVENTQ_SIZE_DEFAULT 1024
++
++extern int efx_vi_eventq_size;
++
++/**************************************************************************
++ * efx_vi_state types, allocation and free
++ **************************************************************************/
++
++/*! Handle for referring to an efx_vi */
++struct efx_vi_state;
++
++/*!
++ * Allocate an efx_vi, including event queue and pt_endpoint
++ *
++ * \param vih_out Pointer to a handle that is set on success
++ * \param nic_index Index of NIC to apply this resource to
++ * \return Zero on success (and vih_out set), non-zero on failure.
++ */
++extern int
++efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index);
++
++/*!
++ * Free a previously allocated efx_vi
++ *
++ * \param vih The handle of the efx_vi to free
++ */
++extern void
++efx_vi_free(struct efx_vi_state *vih);
++
++/*!
++ * Reset a previously allocated efx_vi
++ *
++ * \param vih The handle of the efx_vi to reset
++ */
++extern void
++efx_vi_reset(struct efx_vi_state *vih);
++
++/**************************************************************************
++ * efx_vi_eventq types and functions
++ **************************************************************************/
++
++/*!
++ * Register a function to receive callbacks when event queue timeouts
++ * or wakeups occur. Only one function per efx_vi can be registered
++ * at once.
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param callback The function to callback
++ * \param context An argument to pass to the callback function
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_eventq_register_callback(struct efx_vi_state *vih,
++ void (*callback)(void *context, int is_timeout),
++ void *context);
++
++/*!
++ * Remove the current eventq timeout or wakeup callback function
++ *
++ * \param vih The handle to identify the efx_vi
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_eventq_kill_callback(struct efx_vi_state *vih);
++
++/**************************************************************************
++ * efx_vi_dma_map types and functions
++ **************************************************************************/
++
++/*!
++ * Handle for referring to an efx_vi DMA mapping
++ */
++struct efx_vi_dma_map_state;
++
++/*!
++ * Map a list of buffer pages so they are registered with the hardware
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param pages An array of page pointers to map
++ * \param n_pages Length of the page pointer array. Must be a power of two.
++ * \param dmh_out Set on success to a handle used to refer to this mapping
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages,
++ int n_pages, struct efx_vi_dma_map_state **dmh_out);
++extern int
++efx_vi_dma_map_addrs(struct efx_vi_state *vih,
++ unsigned long long *dev_bus_addrs, int n_pages,
++ struct efx_vi_dma_map_state **dmh_out);
++
++/*!
++ * Unmap a previously mapped set of pages so they are no longer registered
++ * with the hardware.
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param dmh The handle to identify the dma mapping
++ */
++extern void
++efx_vi_dma_unmap_pages(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++extern void
++efx_vi_dma_unmap_addrs(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++
++/*!
++ * Retrieve the buffer address of the mapping
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param dmh The handle to identify the buffer mapping
++ * \return The buffer address on success, or zero on failure
++ */
++extern unsigned
++efx_vi_dma_get_map_addr(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++
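++An end-to-end sketch of the mapping API above (error handling trimmed;
++the function itself is illustrative, not part of this interface):
++
++	/* Maps n_pages pages (per the documentation above, a power of two)
++	 * into an efx_vi, fetches the buffer address the NIC will use for
++	 * them, then unmaps. */
++	static int example_map_pages(struct efx_vi_state *vi,
++				     struct page **pages, int n_pages)
++	{
++		struct efx_vi_dma_map_state *dmh;
++		unsigned buf_addr;
++		int rc;
++
++		rc = efx_vi_dma_map_pages(vi, pages, n_pages, &dmh);
++		if (rc != 0)
++			return rc;
++		buf_addr = efx_vi_dma_get_map_addr(vi, dmh);
++		(void)buf_addr; /* program descriptors using buf_addr ... */
++		efx_vi_dma_unmap_pages(vi, dmh);
++		return 0;
++	}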
++/**************************************************************************
++ * efx_vi filter functions
++ **************************************************************************/
++
++#define EFX_VI_STATIC_FILTERS 32
++
++/*! Handle to refer to a filter instance */
++struct filter_resource_t;
++
++/*!
++ * Allocate and add a filter
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param protocol The protocol of the new filter: UDP or TCP
++ * \param ip_addr_be32 The local ip address of the filter
++ * \param port_le16 The local port of the filter
++ * \param fh_out Set on success to be a handle to refer to this filter
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_filter(struct efx_vi_state *vih, int protocol, unsigned ip_addr_be32,
++ int port_le16, struct filter_resource_t **fh_out);
++
++/*!
++ * Remove a filter and free resources associated with it
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param fh The handle to identify the filter
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh);
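++And a corresponding sketch for the filter calls.  Note the exact protocol
++encoding expected by efx_vi_filter() is not defined in this header, so
++IPPROTO_UDP below is an assumption:
++
++	/* Illustrative: steer a local UDP address/port to this efx_vi,
++	 * then remove the filter again. */
++	static int example_filter(struct efx_vi_state *vi,
++				  unsigned ip_addr_be32, int port_le16)
++	{
++		struct filter_resource_t *fh;
++		int rc;
++
++		rc = efx_vi_filter(vi, IPPROTO_UDP /* assumed encoding */,
++				   ip_addr_be32, port_le16, &fh);
++		if (rc != 0)
++			return rc;
++		/* ... matching traffic is now delivered to this VI ... */
++		return efx_vi_filter_stop(vi, fh);
++	}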
++
++/**************************************************************************
++ * efx_vi hw resources types and functions
++ **************************************************************************/
++
++/*! Constants for the type field in efx_vi_hw_resource */
++#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */
++#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */
++#define EFX_VI_HW_RESOURCE_TXBELL 0x2 /* PFN of TX Doorbell (EF1) */
++#define EFX_VI_HW_RESOURCE_RXBELL 0x3 /* PFN of RX Doorbell (EF1) */
++#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */
++
++/* Address of event q pointer (EF1) */
++#define EFX_VI_HW_RESOURCE_EVQPTR 0x5
++/* Address of register pointer (Falcon A) */
++#define EFX_VI_HW_RESOURCE_EVQRPTR 0x6
++/* Offset of register pointer (Falcon B) */
++#define EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET 0x7
++/* Address of mem KVA */
++#define EFX_VI_HW_RESOURCE_EVQMEMKVA 0x8
++/* PFN of doorbell page (Falcon) */
++#define EFX_VI_HW_RESOURCE_BELLPAGE 0x9
++
++/*! How large an array to allocate for the get_() functions - smaller
++ than the total number of constants as some are mutually exclusive */
++#define EFX_VI_HW_RESOURCE_MAXSIZE 0x7
++
++/*! Constants for the mem_type field in efx_vi_hw_resource */
++#define EFX_VI_HW_RESOURCE_IOBUFFER 0 /* Host memory */
++#define EFX_VI_HW_RESOURCE_PERIPHERAL 1 /* Card memory/registers */
++
++/*!
++ * Data structure providing information on a hardware resource mapping
++ */
++struct efx_vi_hw_resource {
++ u8 type; /*!< What this resource represents */
++	u8 mem_type;		/*!< What type of memory is it in, e.g.,
++ * host or iomem */
++ u8 more_to_follow; /*!< Is this part of a multi-region resource */
++ u32 length; /*!< Length of the resource in bytes */
++ unsigned long address; /*!< Address of this resource */
++};
++
++/*!
++ * Metadata concerning the list of hardware resource mappings
++ */
++struct efx_vi_hw_resource_metadata {
++ int version;
++ int evq_order;
++ int evq_offs;
++ int evq_capacity;
++ int instance;
++ unsigned rx_capacity;
++ unsigned tx_capacity;
++ int nic_arch;
++ int nic_revision;
++ char nic_variant;
++};
++
++/*!
++ * Obtain a list of hardware resource mappings, using virtual addresses
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param mdata Pointer to a structure to receive the metadata
++ * \param hw_res_array An array to receive the list of hardware resources
++ * \param length The length of hw_res_array. Updated on success to contain
++ * the number of entries in the supplied array that were used.
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_hw_resource_get_virt(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length);
++
++/*!
++ * Obtain a list of hardware resource mappings, using physical addresses
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param mdata Pointer to a structure to receive the metadata
++ * \param hw_res_array An array to receive the list of hardware resources
++ * \param length The length of hw_res_array. Updated on success to contain
++ * the number of entries in the supplied array that were used.
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length);
++
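++/* A minimal usage sketch (illustrative only, not part of this API):
++ *
++ *	struct efx_vi_hw_resource res[EFX_VI_HW_RESOURCE_MAXSIZE];
++ *	struct efx_vi_hw_resource_metadata mdata;
++ *	int n = EFX_VI_HW_RESOURCE_MAXSIZE;
++ *
++ *	if (efx_vi_hw_resource_get_virt(vih, &mdata, res, &n) == 0)
++ *		... consume res[0] .. res[n - 1] ...
++ */
++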
++#endif /* __CI_DRIVER_RESOURCE_EFX_VI_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,76 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains definition of the public type struct linux_efhw_nic.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_RESOURCE_LINUX_RESOURCE__
++#define __CI_DRIVER_RESOURCE_LINUX_RESOURCE__
++
++#ifndef __linux__
++# error Silly
++#endif
++#ifndef __KERNEL__
++# error Silly
++#endif
++
++#include <ci/efhw/efhw_types.h>
++#include <linux/interrupt.h>
++
++
++/************************************************************************
++ * Per-nic structure in the resource driver *
++ ************************************************************************/
++
++struct linux_efhw_nic {
++ struct efhw_nic nic;
++
++ struct pci_dev *pci_dev; /*!< pci descriptor */
++ struct tasklet_struct tasklet; /*!< for interrupt bottom half */
++
++ /* Physical addresses of the control aperture bar. */
++ unsigned long ctr_ap_pci_addr;
++
++ /*! Callbacks for driverlink, when needed. */
++ struct efx_dl_callbacks *dl_callbacks;
++
++ /*! Event handlers. */
++ struct efhw_ev_handler *ev_handlers;
++
++};
++
++#define linux_efhw_nic(efhw_nic) \
++ container_of(efhw_nic, struct linux_efhw_nic, nic)
++
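++/* Illustrative use of the accessor above (not part of the original
++ * header): given a struct efhw_nic pointer, e.g. inside an
++ * efhw_func_ops callback, the enclosing Linux-specific structure is
++ * recovered with:
++ *
++ *	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++ */
++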
++#endif /* __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/checks.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/checks.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/checks.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/checks.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,118 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides helpers to turn bit shifts into dword shifts and
++ * check that the bit fields haven't overflowed the dword etc.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_CHECK_H__
++#define __CI_EFHW_CHECK_H__
++
++/*----------------------------------------------------------------------------
++ *
++ * Helpers to turn bit shifts into dword shifts and check that the bit fields
++ * haven't overflowed the dword etc. The aim is to preserve consistency with
++ * the autogenerated headers - once stable we could hard-code the values.
++ *
++ *---------------------------------------------------------------------------*/
++
++/* mask constructors */
++#define __FALCON_MASK(WIDTH, T) ((((T)1) << (WIDTH)) - 1)
++#define __FALCON_MASK32(WIDTH) __FALCON_MASK((WIDTH), uint32_t)
++#define __FALCON_MASK64(WIDTH) __FALCON_MASK((WIDTH), uint64_t)
++
++#define __FALCON_MASKFIELD32(LBN, WIDTH) \
++ ((uint32_t)(__FALCON_MASK32(WIDTH) << (LBN)))
++
++/* constructors for fields which span the first and second dwords */
++#define __LW(LBN) (32 - (LBN))
++#define __LOW(v, LBN, WIDTH) \
++ ((uint32_t)(((v) & __FALCON_MASK64(__LW((LBN)))) << (LBN)))
++#define __HIGH(v, LBN, WIDTH) \
++ ((uint32_t)(((v) >> __LW((LBN))) & \
++ __FALCON_MASK64((WIDTH - __LW((LBN))))))
++/* constructors for fields within the second dword */
++#define __DW2(LBN) ((LBN) - 32)
++
++/* constructors for fields which span the second and third dwords */
++#define __LW2(LBN) (64 - (LBN))
++#define __LOW2(v, LBN, WIDTH) \
++ ((uint32_t)(((v) & __FALCON_MASK64(__LW2((LBN)))) << ((LBN) - 32)))
++#define __HIGH2(v, LBN, WIDTH) \
++ ((uint32_t)(((v) >> __LW2((LBN))) & \
++ __FALCON_MASK64((WIDTH - __LW2((LBN))))))
++
++/* constructors for fields within the third dword */
++#define __DW3(LBN) ((LBN) - 64)
++
++/* constructors for fields which span the third and fourth dwords */
++#define __LW3(LBN) (96 - (LBN))
++#define __LOW3(v, LBN, WIDTH) \
++ ((uint32_t)(((v) & __FALCON_MASK64(__LW3((LBN)))) << ((LBN) - 64)))
++#define __HIGH3(v, LBN, WIDTH) \
++ ((uint32_t)(((v) >> __LW3((LBN))) & \
++ __FALCON_MASK64((WIDTH - __LW3((LBN))))))
++
++/* constructors for fields within the fourth dword */
++#define __DW4(LBN) ((LBN) - 96)
++
++/* checks that the autogenerated headers are consistent with our model */
++#define __WIDTHCHCK(a, b) EFHW_ASSERT((a) == (b))
++#define __RANGECHCK(v, WIDTH) \
++ EFHW_ASSERT(((uint64_t)(v) & ~(__FALCON_MASK64((WIDTH)))) == 0)
++
++/* fields within the first dword */
++#define __DWCHCK(LBN, WIDTH) \
++ EFHW_ASSERT(((LBN) >= 0) && (((LBN)+(WIDTH)) <= 32))
++
++/* fields which span the first and second dwords */
++#define __LWCHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW(LBN))
++
++/* fields within the second dword */
++#define __DW2CHCK(LBN, WIDTH) \
++ EFHW_ASSERT(((LBN) >= 32) && (((LBN)+(WIDTH)) <= 64))
++
++/* fields which span the second and third dwords */
++#define __LW2CHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW2(LBN))
++
++/* fields within the third dword */
++#define __DW3CHCK(LBN, WIDTH) \
++ EFHW_ASSERT(((LBN) >= 64) && (((LBN)+(WIDTH)) <= 96))
++
++/* fields which span the third and fourth dwords */
++#define __LW3CHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW3(LBN))
++
++/* fields within the fourth dword */
++#define __DW4CHCK(LBN, WIDTH) \
++ EFHW_ASSERT(((LBN) >= 96) && (((LBN)+(WIDTH)) <= 128))
++
++/* fields in the first qword */
++#define __QWCHCK(LBN, WIDTH) \
++ EFHW_ASSERT(((LBN) >= 0) && (((LBN)+(WIDTH)) <= 64))
++
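++/* Illustrative sketch (not in the original header): packing a field
++ * that spans the first and second dwords, e.g. LBN=28, WIDTH=8, which
++ * __LWCHK(28, 8) accepts since 8 >= __LW(28) == 4.  __LOW() places
++ * bits 0..3 of the value in bits 28..31 of dword 0; __HIGH() places
++ * bits 4..7 in bits 0..3 of dword 1.  The function name and the
++ * out0/out1 parameters are hypothetical.
++ *
++ *	static inline void
++ *	example_pack_span(uint32_t *out0, uint32_t *out1, uint64_t v)
++ *	{
++ *		__RANGECHCK(v, 8);
++ *		*out0 |= __LOW(v, 28, 8);
++ *		*out1 |= __HIGH(v, 28, 8);
++ *	}
++ */
++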
++#endif /* __CI_EFHW_CHECK_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/common.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/common.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,102 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides API of the efhw library which may be used both from
++ * the kernel and from the user-space code.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_COMMON_H__
++#define __CI_EFHW_COMMON_H__
++
++#include <ci/efhw/common_sysdep.h>
++
++enum efhw_arch {
++ EFHW_ARCH_FALCON,
++ EFHW_ARCH_SIENA,
++};
++
++typedef uint32_t efhw_buffer_addr_t;
++#define EFHW_BUFFER_ADDR_FMT "[ba:%"PRIx32"]"
++
++/*! A NIC event, as read from an event queue. */
++typedef union {
++ uint64_t u64;
++ struct {
++ uint32_t a;
++ uint32_t b;
++ } opaque;
++ struct {
++ uint32_t code;
++ uint32_t status;
++ } ev1002;
++} efhw_event_t;
++
++/* Flags for TX/RX queues */
++#define EFHW_VI_JUMBO_EN 0x01 /*! scatter RX over multiple desc */
++#define EFHW_VI_ISCSI_RX_HDIG_EN 0x02 /*! iscsi rx header digest */
++#define EFHW_VI_ISCSI_TX_HDIG_EN 0x04 /*! iscsi tx header digest */
++#define EFHW_VI_ISCSI_RX_DDIG_EN 0x08 /*! iscsi rx data digest */
++#define EFHW_VI_ISCSI_TX_DDIG_EN 0x10 /*! iscsi tx data digest */
++#define EFHW_VI_TX_PHYS_ADDR_EN 0x20 /*! TX physical address mode */
++#define EFHW_VI_RX_PHYS_ADDR_EN 0x40 /*! RX physical address mode */
++#define EFHW_VI_RM_WITH_INTERRUPT 0x80 /*! VI with an interrupt */
++#define EFHW_VI_TX_IP_CSUM_DIS 0x100 /*! disable ip checksum generation */
++#define EFHW_VI_TX_TCPUDP_CSUM_DIS 0x200 /*! disable tcp/udp checksum
++ generation */
++#define EFHW_VI_TX_TCPUDP_ONLY 0x400 /*! drop non-tcp/udp packets */
++
++/* Types of hardware filter */
++/* Each of these values implicitly selects scatter filters on B0; OR in
++ EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK if a non-scatter filter is required
++ (see the example below) */
++#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD (0) /* dest host only */
++#define EFHW_IP_FILTER_TYPE_UDP_FULL (1) /* dest host and port */
++#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD (2) /* dest based filter */
++#define EFHW_IP_FILTER_TYPE_TCP_FULL (3) /* src filter */
++/* Same again, but with RSS (for B0 only) */
++#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD_RSS_B0 (4)
++#define EFHW_IP_FILTER_TYPE_UDP_FULL_RSS_B0 (5)
++#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD_RSS_B0 (6)
++#define EFHW_IP_FILTER_TYPE_TCP_FULL_RSS_B0 (7)
++
++#define EFHW_IP_FILTER_TYPE_FULL_MASK (0x1) /* Mask for full / wildcard */
++#define EFHW_IP_FILTER_TYPE_TCP_MASK (0x2) /* Mask for TCP type */
++#define EFHW_IP_FILTER_TYPE_RSS_B0_MASK (0x4) /* Mask for B0 RSS enable */
++#define EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK (0x8) /* Mask for B0 SCATTER dsbl */
++
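++/* For example (illustrative): a full-match TCP filter with scatter
++   disabled on B0 would be requested with the type
++   (EFHW_IP_FILTER_TYPE_TCP_FULL | EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK) */
++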
++#define EFHW_IP_FILTER_TYPE_MASK (0xffff) /* Mask of types above */
++
++#define EFHW_IP_FILTER_BROADCAST (0x10000) /* driverlink filter
++ support */
++
++#endif /* __CI_EFHW_COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,71 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for
++ * userland-to-kernel interfaces.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_COMMON_LINUX_H__
++#define __CI_EFHW_COMMON_LINUX_H__
++
++#include <linux/types.h>
++#include <linux/version.h>
++
++/* Dirty hack, but Linux kernel does not provide DMA_ADDR_T_FMT */
++#if BITS_PER_LONG == 64 || defined(CONFIG_HIGHMEM64G)
++#define DMA_ADDR_T_FMT "%llx"
++#else
++#define DMA_ADDR_T_FMT "%x"
++#endif
++
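++/* Illustrative use (a sketch; the message text is hypothetical, and a
++ * cast of the dma_addr_t argument may be needed on some architectures):
++ *
++ *	printk(KERN_DEBUG "dma addr = " DMA_ADDR_T_FMT "\n", dma_addr);
++ */
++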
++/* Linux kernel also does not provide PRIx32... Sigh. */
++#define PRIx32 "x"
++
++#ifdef __ia64__
++# define PRIx64 "lx"
++#else
++# define PRIx64 "llx"
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++enum {
++ false = 0,
++ true = 1
++};
++
++typedef _Bool bool;
++#endif /* LINUX_VERSION_CODE < 2.6.19 */
++
++#endif /* __CI_EFHW_COMMON_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/debug.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/debug.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/debug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/debug.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,84 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides debug-related API for efhw library using Linux kernel
++ * primitives.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_DEBUG_LINUX_H__
++#define __CI_EFHW_DEBUG_LINUX_H__
++
++#define EFHW_PRINTK_PREFIX "[sfc efhw] "
++
++#define EFHW_PRINTK(level, fmt, ...) \
++ printk(level EFHW_PRINTK_PREFIX fmt "\n", __VA_ARGS__)
++
++/* The following macros must be passed at least one format argument,
++ * due to __VA_ARGS__ limitations. Use "%s" with __FUNCTION__ if you can't
++ * find better parameters. */
++#define EFHW_ERR(fmt, ...) EFHW_PRINTK(KERN_ERR, fmt, __VA_ARGS__)
++#define EFHW_WARN(fmt, ...) EFHW_PRINTK(KERN_WARNING, fmt, __VA_ARGS__)
++#define EFHW_NOTICE(fmt, ...) EFHW_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__)
++#if 0 && !defined(NDEBUG)
++#define EFHW_TRACE(fmt, ...) EFHW_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__)
++#else
++#define EFHW_TRACE(fmt, ...)
++#endif
++
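++/* For example (illustrative), since an empty argument list is not
++ * accepted here:
++ *
++ *	EFHW_ERR("%s: event queue %d overflowed", __FUNCTION__, evq);
++ */
++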
++#ifndef NDEBUG
++#define EFHW_ASSERT(cond) BUG_ON((cond) == 0)
++#define EFHW_DO_DEBUG(expr) expr
++#else
++#define EFHW_ASSERT(cond)
++#define EFHW_DO_DEBUG(expr)
++#endif
++
++#define EFHW_TEST(expr) \
++ do { \
++ if (unlikely(!(expr))) \
++ BUG(); \
++ } while (0)
++
++/* Build time asserts. We paste the line number into the type name
++ * so that the macro can be used more than once per file even if the
++ * compiler objects to multiple identical typedefs. Collisions
++ * between uses in different header files are still possible. */
++#ifndef EFHW_BUILD_ASSERT
++#define __EFHW_BUILD_ASSERT_NAME(_x) __EFHW_BUILD_ASSERT_ILOATHECPP(_x)
++#define __EFHW_BUILD_ASSERT_ILOATHECPP(_x) __EFHW_BUILD_ASSERT__ ##_x
++#define EFHW_BUILD_ASSERT(e) \
++ typedef char __EFHW_BUILD_ASSERT_NAME(__LINE__)[(e) ? 1 : -1]
++#endif
++
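++/* Example (illustrative): the following line would fail to compile if
++ * efhw_event_t were not exactly 8 bytes:
++ *
++ *	EFHW_BUILD_ASSERT(sizeof(efhw_event_t) == 8);
++ */
++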
++#endif /* __CI_EFHW_DEBUG_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,43 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides some limits used in both kernel and userland code.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EFAB_CONFIG_H__
++#define __CI_EFHW_EFAB_CONFIG_H__
++
++#define EFHW_MAX_NR_DEVS 5 /* max number of efhw devices supported */
++
++#endif /* __CI_EFHW_EFAB_CONFIG_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,342 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides struct efhw_nic and some related types.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EFAB_TYPES_H__
++#define __CI_EFHW_EFAB_TYPES_H__
++
++#include <ci/efhw/efhw_config.h>
++#include <ci/efhw/hardware_sysdep.h>
++#include <ci/efhw/iopage_types.h>
++#include <ci/efhw/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * hardware limits used in the types
++ *
++ *--------------------------------------------------------------------*/
++
++#define EFHW_KEVENTQ_MAX 8
++
++/*--------------------------------------------------------------------
++ *
++ * forward type declarations
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_nic;
++
++/*--------------------------------------------------------------------
++ *
++ * Managed interface
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_buffer_table_allocation{
++ unsigned base;
++ unsigned order;
++};
++
++struct eventq_resource_hardware {
++ /*! iobuffer allocated for eventq - can be larger than eventq */
++ efhw_iopages_t iobuff;
++ unsigned iobuff_off;
++ struct efhw_buffer_table_allocation buf_tbl_alloc;
++ int capacity; /*!< capacity of event queue */
++};
++
++/*--------------------------------------------------------------------
++ *
++ * event queues and event driven callbacks
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_keventq {
++ volatile int lock;
++ caddr_t evq_base;
++ int32_t evq_ptr;
++ uint32_t evq_mask;
++ unsigned instance;
++ struct eventq_resource_hardware hw;
++ struct efhw_ev_handler *ev_handlers;
++};
++
++/**********************************************************************
++ * Portable HW interface. ***************************************
++ **********************************************************************/
++
++/*--------------------------------------------------------------------
++ *
++ * EtherFabric Functional units - configuration and control
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_func_ops {
++
++ /*-------------- Initialisation ------------ */
++
++ /*! close down all hardware functional units - leaves NIC in a safe
++ state for driver unload */
++ void (*close_hardware) (struct efhw_nic *nic);
++
++ /*! initialise all hardware functional units */
++ int (*init_hardware) (struct efhw_nic *nic,
++ struct efhw_ev_handler *,
++ const uint8_t *mac_addr);
++
++ /*-------------- Interrupt support ------------ */
++
++ /*! Main interrupt routine.
++ ** This function returns:
++ ** - zero, if the IRQ was not generated by EF1
++ ** - non-zero, if EF1 was the source of the IRQ
++ **
++ ** opaque is an OS-provided pointer for use by the OS callbacks,
++ ** e.g. on Windows it is used to indicate that a DPC has been scheduled
++ */
++ int (*interrupt) (struct efhw_nic *nic);
++
++ /*! Enable given interrupt mask for the given IRQ unit */
++ void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
++
++ /*! Disable given interrupt mask for the given IRQ unit */
++ void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
++
++ /*! Set interrupt moderation strategy for the given IRQ unit
++ ** val is in usec
++ */
++ void (*set_interrupt_moderation)(struct efhw_nic *nic,
++ uint idx, uint val);
++
++ /*-------------- Event support ------------ */
++
++ /*! Enable the given event queue.
++ Depending on the underlying implementation (EF1 or Falcon),
++ either a q_base_addr in host memory or a buffer base id should
++ be provided
++ */
++ void (*event_queue_enable) (struct efhw_nic *nic,
++ uint evq, /* event queue index */
++ uint evq_size, /* units of #entries */
++ dma_addr_t q_base_addr, uint buf_base_id);
++
++ /*! Disable the given event queue (and any associated timer) */
++ void (*event_queue_disable) (struct efhw_nic *nic, uint evq,
++ int timer_only);
++
++ /*! request wakeup from the NIC on a given event Q */
++ void (*wakeup_request) (struct efhw_nic *nic, dma_addr_t q_base_addr,
++ int next_i, int evq);
++
++ /*! Push a SW event on a given eventQ */
++ void (*sw_event) (struct efhw_nic *nic, int data, int evq);
++
++ /*-------------- Filter support ------------ */
++
++ /*! Setup a given filter - The software can request a filter_i,
++ * but some EtherFabric implementations will override with
++ * a more suitable index
++ */
++ int (*ipfilter_set) (struct efhw_nic *nic, int type,
++ int *filter_i, int dmaq,
++ unsigned saddr_be32, unsigned sport_be16,
++ unsigned daddr_be32, unsigned dport_be16);
++
++ /*! Attach a given filter to a DMAQ */
++ void (*ipfilter_attach) (struct efhw_nic *nic, int filter_idx,
++ int dmaq_idx);
++
++ /*! Detach a filter from its DMAQ */
++ void (*ipfilter_detach) (struct efhw_nic *nic, int filter_idx);
++
++ /*! Clear down a given filter */
++ void (*ipfilter_clear) (struct efhw_nic *nic, int filter_idx);
++
++ /*-------------- DMA support ------------ */
++
++ /*! Initialise NIC state for a given TX DMAQ */
++ void (*dmaq_tx_q_init) (struct efhw_nic *nic,
++ uint dmaq, uint evq, uint owner, uint tag,
++ uint dmaq_size, uint buf_idx, uint flags);
++
++ /*! Initialise NIC state for a given RX DMAQ */
++ void (*dmaq_rx_q_init) (struct efhw_nic *nic,
++ uint dmaq, uint evq, uint owner, uint tag,
++ uint dmaq_size, uint buf_idx, uint flags);
++
++ /*! Disable a given TX DMAQ */
++ void (*dmaq_tx_q_disable) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Disable a given RX DMAQ */
++ void (*dmaq_rx_q_disable) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Flush a given TX DMA channel */
++ int (*flush_tx_dma_channel) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Flush a given RX DMA channel */
++ int (*flush_rx_dma_channel) (struct efhw_nic *nic, uint dmaq);
++
++ /*-------------- Buffer table Support ------------ */
++
++ /*! Initialise a buffer table page */
++ void (*buffer_table_set) (struct efhw_nic *nic,
++ dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int own_id, int buffer_id);
++
++ /*! Initialise a block of buffer table pages */
++ void (*buffer_table_set_n) (struct efhw_nic *nic, int buffer_id,
++ dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int n_pages, int own_id);
++
++ /*! Clear a block of buffer table pages */
++ void (*buffer_table_clear) (struct efhw_nic *nic, int buffer_id,
++ int num);
++
++ /*! Commit a buffer table update */
++ void (*buffer_table_commit) (struct efhw_nic *nic);
++
++};
++
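++/* Calls are dispatched through the efhw_func pointer held in struct
++ * efhw_nic below, e.g. (illustrative):
++ *
++ *	nic->efhw_func->sw_event(nic, data, evq);
++ */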
++
++/*----------------------------------------------------------------------------
++ *
++ * NIC type
++ *
++ *---------------------------------------------------------------------------*/
++
++struct efhw_device_type {
++ int arch; /* enum efhw_arch */
++ char variant; /* 'A', 'B', ... */
++ int revision; /* 0, 1, ... */
++};
++
++
++/*----------------------------------------------------------------------------
++ *
++ * EtherFabric NIC instance - nic.c for HW independent functions
++ *
++ *---------------------------------------------------------------------------*/
++
++/*! An EtherFabric NIC instance. */
++struct efhw_nic {
++ /*! zero-based index into the efrm_nic_table.nic array */
++ volatile int index;
++ int ifindex; /*!< OS level nic index */
++#ifdef HAS_NET_NAMESPACE
++ struct net *nd_net;
++#endif
++
++ struct efhw_device_type devtype;
++
++ /*! Options that can be set by user. */
++ unsigned options;
++# define NIC_OPT_EFTEST 0x1 /* owner is an eftest app */
++
++# define NIC_OPT_DEFAULT 0
++
++ /*! Internal flags that indicate hardware properties at runtime. */
++ unsigned flags;
++# define NIC_FLAG_NO_INTERRUPT 0x01 /* to be set at init time only */
++# define NIC_FLAG_TRY_MSI 0x02
++# define NIC_FLAG_MSI 0x04
++# define NIC_FLAG_OS_IRQ_EN 0x08
++# define NIC_FLAG_10G 0x10
++
++ unsigned mtu; /*!< MAC MTU (includes MAC hdr) */
++
++ /* hardware resources */
++
++ /*! I/O address of the start of the bar */
++ efhw_ioaddr_t bar_ioaddr;
++
++ /*! Bar number of control aperture. */
++ unsigned ctr_ap_bar;
++ /*! Length of control aperture in bytes. */
++ unsigned ctr_ap_bytes;
++
++ uint8_t mac_addr[ETH_ALEN]; /*!< mac address */
++
++ /*! EtherFabric Functional Units -- functions */
++ const struct efhw_func_ops *efhw_func;
++
++ /* Value read from FPGA version register. Zero for asic. */
++ unsigned fpga_version;
++
++ /*! This lock protects a number of misc NIC resources. It should
++ * only be used for things that can be at the bottom of the lock
++ * order, i.e. you mustn't attempt to grab any other lock while
++ * holding this one.
++ */
++ spinlock_t *reg_lock;
++ spinlock_t the_reg_lock;
++
++ int buf_commit_outstanding; /*!< outstanding buffer commits */
++
++ /*! interrupt callbacks (hard-irq) */
++ void (*irq_handler) (struct efhw_nic *, int unit);
++
++ /*! event queues per driver */
++ struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
++
++/* for marking when we are not using an IRQ unit
++ - 0 is a valid offset to an IRQ unit on EF1! */
++#define EFHW_IRQ_UNIT_UNUSED 0xffff
++ /*! interrupt unit in use */
++ unsigned int irq_unit[EFHW_KEVENTQ_MAX];
++ efhw_iopage_t irq_iobuff; /*!< Falcon SYSERR interrupt */
++
++ /* The new driverlink infrastructure. */
++ struct efx_dl_device *net_driver_dev;
++ struct efx_dlfilt_cb_s *dlfilter_cb;
++
++ /*! Bit masks of the sizes of event queues and dma queues supported
++ * by the nic. */
++ unsigned evq_sizes;
++ unsigned rxq_sizes;
++ unsigned txq_sizes;
++
++ /* Size of filter table (including odd and even banks). */
++ unsigned filter_tbl_size;
++};
++
++
++#define EFHW_KVA(nic) ((nic)->bar_ioaddr)
++
++
++#endif /* __CI_EFHW_EFHW_TYPES_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,73 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains API provided by efhw/eventq.c file. This file is not
++ * designed for use outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EVENTQ_H__
++#define __CI_EFHW_EVENTQ_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efhw/eventq_macros.h>
++
++/*! Poll the event queue. */
++extern int efhw_keventq_poll(struct efhw_nic *, struct efhw_keventq *);
++
++/*! Callbacks for handling events. */
++struct efhw_ev_handler {
++ void (*wakeup_fn)(struct efhw_nic *nic, efhw_event_t *ev);
++ void (*timeout_fn)(struct efhw_nic *nic, efhw_event_t *ev);
++ void (*sw_fn)(struct efhw_nic *nic, efhw_event_t *ev);
++ void (*dmaq_flushed_fn) (struct efhw_nic *, int, int);
++};
++
++extern int efhw_keventq_ctor(struct efhw_nic *, int instance,
++ struct efhw_keventq *, struct efhw_ev_handler *);
++extern void efhw_keventq_dtor(struct efhw_nic *, struct efhw_keventq *);
++
++extern void efhw_handle_txdmaq_flushed(struct efhw_nic *,
++ struct efhw_ev_handler *,
++ efhw_event_t *);
++extern void efhw_handle_rxdmaq_flushed(struct efhw_nic *,
++ struct efhw_ev_handler *,
++ efhw_event_t *);
++extern void efhw_handle_wakeup_event(struct efhw_nic *,
++ struct efhw_ev_handler *,
++ efhw_event_t *);
++extern void efhw_handle_timeout_event(struct efhw_nic *,
++ struct efhw_ev_handler *,
++ efhw_event_t *);
++
++#endif /* __CI_EFHW_EVENTQ_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,81 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides some event-related macros. This file is designed for
++ * use from kernel and from the userland contexts.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EVENTQ_MACROS_H__
++#define __CI_EFHW_EVENTQ_MACROS_H__
++
++#include <ci/efhw/common.h>
++
++/*--------------------------------------------------------------------
++ *
++ * Event Queue manipulation
++ *
++ *--------------------------------------------------------------------*/
++
++#define EFHW_EVENT_OFFSET(q, s, i) \
++ (((s)->evq_ptr - (i) * (int32_t)sizeof(efhw_event_t)) \
++ & (q)->evq_mask)
++
++#define EFHW_EVENT_PTR(q, s, i) \
++ ((efhw_event_t *)((q)->evq_base + EFHW_EVENT_OFFSET(q, s, i)))
++
++#define EFHW_EVENTQ_NEXT(s) \
++ do { ((s)->evq_ptr += sizeof(efhw_event_t)); } while (0)
++
++#define EFHW_EVENTQ_PREV(s) \
++ do { ((s)->evq_ptr -= sizeof(efhw_event_t)); } while (0)
++
++/* Be worried about this on byteswapped machines */
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ /* Due to crazy chipsets, we see the event words being written in
++ ** arbitrary order (bug4539). So the test for the presence of an event
++ ** must ensure that both halves have changed from the null value.
++ */
++ #define EFHW_IS_EVENT(evp) \
++ (((evp)->opaque.a != (uint32_t)-1) && \
++ ((evp)->opaque.b != (uint32_t)-1))
++ #define EFHW_CLEAR_EVENT(evp) ((evp)->u64 = (uint64_t)-1)
++ #define EFHW_CLEAR_EVENT_VALUE 0xff
++#else
++ #error Fixme - unknown hardware configuration
++#endif
++
++#define EFHW_EVENT_OVERFLOW(evq, s) \
++ (EFHW_IS_EVENT(EFHW_EVENT_PTR(evq, s, 1)))
++
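++/* Illustrative poll-loop sketch (simplified; see efhw/eventq.c for the
++ * real consumer), where q is a struct efhw_keventq pointer:
++ *
++ *	efhw_event_t *ev = EFHW_EVENT_PTR(q, q, 0);
++ *	while (EFHW_IS_EVENT(ev)) {
++ *		... dispatch on *ev ...
++ *		EFHW_CLEAR_EVENT(ev);
++ *		EFHW_EVENTQ_NEXT(q);
++ *		ev = EFHW_EVENT_PTR(q, q, 0);
++ *	}
++ */
++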
++#endif /* __CI_EFHW_EVENTQ_MACROS_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,93 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains API provided by efhw/falcon.c file. This file is not
++ * designed for use outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_FALCON_H__
++#define __CI_EFHW_FALCON_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efhw/common.h>
++
++/*----------------------------------------------------------------------------
++ *
++ * Locks - unfortunately required
++ *
++ *---------------------------------------------------------------------------*/
++
++#define FALCON_LOCK_DECL irq_flags_t lock_state
++#define FALCON_LOCK_LOCK(nic) \
++ spin_lock_irqsave((nic)->reg_lock, lock_state)
++#define FALCON_LOCK_UNLOCK(nic) \
++ spin_unlock_irqrestore((nic)->reg_lock, lock_state)
++
++extern struct efhw_func_ops falcon_char_functional_units;
++
++/*! specify a pace value for a TX DMA Queue */
++extern void falcon_nic_pace(struct efhw_nic *nic, uint dmaq, uint pace);
++
++/*! confirm buffer table updates - should be used for items where
++ loss of data would be unacceptable, e.g. for the buffers that back
++ an event or DMA queue */
++extern void falcon_nic_buffer_table_confirm(struct efhw_nic *nic);
++
++/*! Reset all the TX DMA queue pointers. */
++extern void falcon_clobber_tx_dma_ptrs(struct efhw_nic *nic, uint dmaq);
++
++extern int
++falcon_handle_char_event(struct efhw_nic *nic,
++ struct efhw_ev_handler *h, efhw_event_t *evp);
++
++/*! map event queue instance space (0,1,2,..) onto event queue
++ number. This function takes into account the allocation rules for
++ the underlying driver model */
++extern int falcon_idx_to_evq(struct efhw_nic *nic, uint idx);
++
++/*! Acknowledge to HW that processing is complete on a given event queue */
++extern void falcon_nic_evq_ack(struct efhw_nic *nic, uint evq, /* evq id */
++ uint rptr, /* new read pointer update */
++ bool wakeup /* request a wakeup event if
++ the pointers differ */
++ );
++
++extern void
++falcon_nic_buffer_table_set_n(struct efhw_nic *nic, int buffer_id,
++ dma_addr_t dma_addr, uint bufsz, uint region,
++ int n_pages, int own_id);
++
++extern void falcon_nic_ipfilter_ctor(struct efhw_nic *nic);
++
++#endif /* __CI_EFHW_FALCON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains API provided by efhw/falcon_hash.c file.
++ * Functions declared in this file are not exported from the Linux
++ * sfc_resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_FALCON_HASH_H__
++#define __CI_EFHW_FALCON_HASH_H__
++
++/* All LE parameters */
++extern unsigned int
++falcon_hash_get_key(unsigned int src_ip, unsigned int src_port,
++ unsigned int dest_ip, unsigned int dest_port,
++ int tcp, int full);
++
++extern unsigned int
++falcon_hash_function1(unsigned int key, unsigned int nfilters);
++
++extern unsigned int
++falcon_hash_function2(unsigned int key, unsigned int nfilters);
++
++extern unsigned int
++falcon_hash_iterator(unsigned int hash1, unsigned int hash2,
++ unsigned int n_search, unsigned int nfilters);
++
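++/* Illustrative probe sequence (a sketch only; h1, h2 and k are local
++ * names, and the search limit is implementation-defined):
++ *
++ *	key = falcon_hash_get_key(sip, sport, dip, dport, tcp, full);
++ *	h1 = falcon_hash_function1(key, nfilters);
++ *	h2 = falcon_hash_function2(key, nfilters);
++ *	for (k = 0; k < max_search; k++)
++ *		try the table slot falcon_hash_iterator(h1, h2, k, nfilters);
++ */
++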
++#endif /* __CI_EFHW_FALCON_HASH_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,84 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for header files
++ * with hardware-related definitions (in ci/driver/efab/hardware*).
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_HARDWARE_LINUX_H__
++#define __CI_EFHW_HARDWARE_LINUX_H__
++
++#include <asm/io.h>
++
++#ifdef __LITTLE_ENDIAN
++#define EFHW_IS_LITTLE_ENDIAN
++#elif __BIG_ENDIAN
++#define EFHW_IS_BIG_ENDIAN
++#else
++#error Unknown endianness
++#endif
++
++#ifndef mmiowb
++ #if defined(__i386__) || defined(__x86_64__)
++ #define mmiowb()
++ #elif defined(__ia64__)
++ #ifndef ia64_mfa
++ #define ia64_mfa() asm volatile ("mf.a" ::: "memory")
++ #endif
++ #define mmiowb ia64_mfa
++ #else
++ #error "Need definition for mmiowb()"
++ #endif
++#endif
++
++typedef char *efhw_ioaddr_t;
++
++#ifndef readq
++static inline uint64_t __readq(void __iomem *addr)
++{
++ return *(volatile uint64_t *)addr;
++}
++#define readq(x) __readq(x)
++#endif
++
++#ifndef writeq
++static inline void __writeq(uint64_t v, void __iomem *addr)
++{
++ *(volatile uint64_t *)addr = v;
++}
++#define writeq(val, addr) __writeq((val), (addr))
++#endif
++
++#endif /* __CI_EFHW_HARDWARE_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains OS-independent API for allocating iopage types.
++ * The implementation of these functions is highly OS-dependent.
++ * This file is not designed for use outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_RESOURCE_IOPAGE_H__
++#define __CI_DRIVER_RESOURCE_IOPAGE_H__
++
++#include <ci/efhw/efhw_types.h>
++
++/*--------------------------------------------------------------------
++ *
++ * memory allocation
++ *
++ *--------------------------------------------------------------------*/
++
++extern int efhw_iopage_alloc(struct efhw_nic *, efhw_iopage_t *p);
++extern void efhw_iopage_free(struct efhw_nic *, efhw_iopage_t *p);
++
++extern int efhw_iopages_alloc(struct efhw_nic *, efhw_iopages_t *p,
++ unsigned order);
++extern void efhw_iopages_free(struct efhw_nic *, efhw_iopages_t *p);
++
++#endif /* __CI_DRIVER_RESOURCE_IOPAGE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,188 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_IOPAGE_LINUX_H__
++#define __CI_EFHW_IOPAGE_LINUX_H__
++
++#include <linux/gfp.h>
++#include <linux/hardirq.h>
++#include <ci/efhw/debug.h>
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_page_t: A single page of memory. Directly mapped in the driver,
++ * and can be mapped to userlevel.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ unsigned long kva;
++} efhw_page_t;
++
++static inline int efhw_page_alloc(efhw_page_t *p)
++{
++ p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
++ return p->kva ? 0 : -ENOMEM;
++}
++
++static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
++{
++ p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
++ return p->kva ? 0 : -ENOMEM;
++}
++
++static inline void efhw_page_free(efhw_page_t *p)
++{
++ free_page(p->kva);
++ EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
++}
++
++static inline char *efhw_page_ptr(efhw_page_t *p)
++{
++ return (char *)p->kva;
++}
++
++static inline unsigned efhw_page_pfn(efhw_page_t *p)
++{
++ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
++}
++
++static inline void efhw_page_mark_invalid(efhw_page_t *p)
++{
++ p->kva = 0;
++}
++
++static inline int efhw_page_is_valid(efhw_page_t *p)
++{
++ return p->kva != 0;
++}
++
++static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
++{
++ p->kva = (unsigned long)va;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_iopage_t: A single page of memory. Directly mapped in the driver,
++ * and can be mapped to userlevel. Can also be accessed by the NIC.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ efhw_page_t p;
++ dma_addr_t dma_addr;
++} efhw_iopage_t;
++
++static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
++{
++ return p->dma_addr;
++}
++
++#define efhw_iopage_ptr(iop) efhw_page_ptr(&(iop)->p)
++#define efhw_iopage_pfn(iop) efhw_page_pfn(&(iop)->p)
++#define efhw_iopage_mark_invalid(iop) efhw_page_mark_invalid(&(iop)->p)
++#define efhw_iopage_is_valid(iop) efhw_page_is_valid(&(iop)->p)
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_iopages_t: A set of pages that are contiguous in physical memory.
++ * Directly mapped in the driver, and can be mapped to userlevel. Can also
++ * be accessed by the NIC.
++ *
++ * NB. The O/S may be unwilling to allocate many, or even any of these. So
++ * only use this type where the NIC really needs a physically contiguous
++ * buffer.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ caddr_t kva;
++ unsigned order;
++ dma_addr_t dma_addr;
++} efhw_iopages_t;
++
++static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
++{
++ return p->kva;
++}
++
++static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
++{
++ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
++}
++
++static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
++{
++ return p->dma_addr;
++}
++
++static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
++{
++ return 1u << (p->order + PAGE_SHIFT);
++}
++
++/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
++ * contiguous allocations in iobufsets for iSCSI. This allows the
++ * essential information about contiguous allocations from
++ * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
++ * iobufset. (Changing the iobufset resource to use a union type would
++ * involve a lot of code changes, and make the iobufset's metadata larger
++ * which could be bad as it's supposed to fit into a single page on some
++ * platforms.)
++ */
++static inline void
++efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
++ efhw_iopages_t *iopages, unsigned pageno)
++{
++ iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
++ + (pageno * PAGE_SIZE);
++ iopage->dma_addr = efhw_iopages_dma_addr(iopages) +
++ (pageno * PAGE_SIZE);
++}
++
++static inline void
++efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
++ efhw_iopage_t *iopage, unsigned order)
++{
++ iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
++ EFHW_ASSERT(iopages->kva);
++ iopages->order = order;
++ iopages->dma_addr = efhw_iopage_dma_addr(iopage);
++}
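++
++/* Worked example (editorial): page [pageno] of a physically contiguous
++ * efhw_iopages_t block maps to an efhw_iopage_t whose kernel virtual and
++ * DMA addresses are both offset by pageno * PAGE_SIZE from the start of
++ * the block.  Assuming [pages] came from efhw_iopages_alloc() with at
++ * least 4 pages:
++ *
++ *   efhw_iopage_t iop;
++ *   efhw_iopage_init_from_iopages(&iop, &pages, 2);
++ *   // efhw_iopage_dma_addr(&iop)
++ *   //     == efhw_iopages_dma_addr(&pages) + 2 * PAGE_SIZE
++ */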
++
++#endif /* __CI_EFHW_IOPAGE_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/nic.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/nic.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/nic.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/nic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,62 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains the API provided by efhw/nic.c. It is not
++ * designed for use outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_NIC_H__
++#define __CI_EFHW_NIC_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efhw/public.h>
++
++
++/* Convert PCI info to device type. Returns false when device is not
++ * recognised.
++ */
++extern int efhw_device_type_init(struct efhw_device_type *dt,
++ int vendor_id, int device_id, int revision);
++
++/* Initialise fields that do not involve touching hardware. */
++extern void efhw_nic_init(struct efhw_nic *nic, unsigned flags,
++ unsigned options, struct efhw_device_type dev_type);
++
++/*! Destruct NIC resources */
++extern void efhw_nic_dtor(struct efhw_nic *nic);
++
++/*! Shutdown interrupts */
++extern void efhw_nic_close_interrupts(struct efhw_nic *nic);
++
++#endif /* __CI_EFHW_NIC_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/public.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/public.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/public.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/public.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,83 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public API of the efhw library exported from
++ * the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_PUBLIC_H__
++#define __CI_EFHW_PUBLIC_H__
++
++#include <ci/efhw/common.h>
++#include <ci/efhw/efhw_types.h>
++
++/*! Returns true if we have some EtherFabric functional units -
++ whether configured or not */
++static inline int efhw_nic_have_functional_units(struct efhw_nic *nic)
++{
++ return nic->efhw_func != 0;
++}
++
++/*! Returns true if the EtherFabric functional units have been configured */
++static inline int efhw_nic_have_hw(struct efhw_nic *nic)
++{
++ return efhw_nic_have_functional_units(nic) && (EFHW_KVA(nic) != 0);
++}
++
++/*! Helper function to allocate the iobuffer needed by an eventq
++ * - it ensures the eventq has the correct alignment for the NIC
++ *
++ * \param rm Event-queue resource manager
++ * \param instance Event-queue instance (index)
++ * \param buf_bytes Requested size of eventq
++ * \return < 0 if iobuffer allocation fails
++ */
++int efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
++ struct eventq_resource_hardware *h,
++ int evq_instance, unsigned buf_bytes);
++
++extern void falcon_nic_set_rx_usr_buf_size(struct efhw_nic *,
++ int rx_usr_buf_size);
++
++extern void
++falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full,
++ uint32_t tcp_wild,
++ uint32_t udp_full, uint32_t udp_wild);
++
++extern void
++falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full,
++ uint32_t *tcp_wild,
++ uint32_t *udp_full, uint32_t *udp_wild);
++
++#endif /* __CI_EFHW_PUBLIC_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,72 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides a version-independent Linux kernel API for the efhw
++ * library.  Only kernels >= 2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_SYSDEP_LINUX_H__
++#define __CI_EFHW_SYSDEP_LINUX_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/if_ether.h>
++
++#include <linux/netdevice.h> /* necessary for etherdevice.h on some kernels */
++#include <linux/etherdevice.h>
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
++static inline int is_local_ether_addr(const u8 *addr)
++{
++ return (0x02 & addr[0]);
++}
++#endif
++
++typedef unsigned long irq_flags_t;
++
++#define spin_lock_destroy(l_) do {} while (0)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++#define HAS_NET_NAMESPACE
++#endif
++
++/* Oddly, Linux defines round_up for x86 only, in an
++ * x86-specific header */
++#ifndef round_up
++#define round_up(x, y) (((x) + (y) - 1) & ~((y)-1))
++#endif
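++
++/* Worked example (editorial): this round_up() is mask-based, so [y] must
++ * be a power of two.  With 4 KiB pages:
++ *
++ *   round_up(5000, 4096) == (5000 + 4095) & ~4095
++ *                        == 9095 & ~4095
++ *                        == 8192
++ *
++ * i.e. 5000 bytes round up to two pages.
++ */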
++
++#endif /* __CI_EFHW_SYSDEP_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,69 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the private API for the buddy allocator. This API is
++ * not designed for use outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_BUDDY_H__
++#define __CI_EFRM_BUDDY_H__
++
++#include <ci/efrm/sysdep.h>
++
++/*! State of a buddy allocator over a power-of-two-sized ID space. */
++struct efrm_buddy_allocator {
++ struct list_head *free_lists; /* array[order+1] */
++ struct list_head *links; /* array[1<<order] */
++ uint8_t *orders; /* array[1<<order] */
++ unsigned order; /*!< total size == (1 << order) */
++ /* ?? Consider recording largest available order + for each order the
++ ** smallest available order that is big enough.
++ */
++};
++
++/*! Returns total size of managed space. */
++static inline unsigned long efrm_buddy_size(struct efrm_buddy_allocator *b)
++{
++ return 1ul << b->order;
++}
++
++int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order);
++void efrm_buddy_dtor(struct efrm_buddy_allocator *b);
++int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order);
++void efrm_buddy_free(struct efrm_buddy_allocator *b, unsigned addr,
++ unsigned order);
++void efrm_buddy_reserve_at_start(struct efrm_buddy_allocator *b, unsigned n);
++void efrm_buddy_reserve_at_end(struct efrm_buddy_allocator *b, unsigned n);
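++
++/* Usage sketch (editorial): a buddy allocator over 2^4 = 16 IDs.  This
++ * assumes efrm_buddy_alloc() returns the allocated base address on
++ * success and a negative value on failure.
++ *
++ *   struct efrm_buddy_allocator b;
++ *   int addr;
++ *   if (efrm_buddy_ctor(&b, 4) == 0) {
++ *           addr = efrm_buddy_alloc(&b, 2);  // a block of 2^2 = 4 IDs
++ *           if (addr >= 0)
++ *                   efrm_buddy_free(&b, addr, 2);
++ *           efrm_buddy_dtor(&b);
++ *   }
++ */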
++
++#endif /* __CI_EFRM_BUDDY_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,86 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the private buffer table API. This API is not
++ * designed for use outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_BUFFER_TABLE_H__
++#define __CI_EFRM_BUFFER_TABLE_H__
++
++#include <ci/efhw/efhw_types.h>
++
++/*--------------------------------------------------------------------
++ *
++ * NIC's buffer table.
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Managed interface. */
++
++/*! construct a managed buffer table object, allocated over a region of
++ * the NICs buffer table space
++ */
++extern int efrm_buffer_table_ctor(unsigned low, unsigned high);
++/*! destructor for above */
++extern void efrm_buffer_table_dtor(void);
++
++/*! allocate a contiguous region of buffer table space */
++extern int efrm_buffer_table_alloc(unsigned order,
++ struct efhw_buffer_table_allocation *a);
++
++/*! Current size of the buffer table.
++ * FIXME This function should be inline, but it is never used from
++ * the fast path, so leave it as-is. */
++unsigned long efrm_buffer_table_size(void);
++
++/*--------------------------------------------------------------------
++ *
++ * buffer table operations through the HW independent API
++ *
++ *--------------------------------------------------------------------*/
++
++/*! free a previously allocated region of buffer table space */
++extern void efrm_buffer_table_free(struct efhw_buffer_table_allocation *a);
++
++/*! commit the update of a buffer table entry to every NIC */
++void efrm_buffer_table_commit(void);
++
++/*! Set a given buffer table entry. [dma_addr] should be the DMA
++   address of pinned-down memory. This function can only be called from
++   the char driver */
++void efrm_buffer_table_set(struct efhw_buffer_table_allocation *a,
++ unsigned i, dma_addr_t dma_addr, int owner);
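++
++/* Typical call sequence (editorial sketch; [dma] and [owner] are assumed
++ * to come from the caller's DMA mapping and ownership context):
++ *
++ *   struct efhw_buffer_table_allocation a;
++ *   if (efrm_buffer_table_alloc(0, &a) == 0) {  // 2^0 = 1 entry
++ *           efrm_buffer_table_set(&a, 0, dma, owner);
++ *           efrm_buffer_table_commit();         // push to every NIC
++ *           efrm_buffer_table_free(&a);
++ *   }
++ */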
++
++#endif /* __CI_EFRM_BUFFER_TABLE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/debug.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/debug.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/debug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/debug.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,78 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the debug-related API for the efrm library, using
++ * Linux kernel primitives.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_DEBUG_LINUX_H__
++#define __CI_EFRM_DEBUG_LINUX_H__
++
++#define EFRM_PRINTK_PREFIX "[sfc efrm] "
++
++#define EFRM_PRINTK(level, fmt, ...) \
++ printk(level EFRM_PRINTK_PREFIX fmt "\n", __VA_ARGS__)
++
++/* The following macros must be given at least one argument after the
++ * format string, due to __VA_ARGS__ limitations. Use "%s" with
++ * __FUNCTION__ if you can't find better parameters. */
++#define EFRM_ERR(fmt, ...) EFRM_PRINTK(KERN_ERR, fmt, __VA_ARGS__)
++#define EFRM_WARN(fmt, ...) EFRM_PRINTK(KERN_WARNING, fmt, __VA_ARGS__)
++#define EFRM_NOTICE(fmt, ...) EFRM_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__)
++#if 0 && !defined(NDEBUG)
++#define EFRM_TRACE(fmt, ...) EFRM_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__)
++#else
++#define EFRM_TRACE(fmt, ...)
++#endif
++
++#ifndef NDEBUG
++#define EFRM_ASSERT(cond) BUG_ON((cond) == 0)
++#define _EFRM_ASSERT(cond, file, line) \
++ do { \
++ if (unlikely(!(cond))) { \
++ EFRM_ERR("assertion \"%s\" failed at %s %d", \
++ #cond, file, line); \
++ BUG(); \
++ } \
++ } while (0)
++
++#define EFRM_DO_DEBUG(expr) expr
++#define EFRM_VERIFY_EQ(expr, val) EFRM_ASSERT((expr) == (val))
++#else
++#define EFRM_ASSERT(cond)
++#define EFRM_DO_DEBUG(expr)
++#define EFRM_VERIFY_EQ(expr, val) expr
++#endif
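++
++/* Usage sketch (editorial): EFRM_ERR() and friends require at least one
++ * argument after the format string, so pass "%s", __FUNCTION__ when there
++ * is nothing better to report.  dump_state() below is a hypothetical
++ * debug helper.
++ *
++ *   EFRM_ERR("%s: rc=%d", __FUNCTION__, rc);
++ *   EFRM_ASSERT(rc == 0);
++ *   EFRM_DO_DEBUG(dump_state());  // compiled out when NDEBUG is defined
++ */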
++
++#endif /* __CI_EFRM_DEBUG_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,86 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the private API of the efrm library, to be used from
++ * the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_DRIVER_PRIVATE_H__
++#define __CI_EFRM_DRIVER_PRIVATE_H__
++
++#include <ci/efrm/resource.h>
++#include <ci/efrm/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * global variables
++ *
++ *--------------------------------------------------------------------*/
++
++/* Internal structure for resource driver */
++extern struct efrm_resource_manager *efrm_rm_table[];
++
++/*--------------------------------------------------------------------
++ *
++ * efrm_nic_table handling
++ *
++ *--------------------------------------------------------------------*/
++
++extern int efrm_driver_ctor(void);
++extern int efrm_driver_dtor(void);
++extern int efrm_driver_register_nic(struct efhw_nic *, int nic_index);
++extern int efrm_driver_unregister_nic(struct efhw_nic *);
++
++/*--------------------------------------------------------------------
++ *
++ * create/destroy resource managers
++ *
++ *--------------------------------------------------------------------*/
++
++struct vi_resource_dimensions {
++ unsigned evq_int_min, evq_int_max;
++ unsigned evq_timer_min, evq_timer_max;
++ unsigned rxq_min, rxq_max;
++ unsigned txq_min, txq_max;
++};
++
++/*! Initialise resources */
++extern int
++efrm_resources_init(const struct vi_resource_dimensions *,
++ int buffer_table_min, int buffer_table_max);
++
++/*! Tear down resources */
++extern void efrm_resources_fini(void);
++
++#endif /* __CI_EFRM_DRIVER_PRIVATE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/filter.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/filter.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/filter.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/filter.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,147 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public API for the filter resource.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_FILTER_H__
++#define __CI_EFRM_FILTER_H__
++
++#include <ci/efrm/resource.h>
++#include <ci/efrm/vi_resource.h>
++#include <ci/efrm/nic_set.h>
++#include <ci/efhw/common.h>
++
++/*! Filter resource: an IP filter that steers traffic to a VI. */
++struct filter_resource {
++ struct efrm_resource rs;
++ struct vi_resource *pt;
++ int filter_idx;
++ efrm_nic_set_t nic_set;
++};
++
++#define filter_resource(rs1) container_of((rs1), struct filter_resource, rs)
++
++/*!
++ * Allocate filter resource.
++ *
++ * \param vi_parent VI resource to use as parent. The function takes
++ * a reference to the VI resource on success.
++ * \param frs_out pointer to return the new filter resource
++ *
++ * \return status code; if non-zero, frs_out is unchanged
++ */
++extern int
++efrm_filter_resource_alloc(struct vi_resource *vi_parent,
++ struct filter_resource **frs_out);
++
++/* efrm_filter_resource_free should be called only if
++ * __efrm_resource_ref_count_zero() returned true.
++ * The easiest way is to call efrm_filter_resource_release() */
++void efrm_filter_resource_free(struct filter_resource *frs);
++static inline void efrm_filter_resource_release(struct filter_resource *frs)
++{
++ unsigned id;
++
++ EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 0);
++ id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle);
++
++ if (atomic_dec_and_test(&frs->rs.rs_ref_count)) {
++ if (__efrm_resource_ref_count_zero(EFRM_RESOURCE_FILTER, id)) {
++ EFRM_ASSERT(EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle) ==
++ id);
++ efrm_filter_resource_free(frs);
++ }
++ }
++}
++
++/*--------------------------------------------------------------------
++ *!
++ * Called to set/change the PT endpoint of a filter
++ *
++ * Example of use is TCP helper when it finds a wildcard IP filter
++ * needs to change which application it delivers traffic to
++ *
++ * \param frs filter resource
++ * \param virs VI resource to use as the new PT endpoint
++ *
++ * \return standard error codes
++ *
++ *--------------------------------------------------------------------*/
++extern int
++efrm_filter_resource_set_ptresource(struct filter_resource *frs,
++ struct vi_resource *virs);
++
++extern int efrm_filter_resource_clear(struct filter_resource *frs);
++
++extern int __efrm_filter_resource_set(struct filter_resource *frs, int type,
++ unsigned saddr_be32, uint16_t sport_be16,
++ unsigned daddr_be32, uint16_t dport_be16);
++
++static inline int
++efrm_filter_resource_tcp_set(struct filter_resource *frs,
++ unsigned saddr, uint16_t sport,
++ unsigned daddr, uint16_t dport)
++{
++ int type;
++
++ EFRM_ASSERT((saddr && sport) || (!saddr && !sport));
++
++ type =
++ saddr ? EFHW_IP_FILTER_TYPE_TCP_FULL :
++ EFHW_IP_FILTER_TYPE_TCP_WILDCARD;
++
++ return __efrm_filter_resource_set(frs, type,
++ saddr, sport, daddr, dport);
++}
++
++static inline int
++efrm_filter_resource_udp_set(struct filter_resource *frs,
++ unsigned saddr, uint16_t sport,
++ unsigned daddr, uint16_t dport)
++{
++ int type;
++
++ EFRM_ASSERT((saddr && sport) || (!saddr && !sport));
++
++ type =
++ saddr ? EFHW_IP_FILTER_TYPE_UDP_FULL :
++ EFHW_IP_FILTER_TYPE_UDP_WILDCARD;
++
++ return __efrm_filter_resource_set(frs,
++ type, saddr, sport, daddr, dport);
++}
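++
++/* Usage sketch (editorial): install a full-match TCP filter that steers
++ * traffic from 10.0.0.2:1234 to local 10.0.0.1:80 into the VI behind the
++ * filter.  Addresses and ports are big-endian, hence htonl()/htons();
++ * [virs] is an existing VI resource assumed to come from the caller.
++ * Passing saddr == 0 and sport == 0 would request a wildcard filter
++ * instead.
++ *
++ *   struct filter_resource *frs;
++ *   if (efrm_filter_resource_alloc(virs, &frs) == 0) {
++ *           if (efrm_filter_resource_tcp_set(frs, htonl(0x0a000002),
++ *                                            htons(1234),
++ *                                            htonl(0x0a000001),
++ *                                            htons(80)) < 0)
++ *                   efrm_filter_resource_release(frs);
++ *   }
++ */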
++
++#endif /* __CI_EFRM_FILTER_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,123 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public API for the iobufset resource.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_IOBUFSET_H__
++#define __CI_EFRM_IOBUFSET_H__
++
++#include <ci/efrm/vi_resource.h>
++
++/*! Iobufset resource structure.
++ * Users should not access the structure fields directly, but use the API
++ * below.
++ * However, this structure should not be moved out of public headers,
++ * because part of the API (e.g. the efrm_iobufset_dma_addr function) is
++ * inline and is used in fast-path code.
++ */
++struct iobufset_resource {
++ struct efrm_resource rs;
++ struct vi_resource *evq;
++ struct efhw_buffer_table_allocation buf_tbl_alloc;
++ unsigned int faultonaccess;
++ unsigned int n_bufs;
++ unsigned int pages_per_contiguous_chunk;
++ unsigned order;
++ efhw_iopage_t bufs[1];
++ /*!< up to n_bufs can follow this, so this must be the last member */
++};
++
++#define iobufset_resource(rs1) \
++ container_of((rs1), struct iobufset_resource, rs)
++
++/*!
++ * Allocate iobufset resource.
++ *
++ * \param vi_evq VI resource to use. The function takes
++ * a reference to the VI resource on success.
++ * \param iobrs_out pointer to return the new iobufset resource
++ *
++ * \return status code; if non-zero, iobrs_out is unchanged
++ */
++extern int
++efrm_iobufset_resource_alloc(int32_t n_pages,
++ int32_t pages_per_contiguous_chunk,
++ struct vi_resource *vi_evq,
++ bool phys_addr_mode,
++ uint32_t faultonaccess,
++ struct iobufset_resource **iobrs_out);
++
++/* efrm_iobufset_resource_free should be called only if
++ * __efrm_resource_ref_count_zero() returned true.
++ * The easiest way is to call efrm_iobufset_resource_release() */
++void efrm_iobufset_resource_free(struct iobufset_resource *rs);
++static inline void
++efrm_iobufset_resource_release(struct iobufset_resource *iobrs)
++{
++ unsigned id;
++
++ EFRM_RESOURCE_ASSERT_VALID(&iobrs->rs, 0);
++ id = EFRM_RESOURCE_INSTANCE(iobrs->rs.rs_handle);
++
++ if (atomic_dec_and_test(&iobrs->rs.rs_ref_count)) {
++ if (__efrm_resource_ref_count_zero(EFRM_RESOURCE_IOBUFSET, id))
++ efrm_iobufset_resource_free(iobrs);
++ }
++}
++
++static inline char *
++efrm_iobufset_ptr(struct iobufset_resource *rs, unsigned offs)
++{
++ EFRM_ASSERT(offs < (unsigned)(rs->n_bufs << PAGE_SHIFT));
++ return efhw_iopage_ptr(&rs->bufs[offs >> PAGE_SHIFT])
++ + (offs & (PAGE_SIZE - 1));
++}
++
++static inline char *efrm_iobufset_page_ptr(struct iobufset_resource *rs,
++ unsigned page_i)
++{
++ EFRM_ASSERT(page_i < (unsigned)rs->n_bufs);
++ return efhw_iopage_ptr(&rs->bufs[page_i]);
++}
++
++static inline dma_addr_t
++efrm_iobufset_dma_addr(struct iobufset_resource *rs, unsigned offs)
++{
++ EFRM_ASSERT(offs < (unsigned)(rs->n_bufs << PAGE_SHIFT));
++ return efhw_iopage_dma_addr(&rs->bufs[offs >> PAGE_SHIFT])
++ + (offs & (PAGE_SIZE - 1));
++}
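++
++/* Worked example (editorial): for a byte offset into the buffer set, both
++ * accessors index page (offs >> PAGE_SHIFT) and add the in-page remainder,
++ * so with 4 KiB pages offs == 5000 resolves to byte 904 of page 1:
++ *
++ *   char       *va  = efrm_iobufset_ptr(rs, 5000);
++ *   dma_addr_t  dma = efrm_iobufset_dma_addr(rs, 5000);
++ */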
++
++#endif /* __CI_EFRM_IOBUFSET_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,104 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public API for NIC sets.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_NIC_SET_H__
++#define __CI_EFRM_NIC_SET_H__
++
++#include <ci/efrm/debug.h>
++#include <ci/efhw/common_sysdep.h>
++#include <ci/efhw/efhw_config.h>
++
++/*--------------------------------------------------------------------
++ *
++ * efrm_nic_set_t - tracks which NICs something has been done on
++ *
++ *--------------------------------------------------------------------*/
++
++/* The internal structure of efrm_nic_set_t should not be referenced
++ * outside of this file. Add a new accessor if you need one. */
++typedef struct {
++ uint32_t nics;
++} efrm_nic_set_t;
++
++#if EFHW_MAX_NR_DEVS > 32
++#error change efrm_nic_set to handle EFHW_MAX_NR_DEVS number of devices
++#endif
++
++static inline bool
++efrm_nic_set_read(const efrm_nic_set_t *nic_set, unsigned index)
++{
++ EFRM_ASSERT(nic_set);
++ EFRM_ASSERT(index < EFHW_MAX_NR_DEVS && index < 32);
++ return (nic_set->nics & (1 << index)) ? true : false;
++}
++
++static inline void
++efrm_nic_set_write(efrm_nic_set_t *nic_set, unsigned index, bool value)
++{
++ EFRM_ASSERT(nic_set);
++ EFRM_ASSERT(index < EFHW_MAX_NR_DEVS && index < 32);
++ EFRM_ASSERT(value == false || value == true);
++ nic_set->nics = (nic_set->nics & (~(1 << index))) + (value << index);
++}
++
++static inline void efrm_nic_set_clear(efrm_nic_set_t *nic_set)
++{
++ nic_set->nics = 0;
++}
++
++static inline void efrm_nic_set_all(efrm_nic_set_t *nic_set)
++{
++ nic_set->nics = 0xffffffff;
++}
++
++static inline bool efrm_nic_set_is_all_clear(efrm_nic_set_t *nic_set)
++{
++ return nic_set->nics == 0 ? true : false;
++}
++
++#define EFRM_NIC_SET_FMT "%x"
++
++static inline uint32_t efrm_nic_set_pri_arg(efrm_nic_set_t *nic_set)
++{
++ return nic_set->nics;
++}
++
++#define EFRM_FOR_EACH_NIC_INDEX_IN_SET(_set, _nic_i) \
++ for ((_nic_i) = 0; (_nic_i) < EFHW_MAX_NR_DEVS; ++(_nic_i)) \
++ if (efrm_nic_set_read((_set), (_nic_i)))
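++
++/* Usage sketch (editorial): build a set containing NIC index 0 and walk
++ * it (EFRM_NOTICE comes from <ci/efrm/debug.h>, included above).
++ *
++ *   efrm_nic_set_t set;
++ *   unsigned i;
++ *   efrm_nic_set_clear(&set);
++ *   efrm_nic_set_write(&set, 0, true);
++ *   EFRM_FOR_EACH_NIC_INDEX_IN_SET(&set, i)
++ *           EFRM_NOTICE("index %u is in set " EFRM_NIC_SET_FMT,
++ *                       i, efrm_nic_set_pri_arg(&set));
++ */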
++
++#endif /* __CI_EFRM_NIC_SET_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,98 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public API for the NIC table.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_NIC_TABLE_H__
++#define __CI_EFRM_NIC_TABLE_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efrm/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * struct efrm_nic_table - top level driver object keeping all NICs -
++ * implemented in driver_object.c
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Top-level driver object keeping track of all attached NICs. */
++struct efrm_nic_table {
++ /*! nics attached to this driver */
++ struct efhw_nic *nic[EFHW_MAX_NR_DEVS];
++ /*! pointer to an arbitrary struct efhw_nic if one exists;
++ * for code which does not care which NIC it wants but
++ * still needs one. Note you cannot assume nic[0] exists. */
++ struct efhw_nic *a_nic;
++ uint32_t nic_count; /*!< number of nics attached to this driver */
++ spinlock_t lock; /*!< lock for table modifications */
++ atomic_t ref_count; /*!< refcount for users of nic table */
++};
++
++/* Resource driver structures used by other drivers as well */
++extern struct efrm_nic_table efrm_nic_table;
++
++static inline void efrm_nic_table_hold(void)
++{
++ atomic_inc(&efrm_nic_table.ref_count);
++}
++
++static inline void efrm_nic_table_rele(void)
++{
++ atomic_dec(&efrm_nic_table.ref_count);
++}
++
++static inline int efrm_nic_table_held(void)
++{
++ return (atomic_read(&efrm_nic_table.ref_count) != 0);
++}
++
++/* Run a code block multiple times with variable _nic set to each
++ * registered NIC in turn.
++ * DO NOT "break" out of this loop early. */
++#define EFRM_FOR_EACH_NIC(_nic_i, _nic) \
++ for ((_nic_i) = (efrm_nic_table_hold(), 0); \
++ (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++ (_nic_i)++) \
++ if (((_nic) = efrm_nic_table.nic[_nic_i]))
++
++#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \
++ for ((_i) = (efrm_nic_table_hold(), 0); \
++ (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++ ++(_i)) \
++ if (((_nic) = efrm_nic_table.nic[_i]) && \
++ efrm_nic_set_read((_set), (_i)))
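++
++/* Usage sketch (editorial): visit every registered NIC.  The loop bounds
++ * take and drop the table reference via the comma expressions above,
++ * which is why "break"-ing out early would leak a reference.
++ *
++ *   int i;
++ *   struct efhw_nic *nic;
++ *   EFRM_FOR_EACH_NIC(i, nic)
++ *           printk(KERN_INFO "nic %d present\n", i);
++ */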
++
++#endif /* __CI_EFRM_NIC_TABLE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/private.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/private.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/private.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/private.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,141 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the private API of the efrm library -- resource
++ * handling. This API is not designed for use outside of the SFC
++ * resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_PRIVATE_H__
++#define __CI_EFRM_PRIVATE_H__
++
++#include <ci/efrm/resource.h>
++#include <ci/efrm/driver_private.h>
++#include <ci/efrm/sysdep.h>
++#include <ci/efrm/debug.h>
++
++/*--------------------------------------------------------------------
++ *
++ * create resource managers
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Create a resource manager for various types of resources
++ */
++extern int
++efrm_create_iobufset_resource_manager(struct efrm_resource_manager **out);
++
++extern int
++efrm_create_filter_resource_manager(struct efrm_resource_manager **out);
++
++extern int
++efrm_create_vi_resource_manager(struct efrm_resource_manager **out,
++ const struct vi_resource_dimensions *);
++
++/*--------------------------------------------------------------------
++ *
++ * efrm_resource_handle_t handling
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Initialize an area of memory to be used as a resource */
++static inline void efrm_resource_init(struct efrm_resource *rs,
++ int type, int instance)
++{
++ EFRM_ASSERT(instance >= 0);
++ EFRM_ASSERT(type >= 0 && type < EFRM_RESOURCE_NUM);
++ atomic_set(&rs->rs_ref_count, 1);
++ rs->rs_handle.handle = (type << 28u) |
++ (((unsigned)jiffies & 0xfff) << 16) | instance;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Instance pool management
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Allocate an instance pool. Use kfifo_vfree() to destroy it. */
++static inline int
++efrm_kfifo_id_ctor(struct kfifo **ids_out,
++ unsigned int base, unsigned int limit, spinlock_t *lock)
++{
++ unsigned int i;
++ struct kfifo *ids;
++ unsigned char *buffer;
++ unsigned int size = roundup_pow_of_two((limit - base) * sizeof(int));
++
++ EFRM_ASSERT(base <= limit);
++	buffer = vmalloc(size);
++	if (buffer == NULL)
++		return -ENOMEM;
++	ids = kfifo_init(buffer, size, GFP_KERNEL, lock);
++	/* kfifo_init() does not free the buffer on failure */
++	if (IS_ERR(ids)) {
++		vfree(buffer);
++		return PTR_ERR(ids);
++	}
++ for (i = base; i < limit; i++)
++ EFRM_VERIFY_EQ(__kfifo_put(ids, (unsigned char *)&i,
++ sizeof(i)), sizeof(i));
++
++ *ids_out = ids;
++ return 0;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Various private functions
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Initialize the fields in the provided resource manager memory area
++ * \param rm The area of memory to be initialized
++ * \param dtor A method to destroy the resource manager
++ * \param name A textual name for the resource manager
++ * \param type The type of resource managed
++ * \param initial_table_size Initial size of the ID table
++ * \param auto_destroy Destroy the resource manager on driver unload iff true
++ *
++ * A default table size is used if the value 0 is given.
++ */
++extern int
++efrm_resource_manager_ctor(struct efrm_resource_manager *rm,
++ void (*dtor)(struct efrm_resource_manager *),
++ const char *name, unsigned type,
++ int initial_table_size);
++
++extern void efrm_resource_manager_dtor(struct efrm_resource_manager *rm);
++
++/*! Insert a resource into the table of the resource manager.
++ *
++ * Caller should free the resource if this function returns non-zero.
++ */
++extern int efrm_resource_manager_insert(struct efrm_resource *rs);
++
++#endif /* __CI_EFRM_PRIVATE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/resource.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/resource.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/resource.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/resource.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,122 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public interface of the efrm library --
++ * resource handling.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_RESOURCE_H__
++#define __CI_EFRM_RESOURCE_H__
++
++/*--------------------------------------------------------------------
++ *
++ * headers for type dependencies
++ *
++ *--------------------------------------------------------------------*/
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efrm/resource_id.h>
++#include <ci/efrm/sysdep.h>
++#include <ci/efhw/common_sysdep.h>
++
++#ifndef __ci_driver__
++#error "Driver-only file"
++#endif
++
++/*--------------------------------------------------------------------
++ *
++ * struct efrm_resource - represents an allocated resource
++ * (eg. pinned pages of memory, or resource on a NIC)
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Representation of an allocated resource */
++struct efrm_resource {
++ atomic_t rs_ref_count; /*!< users count; see
++ * __efrm_resource_ref_count_zero() */
++ efrm_resource_handle_t rs_handle;
++};
++
++/*--------------------------------------------------------------------
++ *
++ * managed resource abstraction
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Factory for resources of a specific type */
++struct efrm_resource_manager {
++ const char *rm_name; /*!< human readable only */
++ spinlock_t rm_lock;
++#ifndef NDEBUG
++ unsigned rm_type;
++#endif
++ int rm_resources;
++ int rm_resources_hiwat;
++ /*! table of allocated resources */
++ struct efrm_resource **rm_table;
++ unsigned rm_table_size;
++ /**
++	 * Destructor for the resource manager. Other resource managers
++	 * might already be dead, although the system guarantees that
++	 * managers are destroyed in the order in which they were created
++ */
++ void (*rm_dtor)(struct efrm_resource_manager *);
++};
++
++#ifdef NDEBUG
++# define EFRM_RESOURCE_ASSERT_VALID(rs, rc_mbz)
++# define EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm)
++#else
++/*! Check validity of resource and report on failure */
++extern void efrm_resource_assert_valid(struct efrm_resource *,
++ int rc_may_be_zero,
++ const char *file, int line);
++# define EFRM_RESOURCE_ASSERT_VALID(rs, rc_mbz) \
++ efrm_resource_assert_valid((rs), (rc_mbz), __FILE__, __LINE__)
++
++/*! Check validity of resource manager and report on failure */
++extern void efrm_resource_manager_assert_valid(struct efrm_resource_manager *,
++ const char *file, int line);
++# define EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm) \
++ efrm_resource_manager_assert_valid((rm), __FILE__, __LINE__)
++#endif
++
++/*! Check the reference count on the resource provided and delete its
++ * handle in its owning resource manager if the
++ * reference count has fallen to zero.
++ *
++ * Returns TRUE if the caller should really free the resource.
++ */
++extern bool __efrm_resource_ref_count_zero(unsigned type, unsigned instance);
++
++#endif /* __CI_EFRM_RESOURCE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,104 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the public type and definitions for resource
++ * handles, and the definitions of resource types.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_EFRM_RESOURCE_ID_H__
++#define __CI_DRIVER_EFRM_RESOURCE_ID_H__
++
++/***********************************************************************
++ * Resource handles
++ *
++ * Resource handles are intended for identifying resources at kernel
++ * level, within the context of a particular NIC, particularly because
++ * for some resource types the low 16 bits correspond to hardware
++ * IDs. They were historically also used to identify resources within
++ * the context of a file descriptor at user level, with a nonce
++ * stored in bits 16 to 27 (inclusive), but that approach is
++ * deprecated (though still alive!).
++ *
++ * The handle value 0 is used to mean "no resource".
++ ***********************************************************************/
++
++typedef struct efrm_resource_handle_s {
++ uint32_t handle;
++} efrm_resource_handle_t;
++
++/* You may think the following functions should all have
++ * _HANDLE_ in their names, but really we are providing an abstract set
++ * of methods on a (hypothetical) efrm_resource_t object, with
++ * efrm_resource_handle_t being just the reference one holds to access
++ * the object (aka "this" or "self").
++ */
++
++/* Below I use inline instead of macros where possible in order to get
++ * more type checking help from the compiler; hopefully we'll never
++ * have to rewrite these to use #define as we've found some horrible
++ * compiler on which we cannot make static inline do the Right Thing (tm).
++ *
++ * For consistency and to avoid pointless change I spell these
++ * routines as macro names (CAPITALIZED_UNDERSCORED), which also serves
++ * to remind people they are compact and inlined.
++ */
++
++#define EFRM_RESOURCE_FMT "[rs:%08x]"
++
++static inline unsigned EFRM_RESOURCE_PRI_ARG(efrm_resource_handle_t h)
++{
++ return (h.handle);
++}
++
++static inline unsigned EFRM_RESOURCE_INSTANCE(efrm_resource_handle_t h)
++{
++ return (h.handle & 0x0000ffff);
++}
++
++static inline unsigned EFRM_RESOURCE_TYPE(efrm_resource_handle_t h)
++{
++ return (h.handle & 0xf0000000) >> 28;
++}
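++/* Illustrative example (hypothetical handle value): for a handle whose
++ * value is 0x10000123, EFRM_RESOURCE_TYPE() yields 0x1
++ * (EFRM_RESOURCE_VI, defined below) and EFRM_RESOURCE_INSTANCE()
++ * yields 0x0123; the whole handle can be logged with, e.g.:
++ *
++ *   EFRM_TRACE("got " EFRM_RESOURCE_FMT, EFRM_RESOURCE_PRI_ARG(h));
++ */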
++
++/***********************************************************************
++ * Resource type codes
++ ***********************************************************************/
++
++#define EFRM_RESOURCE_IOBUFSET 0x0
++#define EFRM_RESOURCE_VI 0x1
++#define EFRM_RESOURCE_FILTER 0x2
++#define EFRM_RESOURCE_NUM 0x3 /* This isn't a resource! */
++
++#define EFRM_RESOURCE_NAME(type) \
++ ((type) == EFRM_RESOURCE_IOBUFSET? "IOBUFSET" : \
++ (type) == EFRM_RESOURCE_VI? "VI" : \
++ (type) == EFRM_RESOURCE_FILTER? "FILTER" : \
++ "<invalid>")
++
++#endif /* __CI_DRIVER_EFRM_RESOURCE_ID_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,54 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides a Linux-like, system-independent API for the efrm library.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_SYSDEP_H__
++#define __CI_EFRM_SYSDEP_H__
++
++/* Spinlocks are defined in efhw/sysdep.h */
++#include <ci/efhw/sysdep.h>
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++# include <ci/efrm/sysdep_linux.h>
++
++#else
++
++# include <ci/efrm/sysdep_ci2linux.h>
++
++#endif
++
++#endif /* __CI_EFRM_SYSDEP_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,248 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides a version-independent Linux kernel API for the
++ * efrm library. Only kernels >= 2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Kfifo API is partially stolen from linux-2.6.22/include/linux/kfifo.h
++ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_SYSDEP_LINUX_H__
++#define __CI_EFRM_SYSDEP_LINUX_H__
++
++#include <linux/version.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/workqueue.h>
++#include <linux/gfp.h>
++#include <linux/slab.h>
++#include <linux/hardirq.h>
++#include <linux/kernel.h>
++#include <linux/if_ether.h>
++#include <linux/completion.h>
++#include <linux/in.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++/* get roundup_pow_of_two(), which was in kernel.h in early kernel versions */
++#include <linux/log2.h>
++#endif
++
++/********************************************************************
++ *
++ * List API
++ *
++ ********************************************************************/
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++static inline void
++list_replace_init(struct list_head *old, struct list_head *new)
++{
++ new->next = old->next;
++ new->next->prev = new;
++ new->prev = old->prev;
++ new->prev->next = new;
++ INIT_LIST_HEAD(old);
++}
++#endif
++
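++/* Note: the pop helpers below assume the list is non-empty; popping an
++ * empty list would unlink the list head itself. */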
++static inline struct list_head *list_pop(struct list_head *list)
++{
++ struct list_head *link = list->next;
++ list_del(link);
++ return link;
++}
++
++static inline struct list_head *list_pop_tail(struct list_head *list)
++{
++ struct list_head *link = list->prev;
++ list_del(link);
++ return link;
++}
++
++/********************************************************************
++ *
++ * Workqueue API
++ *
++ ********************************************************************/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define NEED_OLD_WORK_API
++
++/**
++ * The old and new work function prototypes just change
++ * the type of the pointer in the only argument, so it's
++ * safe to cast one function type to the other
++ */
++typedef void (*efrm_old_work_func_t) (void *p);
++
++#undef INIT_WORK
++#define INIT_WORK(_work, _func) \
++ do { \
++ INIT_LIST_HEAD(&(_work)->entry); \
++ (_work)->pending = 0; \
++ PREPARE_WORK((_work), \
++ (efrm_old_work_func_t) (_func), \
++ (_work)); \
++ } while (0)
++
++#endif
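++/* With the compatibility INIT_WORK above, a handler written against the
++ * newer (>= 2.6.20) prototype can be used on both APIs; an illustrative
++ * sketch (my_handler and obj are hypothetical):
++ *
++ *   static void my_handler(struct work_struct *w) { ... }
++ *   ...
++ *   INIT_WORK(&obj->work_item, my_handler);
++ *
++ * On older kernels the handler is cast to efrm_old_work_func_t and the
++ * work_struct pointer is passed as its data argument.
++ */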
++
++/********************************************************************
++ *
++ * Kfifo API
++ *
++ ********************************************************************/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++
++#if !defined(RHEL_RELEASE_CODE) || (RHEL_RELEASE_CODE < 1029)
++typedef unsigned gfp_t;
++#endif
++
++#define HAS_NO_KFIFO
++
++struct kfifo {
++ unsigned char *buffer; /* the buffer holding the data */
++ unsigned int size; /* the size of the allocated buffer */
++ unsigned int in; /* data is added at offset (in % size) */
++ unsigned int out; /* data is extracted from off. (out % size) */
++ spinlock_t *lock; /* protects concurrent modifications */
++};
++
++extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
++ gfp_t gfp_mask, spinlock_t *lock);
++extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
++ spinlock_t *lock);
++extern void kfifo_free(struct kfifo *fifo);
++extern unsigned int __kfifo_put(struct kfifo *fifo,
++ unsigned char *buffer, unsigned int len);
++extern unsigned int __kfifo_get(struct kfifo *fifo,
++ unsigned char *buffer, unsigned int len);
++
++/**
++ * kfifo_put - puts some data into the FIFO
++ * @fifo: the fifo to be used.
++ * @buffer: the data to be added.
++ * @len: the length of the data to be added.
++ *
++ * This function copies at most @len bytes from the @buffer into
++ * the FIFO depending on the free space, and returns the number of
++ * bytes copied.
++ */
++static inline unsigned int
++kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_put(fifo, buffer, len);
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++/**
++ * kfifo_get - gets some data from the FIFO
++ * @fifo: the fifo to be used.
++ * @buffer: where the data must be copied.
++ * @len: the size of the destination buffer.
++ *
++ * This function copies at most @len bytes from the FIFO into the
++ * @buffer and returns the number of copied bytes.
++ */
++static inline unsigned int
++kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_get(fifo, buffer, len);
++
++ /*
++ * optimization: if the FIFO is empty, set the indices to 0
++ * so we don't wrap the next time
++ */
++ if (fifo->in == fifo->out)
++ fifo->in = fifo->out = 0;
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++/**
++ * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
++ * @fifo: the fifo to be used.
++ */
++static inline unsigned int __kfifo_len(struct kfifo *fifo)
++{
++ return fifo->in - fifo->out;
++}
++
++/**
++ * kfifo_len - returns the number of bytes available in the FIFO
++ * @fifo: the fifo to be used.
++ */
++static inline unsigned int kfifo_len(struct kfifo *fifo)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_len(fifo);
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
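++/* Illustrative usage of this compat kfifo (the spinlock is supplied by,
++ * and belongs to, the caller):
++ *
++ *   static spinlock_t fifo_lock = SPIN_LOCK_UNLOCKED;
++ *   struct kfifo *f = kfifo_alloc(64, GFP_KERNEL, &fifo_lock);
++ *   unsigned char b = 42;
++ *   kfifo_put(f, &b, 1);
++ *   kfifo_get(f, &b, 1);
++ *   kfifo_free(f);
++ */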
++
++#else
++#include <linux/kfifo.h>
++#endif
++
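++/* Free a kfifo whose buffer was (presumably) allocated with vmalloc(),
++ * hence vfree() here rather than the kfree() that kfifo_free() uses. */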
++static inline void kfifo_vfree(struct kfifo *fifo)
++{
++ vfree(fifo->buffer);
++ kfree(fifo);
++}
++
++#endif /* __CI_EFRM_SYSDEP_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,171 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains the public API for the VI resource.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_VI_RESOURCE_H__
++#define __CI_EFRM_VI_RESOURCE_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efrm/resource.h>
++#include <ci/efrm/debug.h>
++
++struct vi_resource;
++
++/* Make these inline instead of macros for type checking */
++static inline struct vi_resource *
++efrm_to_vi_resource(struct efrm_resource *rs)
++{
++ EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) == EFRM_RESOURCE_VI);
++ return (struct vi_resource *) rs;
++}
++static inline struct
++efrm_resource *efrm_from_vi_resource(struct vi_resource *rs)
++{
++ return (struct efrm_resource *)rs;
++}
++
++#define EFAB_VI_RESOURCE_INSTANCE(virs) \
++ EFRM_RESOURCE_INSTANCE(efrm_from_vi_resource(virs)->rs_handle)
++
++#define EFAB_VI_RESOURCE_PRI_ARG(virs) \
++ EFRM_RESOURCE_PRI_ARG(efrm_from_vi_resource(virs)->rs_handle)
++
++extern int
++efrm_vi_resource_alloc(struct vi_resource *evq_virs,
++ uint16_t vi_flags, int32_t evq_capacity,
++ int32_t txq_capacity, int32_t rxq_capacity,
++ uint8_t tx_q_tag, uint8_t rx_q_tag,
++ struct vi_resource **virs_in_out,
++ uint32_t *out_io_mmap_bytes,
++ uint32_t *out_mem_mmap_bytes,
++ uint32_t *out_txq_capacity,
++ uint32_t *out_rxq_capacity);
++
++static inline void efrm_vi_resource_ref(struct vi_resource *virs)
++{
++ atomic_inc(&efrm_from_vi_resource(virs)->rs_ref_count);
++}
++
++/* efrm_vi_resource_free should be called only if
++ * __efrm_resource_ref_count_zero() returned true.
++ * The easiest way is to call efrm_vi_resource_release() */
++extern void efrm_vi_resource_free(struct vi_resource *virs);
++static inline void efrm_vi_resource_release(struct vi_resource *virs)
++{
++ unsigned id;
++ struct efrm_resource *rs = efrm_from_vi_resource(virs);
++
++ id = EFRM_RESOURCE_INSTANCE(rs->rs_handle);
++
++ if (atomic_dec_and_test(&rs->rs_ref_count)) {
++ if (__efrm_resource_ref_count_zero(EFRM_RESOURCE_VI, id)) {
++ EFRM_ASSERT(EFRM_RESOURCE_INSTANCE(rs->rs_handle) ==
++ id);
++ efrm_vi_resource_free(virs);
++ }
++ }
++}
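++/* Typical (illustrative) usage: efrm_vi_resource_ref() takes an extra
++ * reference; efrm_vi_resource_release() drops one, freeing the resource
++ * once the last reference is gone and the resource manager agrees. */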
++
++/*--------------------------------------------------------------------
++ *
++ * eventq handling
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Reset an event queue and clear any associated timers */
++extern void efrm_eventq_reset(struct vi_resource *virs, int nic_index);
++
++/*! Register a kernel-level handler for the event queue. This function is
++ * called whenever a timer expires, or whenever the event queue is woken
++ * but no thread is blocked on it.
++ *
++ * This function returns -EBUSY if a callback is already installed.
++ *
++ * \param rs Event-queue resource
++ * \param handler Callback-handler
++ * \param arg Argument to pass to callback-handler
++ * \return Status code
++ */
++extern int
++efrm_eventq_register_callback(struct vi_resource *rs,
++ void (*handler)(void *arg, int is_timeout,
++ struct efhw_nic *nic),
++ void *arg);
++
++/*! Kill the kernel-level callback.
++ *
++ * This function stops the timer from running and unregisters the callback
++ * function. It waits for any running timeout handlers to complete before
++ * returning.
++ *
++ * \param rs Event-queue resource
++ * \return Nothing
++ */
++extern void efrm_eventq_kill_callback(struct vi_resource *rs);
++
++/*! Ask the NIC to generate a wakeup when an event is next delivered. */
++extern void efrm_eventq_request_wakeup(struct vi_resource *rs,
++ unsigned current_ptr,
++ unsigned nic_index);
++
++/*! Register a kernel-level handler for flush completions.
++ * \TODO Currently, it is unsafe to install a callback more than once.
++ *
++ * \param rs VI resource being flushed.
++ * \param handler Callback handler function.
++ * \param arg Argument to be passed to handler.
++ */
++extern void
++efrm_vi_register_flush_callback(struct vi_resource *rs,
++ void (*handler)(void *),
++ void *arg);
++
++int efrm_vi_resource_flush_retry(struct vi_resource *virs);
++
++/*! Initiate a flush of the VI's DMA queues; completion is reported
++ * via the registered flush callback. */
++extern int efrm_pt_flush(struct vi_resource *);
++
++/*! Set the pacing value for the VI's transmit queue. */
++extern int efrm_pt_pace(struct vi_resource *, unsigned int val);
++
++uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs
++ /*,struct efhw_nic *nic */ );
++uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs
++ /*,struct efhw_nic *nic */ );
++uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs
++ /*,struct efhw_nic *nic */ );
++
++#endif /* __CI_EFRM_VI_RESOURCE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,182 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains type definitions for the VI resource. These types
++ * may be used outside of the SFC resource driver, but such use is not
++ * recommended.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__
++#define __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__
++
++#include <ci/efhw/common.h>
++#include <ci/efrm/vi_resource.h>
++#include <ci/efrm/nic_set.h>
++
++#define EFRM_VI_RM_DMA_QUEUE_COUNT 2
++#define EFRM_VI_RM_DMA_QUEUE_TX 0
++#define EFRM_VI_RM_DMA_QUEUE_RX 1
++
++/** Bit positions that may be set in the evq_state member of
++ * vi_resource_evq_info. */
++enum {
++ /** This bit is set if a wakeup has been requested on the NIC. */
++ VI_RESOURCE_EVQ_STATE_WAKEUP_PENDING,
++ /** This bit is set if the wakeup is valid for the sleeping
++ * process. */
++ VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
++ /** This bit is set if a wakeup or timeout event is currently being
++ * processed. */
++ VI_RESOURCE_EVQ_STATE_BUSY,
++};
++#define VI_RESOURCE_EVQ_STATE(X) \
++ (((int32_t)1) << (VI_RESOURCE_EVQ_STATE_##X))
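++/* e.g. VI_RESOURCE_EVQ_STATE(BUSY) expands to
++ * ((int32_t)1) << VI_RESOURCE_EVQ_STATE_BUSY, i.e. 0x4. */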
++
++/** Information about an event queue. */
++struct vi_resource_evq_info {
++ /** Flag bits indicating the state of wakeups. */
++ unsigned long evq_state;
++ /** A pointer to the resource instance for this queue. This member
++ * is only valid if evq_state is non-zero or the resource is known
++ * to have a non-zero reference count. */
++ struct vi_resource *evq_virs;
++};
++
++#ifdef __ci_ul_driver__
++#define EFRM_VI_USE_WORKQUEUE 0
++#else
++#define EFRM_VI_USE_WORKQUEUE 1
++#endif
++
++/*! Global information for the VI resource manager. */
++struct vi_resource_manager {
++ struct efrm_resource_manager rm;
++
++ struct kfifo *instances_with_timer;
++ int with_timer_base;
++ int with_timer_limit;
++ struct kfifo *instances_with_interrupt;
++ int with_interrupt_base;
++ int with_interrupt_limit;
++
++ bool iscsi_dmaq_instance_is_free;
++ struct vi_resource_evq_info *evq_infos;
++
++ /* We keep VI resources which need flushing on these lists. The VI
++ * is put on the outstanding list when the flush request is issued
++ * to the hardware and removed when the flush event arrives. The
++ * hardware can only handle a limited number of RX flush requests at
++ * once, so VIs are placed in the waiting list until the flush can
++ * be issued. Flushes can be requested by the client or internally
++ * by the VI resource manager. In the former case, the reference
++ * count must be non-zero for the duration of the flush and in the
++	 * latter case, the reference count must be zero. */
++ struct list_head rx_flush_waiting_list;
++ struct list_head rx_flush_outstanding_list;
++ struct list_head tx_flush_outstanding_list;
++ int rx_flush_outstanding_count;
++
++	/* Once the flush has happened we push the close onto the work queue
++	 * so it's OK on Windows to free the resources (Bug 3469). Resources
++	 * on this list have a zero reference count.
++	 */
++ struct list_head close_pending;
++ struct work_struct work_item;
++#if EFRM_VI_USE_WORKQUEUE
++ struct workqueue_struct *workqueue;
++#endif
++};
++
++struct vi_resource_nic_info {
++ struct eventq_resource_hardware evq_pages;
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ efhw_iopages_t dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT];
++#endif
++};
++
++struct vi_resource {
++ /* Some macros make the assumption that the struct efrm_resource is
++ * the first member of a struct vi_resource. */
++ struct efrm_resource rs;
++ atomic_t evq_refs; /*!< Number of users of the event queue. */
++
++ efrm_nic_set_t nic_set;
++
++ uint32_t bar_mmap_bytes;
++ uint32_t mem_mmap_bytes;
++
++ int32_t evq_capacity;
++ int32_t dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_COUNT];
++
++ uint8_t dmaq_tag[EFRM_VI_RM_DMA_QUEUE_COUNT];
++ uint16_t flags;
++
++	/* We keep PT endpoints that have been destroyed on a list
++	 * until we have seen their TX and RX DMAQ flushes complete
++	 * (see Bug 1217)
++	 */
++ struct list_head rx_flush_link;
++ struct list_head tx_flush_link;
++ efrm_nic_set_t rx_flush_nic_set;
++ efrm_nic_set_t rx_flush_outstanding_nic_set;
++ efrm_nic_set_t tx_flush_nic_set;
++ uint64_t flush_time;
++ int flush_count;
++
++ void (*flush_callback_fn)(void *);
++ void *flush_callback_arg;
++
++ void (*evq_callback_fn) (void *arg, int is_timeout,
++ struct efhw_nic *nic);
++ void *evq_callback_arg;
++
++ struct vi_resource *evq_virs; /*!< EVQ for DMA queues */
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ struct efhw_buffer_table_allocation
++ dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_COUNT];
++#endif
++
++ struct vi_resource_nic_info nic_info[EFHW_MAX_NR_DEVS];
++};
++
++#undef vi_resource
++#define vi_resource(rs1) container_of((rs1), struct vi_resource, rs)
++
++static inline dma_addr_t
++efrm_eventq_dma_addr(struct vi_resource *virs, uint32_t nic_index)
++{
++ struct eventq_resource_hardware *hw;
++ EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
++
++ hw = &(virs->nic_info[nic_index].evq_pages);
++
++ return efhw_iopages_dma_addr(&(hw->iobuff)) + hw->iobuff_off;
++}
++
++#endif /* __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,83 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains the private API for the VI resource. The API is
++ * not designed to be used outside of the SFC resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_VI_RESOURCE_PRIVATE_H__
++#define __CI_EFRM_VI_RESOURCE_PRIVATE_H__
++
++#include <ci/efhw/common.h>
++#include <ci/efrm/vi_resource_manager.h>
++
++extern struct vi_resource_manager *efrm_vi_manager;
++
++/*************************************************************************/
++
++extern void efrm_vi_rm_delayed_free(struct work_struct *data);
++
++extern void efrm_vi_rm_salvage_flushed_vis(void);
++
++void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs);
++
++void efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_index,
++ struct efhw_nic *nic);
++
++static inline int
++efrm_eventq_bytes(struct vi_resource *virs, uint32_t nic_index)
++{
++ EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
++
++ return efrm_vi_rm_evq_bytes(virs);
++}
++
++static inline efhw_event_t *
++efrm_eventq_base(struct vi_resource *virs, uint32_t nic_index)
++{
++ struct eventq_resource_hardware *hw;
++
++ EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
++
++ hw = &(virs->nic_info[nic_index].evq_pages);
++
++ return (efhw_event_t *) (efhw_iopages_ptr(&(hw->iobuff)) +
++ hw->iobuff_off);
++}
++
++/*! Wakeup handler, see efhw_ev_handler_t for prototype */
++extern void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev);
++
++/*! Timeout handler, see efhw_ev_handler_t for prototype */
++extern void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev);
++
++/*! DMA flush handler, see efhw_ev_handler_t for prototype */
++extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, int instance,
++ int rx_flush);
++
++#endif /* __CI_EFRM_VI_RESOURCE_PRIVATE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/driverlink_new.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/driverlink_new.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/driverlink_new.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/driverlink_new.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,290 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains driverlink code which interacts with the sfc network
++ * driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "linux_resource_internal.h"
++#include "driverlink_api.h"
++#include "kernel_compat.h"
++#include <ci/efhw/falcon.h>
++
++#include <linux/rtnetlink.h>
++#include <linux/netdevice.h>
++
++/* The DL driver and associated calls */
++static int efrm_dl_probe(struct efx_dl_device *efrm_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char *silicon_rev);
++
++static void efrm_dl_remove(struct efx_dl_device *efrm_dev);
++
++static void efrm_dl_reset_suspend(struct efx_dl_device *efrm_dev);
++
++static void efrm_dl_reset_resume(struct efx_dl_device *efrm_dev, int ok);
++
++static void efrm_dl_mtu_changed(struct efx_dl_device *, int);
++static void efrm_dl_event_falcon(struct efx_dl_device *efx_dev, void *p_event);
++
++static struct efx_dl_driver efrm_dl_driver = {
++ .name = "resource",
++ .probe = efrm_dl_probe,
++ .remove = efrm_dl_remove,
++ .reset_suspend = efrm_dl_reset_suspend,
++ .reset_resume = efrm_dl_reset_resume
++};
++
++static void
++init_vi_resource_dimensions(struct vi_resource_dimensions *rd,
++ const struct efx_dl_falcon_resources *res)
++{
++ rd->evq_timer_min = res->evq_timer_min;
++ rd->evq_timer_max = res->evq_timer_max;
++ rd->evq_int_min = res->evq_int_min;
++ rd->evq_int_max = res->evq_int_max;
++ rd->rxq_min = res->rxq_min;
++ rd->rxq_max = res->rxq_max;
++ rd->txq_min = res->txq_min;
++ rd->txq_max = res->txq_max;
++ EFRM_TRACE
++ ("Using evq_int(%d-%d) evq_timer(%d-%d) RXQ(%d-%d) TXQ(%d-%d)",
++ res->evq_int_min, res->evq_int_max, res->evq_timer_min,
++ res->evq_timer_max, res->rxq_min, res->rxq_max, res->txq_min,
++ res->txq_max);
++}
++
++#if defined(EFX_NOT_UPSTREAM)
++/* We have a module parameter that can tell us to load the char driver
++ * for one NIC only (if there are multiple NICs in the system), and if
++ * so which one. This gives the PCI bus and slot of the NIC to load
++ * for, or -1 to load on all NICs (the default).
++ * Value is a hex number in the format
++ * bbbbss
++ * where:
++ * bbbb - PCI bus number
++ * ss - PCI slot number
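++ * Example (illustrative): a NIC at PCI bus 0x0002, slot 0x03 is
++ * selected with only_NIC=0x000203.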
++ */
++unsigned int only_NIC = -1;
++
++/** @ingroup module_params */
++module_param(only_NIC, uint, 0444);
++MODULE_PARM_DESC(only_NIC,
++ "Initialise sfc_resource driver for one NIC only, "
++ "with specified PCI bus and slot");
++#endif
++
++static int
++efrm_dl_probe(struct efx_dl_device *efrm_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char *silicon_rev)
++{
++ struct vi_resource_dimensions res_dim;
++ struct efx_dl_falcon_resources *res;
++ struct linux_efhw_nic *lnic;
++ struct pci_dev *dev;
++ struct efhw_nic *nic;
++ unsigned probe_flags = 0;
++ int rc;
++
++ efrm_dev->priv = NULL;
++
++ efx_dl_for_each_device_info_matching(dev_info, EFX_DL_FALCON_RESOURCES,
++ struct efx_dl_falcon_resources,
++ hdr, res) {
++ /* break out, leaving res pointing at the falcon resources */
++ break;
++ }
++
++ if (res == NULL) {
++ EFRM_ERR("%s: Unable to find falcon driverlink resources",
++ __FUNCTION__);
++ return -EINVAL;
++ }
++
++ if (res->flags & EFX_DL_FALCON_USE_MSI)
++ probe_flags |= NIC_FLAG_TRY_MSI;
++
++ dev = efrm_dev->pci_dev;
++ if (res->flags & EFX_DL_FALCON_DUAL_FUNC) {
++ unsigned vendor = dev->vendor;
++ EFRM_ASSERT(dev->bus != NULL);
++ dev = NULL;
++
++#if defined(EFX_NOT_UPSTREAM)
++ if (only_NIC != -1 &&
++ (efrm_dev->pci_dev->bus->number !=
++ ((only_NIC >> 8) & 0xFFFF)
++ || PCI_SLOT(efrm_dev->pci_dev->devfn) !=
++ (only_NIC & 0xFF))) {
++ EFRM_NOTICE("Hiding char device %x:%x",
++ efrm_dev->pci_dev->bus->number,
++ PCI_SLOT(efrm_dev->pci_dev->devfn));
++ return -ENODEV;
++ }
++#endif
++
++ while ((dev = pci_get_device(vendor, FALCON_S_DEVID, dev))
++ != NULL) {
++ EFRM_ASSERT(dev->bus != NULL);
++			/* With PCIe (since it's point-to-point)
++			 * the slot ID is usually 0 and
++			 * the bus ID changes from NIC to NIC, so we
++			 * really need to check both. */
++ if (PCI_SLOT(dev->devfn) ==
++ PCI_SLOT(efrm_dev->pci_dev->devfn)
++ && dev->bus->number ==
++ efrm_dev->pci_dev->bus->number)
++ break;
++ }
++ if (dev == NULL) {
++ EFRM_ERR("%s: Unable to find falcon secondary "
++ "PCI device.", __FUNCTION__);
++ return -ENODEV;
++ }
++ pci_dev_put(dev);
++ }
++
++ init_vi_resource_dimensions(&res_dim, res);
++
++ rc = efrm_nic_add(dev, probe_flags, net_dev->dev_addr, &lnic,
++ res->biu_lock,
++ res->buffer_table_min, res->buffer_table_max,
++ &res_dim);
++ if (rc != 0)
++ return rc;
++
++ nic = &lnic->nic;
++ nic->mtu = net_dev->mtu + ETH_HLEN;
++ nic->net_driver_dev = efrm_dev;
++ nic->ifindex = net_dev->ifindex;
++#ifdef HAS_NET_NAMESPACE
++ nic->nd_net = net_dev->nd_net;
++#endif
++ efrm_dev->priv = nic;
++
++ /* Register a callback so we're told when MTU changes.
++ * We dynamically allocate efx_dl_callbacks, because
++	 * the callbacks that we want depend on the NIC type.
++ */
++ lnic->dl_callbacks =
++ kmalloc(sizeof(struct efx_dl_callbacks), GFP_KERNEL);
++ if (!lnic->dl_callbacks) {
++ EFRM_ERR("Out of memory (%s)", __FUNCTION__);
++ efrm_nic_del(lnic);
++ return -ENOMEM;
++ }
++ memset(lnic->dl_callbacks, 0, sizeof(*lnic->dl_callbacks));
++ lnic->dl_callbacks->mtu_changed = efrm_dl_mtu_changed;
++
++ if ((res->flags & EFX_DL_FALCON_DUAL_FUNC) == 0) {
++ /* Net driver receives all management events.
++ * Register a callback to receive the ones
++ * we're interested in. */
++ lnic->dl_callbacks->event = efrm_dl_event_falcon;
++ }
++
++ rc = efx_dl_register_callbacks(efrm_dev, lnic->dl_callbacks);
++ if (rc < 0) {
++ EFRM_ERR("%s: efx_dl_register_callbacks failed (%d)",
++ __FUNCTION__, rc);
++ kfree(lnic->dl_callbacks);
++ efrm_nic_del(lnic);
++ return rc;
++ }
++
++ return 0;
++}
++
++/* When we unregister ourselves on module removal, this function will be
++ * called for all the devices we claimed */
++static void efrm_dl_remove(struct efx_dl_device *efrm_dev)
++{
++ struct efhw_nic *nic = efrm_dev->priv;
++ struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++ EFRM_TRACE("%s called", __FUNCTION__);
++ if (lnic->dl_callbacks) {
++ efx_dl_unregister_callbacks(efrm_dev, lnic->dl_callbacks);
++ kfree(lnic->dl_callbacks);
++ }
++ if (efrm_dev->priv)
++ efrm_nic_del(lnic);
++ EFRM_TRACE("%s OK", __FUNCTION__);
++}
++
++static void efrm_dl_reset_suspend(struct efx_dl_device *efrm_dev)
++{
++ EFRM_NOTICE("%s:", __FUNCTION__);
++}
++
++static void efrm_dl_reset_resume(struct efx_dl_device *efrm_dev, int ok)
++{
++ EFRM_NOTICE("%s: ok=%d", __FUNCTION__, ok);
++}
++
++int efrm_driverlink_register(void)
++{
++ EFRM_TRACE("%s:", __FUNCTION__);
++ return efx_dl_register_driver(&efrm_dl_driver);
++}
++
++void efrm_driverlink_unregister(void)
++{
++ EFRM_TRACE("%s:", __FUNCTION__);
++ efx_dl_unregister_driver(&efrm_dl_driver);
++}
++
++static void efrm_dl_mtu_changed(struct efx_dl_device *efx_dev, int mtu)
++{
++ struct efhw_nic *nic = efx_dev->priv;
++
++ ASSERT_RTNL(); /* Since we're looking at efx_dl_device::port_net_dev */
++
++ EFRM_TRACE("%s: old=%d new=%d", __FUNCTION__, nic->mtu, mtu + ETH_HLEN);
++ /* If this happened we must have agreed to it above */
++ nic->mtu = mtu + ETH_HLEN;
++}
++
++static void efrm_dl_event_falcon(struct efx_dl_device *efx_dev, void *p_event)
++{
++ struct efhw_nic *nic = efx_dev->priv;
++ struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++ efhw_event_t *ev = p_event;
++
++ switch (FALCON_EVENT_CODE(ev)) {
++ case FALCON_EVENT_CODE_CHAR:
++ falcon_handle_char_event(nic, lnic->ev_handlers, ev);
++ break;
++ default:
++ EFRM_WARN("%s: unknown event type=%x", __FUNCTION__,
++ (unsigned)FALCON_EVENT_CODE(ev));
++ break;
++ }
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/driver_object.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/driver_object.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/driver_object.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/driver_object.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,174 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains support for the global driver variables.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/efrm/resource.h>
++#include <ci/efrm/debug.h>
++
++/* We use #define rather than static inline here so that the Windows
++ * "prefast" compiler can see its own locking primitives when these
++ * two functions are used (and can then perform extra checking at the
++ * call sites).
++ *
++ * Both macros operate on an irq_flags_t.
++ */
++
++#define efrm_driver_lock(irqlock_state) \
++ spin_lock_irqsave(&efrm_nic_table.lock, irqlock_state)
++
++#define efrm_driver_unlock(irqlock_state) \
++	spin_unlock_irqrestore(&efrm_nic_table.lock, \
++			       irqlock_state)
++
++/* These routines are all methods on the architecturally singleton
++ global variables: efrm_nic_table, efrm_rm_table.
++
++ I hope we never find a driver model that does not allow global
++ structure variables :) (but that would break almost every driver I've
++ ever seen).
++*/
++
++/*! Exported driver state */
++struct efrm_nic_table efrm_nic_table;
++EXPORT_SYMBOL(efrm_nic_table);
++
++/* Internal table of resource managers.
++ * We'd prefer not to export it, but the char driver still uses
++ * efrm_rm_table, so it is deliberately declared in the private
++ * header. */
++struct efrm_resource_manager *efrm_rm_table[EFRM_RESOURCE_NUM];
++EXPORT_SYMBOL(efrm_rm_table);
++
++int efrm_driver_ctor(void)
++{
++ memset(&efrm_nic_table, 0, sizeof(efrm_nic_table));
++ memset(&efrm_rm_table, 0, sizeof(efrm_rm_table));
++
++ spin_lock_init(&efrm_nic_table.lock);
++
++ EFRM_TRACE("%s: driver created", __FUNCTION__);
++ return 0;
++}
++
++int efrm_driver_dtor(void)
++{
++ EFRM_ASSERT(!efrm_nic_table_held());
++
++ spin_lock_destroy(&efrm_nic_table.lock);
++ EFRM_TRACE("%s: driver deleted", __FUNCTION__);
++ return 0;
++}
++
++int efrm_driver_register_nic(struct efhw_nic *nic, int nic_index)
++{
++ int rc = 0;
++ irq_flags_t lock_flags;
++
++ EFRM_ASSERT(nic_index >= 0);
++
++ efrm_driver_lock(lock_flags);
++
++ if (efrm_nic_table_held()) {
++ EFRM_WARN("%s: driver object is in use", __FUNCTION__);
++ rc = -EBUSY;
++ goto done;
++ }
++
++ if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) {
++ EFRM_WARN("%s: filled up NIC table size %d", __FUNCTION__,
++ EFHW_MAX_NR_DEVS);
++ rc = -E2BIG;
++ goto done;
++ }
++
++ EFRM_ASSERT(efrm_nic_table.nic[nic_index] == NULL);
++ efrm_nic_table.nic[nic_index] = nic;
++ nic->index = nic_index;
++
++ if (efrm_nic_table.a_nic == NULL)
++ efrm_nic_table.a_nic = nic;
++
++ efrm_nic_table.nic_count++;
++ efrm_driver_unlock(lock_flags);
++ return rc;
++
++done:
++ efrm_driver_unlock(lock_flags);
++ return rc;
++}
++
++int efrm_driver_unregister_nic(struct efhw_nic *nic)
++{
++ int rc = 0;
++ int nic_index = nic->index;
++ irq_flags_t lock_flags;
++
++ EFRM_ASSERT(nic_index >= 0);
++
++ efrm_driver_lock(lock_flags);
++
++ if (efrm_nic_table_held()) {
++ EFRM_WARN("%s: driver object is in use", __FUNCTION__);
++ rc = -EBUSY;
++ goto done;
++ }
++
++ EFRM_ASSERT(efrm_nic_table.nic[nic_index] == nic);
++
++ nic->index = -1;
++ efrm_nic_table.nic[nic_index] = NULL;
++
++ --efrm_nic_table.nic_count;
++
++ if (efrm_nic_table.a_nic == nic) {
++ if (efrm_nic_table.nic_count == 0) {
++ efrm_nic_table.a_nic = NULL;
++ } else {
++ for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS;
++ nic_index++) {
++ if (efrm_nic_table.nic[nic_index] != NULL)
++ efrm_nic_table.a_nic =
++ efrm_nic_table.nic[nic_index];
++ }
++ EFRM_ASSERT(efrm_nic_table.a_nic);
++ }
++ }
++
++done:
++ efrm_driver_unlock(lock_flags);
++ return rc;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/efx_vi_shm.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/efx_vi_shm.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/efx_vi_shm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/efx_vi_shm.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,701 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides the implementation of the EFX VI API, used by the
++ * Xen acceleration driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "linux_resource_internal.h"
++#include <ci/efrm/nic_table.h>
++#include <ci/efrm/vi_resource_manager.h>
++#include <ci/driver/resource/efx_vi.h>
++#include <ci/efrm/filter.h>
++#include <ci/efrm/buffer_table.h>
++#include <linux/pci.h>
++#include "kernel_compat.h"
++
++#if EFX_VI_STATIC_FILTERS
++struct filter_list_t {
++ struct filter_list_t *next;
++ struct filter_resource *fres;
++};
++#endif
++
++struct efx_vi_state {
++ struct vi_resource *vi_res;
++
++ int nic_index;
++
++ void (*callback_fn)(void *arg, int is_timeout);
++ void *callback_arg;
++
++ struct completion flush_completion;
++
++#if EFX_VI_STATIC_FILTERS
++ struct filter_list_t fres[EFX_VI_STATIC_FILTERS];
++ struct filter_list_t *free_fres;
++ struct filter_list_t *used_fres;
++#endif
++};
++
++static void efx_vi_flush_complete(void *state_void)
++{
++ struct efx_vi_state *state = (struct efx_vi_state *)state_void;
++
++ complete(&state->flush_completion);
++}
++
++static inline int alloc_ep(struct efx_vi_state *state)
++{
++ int rc;
++
++ rc = efrm_vi_resource_alloc(NULL, EFHW_VI_JUMBO_EN,
++ efx_vi_eventq_size,
++ FALCON_DMA_Q_DEFAULT_TX_SIZE,
++ FALCON_DMA_Q_DEFAULT_RX_SIZE,
++ 0, 0, &state->vi_res, NULL, NULL, NULL,
++ NULL);
++ if (rc < 0) {
++ EFRM_ERR("%s: ERROR efrm_vi_resource_alloc error %d",
++ __FUNCTION__, rc);
++ return rc;
++ }
++
++ efrm_vi_register_flush_callback(state->vi_res, &efx_vi_flush_complete,
++ (void *)state);
++
++ return 0;
++}
++
++static int free_ep(struct efx_vi_state *efx_state)
++{
++ efrm_vi_resource_release(efx_state->vi_res);
++
++ return 0;
++}
++
++#if EFX_VI_STATIC_FILTERS
++static int efx_vi_alloc_static_filters(struct efx_vi_state *efx_state)
++{
++ int i;
++ int rc;
++
++ efx_state->free_fres = efx_state->used_fres = NULL;
++
++ for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) {
++ rc = efrm_filter_resource_alloc(efx_state->vi_res,
++ &efx_state->fres[i].fres);
++ if (rc < 0) {
++ EFRM_ERR("%s: efrm_filter_resource_alloc failed: %d",
++ __FUNCTION__, rc);
++ while (i > 0) {
++ i--;
++ efrm_filter_resource_release(efx_state->
++ fres[i].fres);
++ }
++ efx_state->free_fres = NULL;
++ return rc;
++ }
++ efx_state->fres[i].next = efx_state->free_fres;
++ efx_state->free_fres = &efx_state->fres[i];
++ }
++
++ return 0;
++}
++#endif
++
++int efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index)
++{
++ struct efx_vi_state *efx_state;
++ int rc;
++
++ BUG_ON(nic_index < 0 || nic_index >= EFHW_MAX_NR_DEVS);
++
++ efx_state = kmalloc(sizeof(struct efx_vi_state), GFP_KERNEL);
++
++ if (!efx_state) {
++ EFRM_ERR("%s: failed to allocate memory for efx_vi_state",
++ __FUNCTION__);
++ rc = -ENOMEM;
++ goto fail;
++ }
++
++ efx_state->nic_index = nic_index;
++ init_completion(&efx_state->flush_completion);
++
++ /* basically allocate_pt_endpoint() */
++ rc = alloc_ep(efx_state);
++ if (rc) {
++ EFRM_ERR("%s: alloc_ep failed: %d", __FUNCTION__, rc);
++ goto fail_no_pt;
++ }
++#if EFX_VI_STATIC_FILTERS
++	/* Statically allocate a set of filter resources - this lifts the
++	   restriction that efx_vi_filter() cannot be called from
++	   in_atomic() context */
++ rc = efx_vi_alloc_static_filters(efx_state);
++ if (rc)
++ goto fail_no_filters;
++#endif
++
++ *vih_out = efx_state;
++
++ return 0;
++#if EFX_VI_STATIC_FILTERS
++fail_no_filters:
++ free_ep(efx_state);
++#endif
++fail_no_pt:
++ kfree(efx_state);
++fail:
++ return rc;
++}
++EXPORT_SYMBOL(efx_vi_alloc);
++
++void efx_vi_free(struct efx_vi_state *vih)
++{
++ struct efx_vi_state *efx_state = vih;
++
++	/* TODO flush DMA channels, init DMA queues? See ef_free_vnic() */
++#if EFX_VI_STATIC_FILTERS
++ int i;
++
++ for (i = 0; i < EFX_VI_STATIC_FILTERS; i++)
++ efrm_filter_resource_release(efx_state->fres[i].fres);
++#endif
++
++ if (efx_state->vi_res)
++ free_ep(efx_state);
++
++ kfree(efx_state);
++}
++EXPORT_SYMBOL(efx_vi_free);
++
++void efx_vi_reset(struct efx_vi_state *vih)
++{
++ struct efx_vi_state *efx_state = vih;
++
++ efrm_pt_flush(efx_state->vi_res);
++
++ while (wait_for_completion_timeout(&efx_state->flush_completion, HZ)
++ == 0)
++ efrm_vi_resource_flush_retry(efx_state->vi_res);
++
++	/* Reset the event queue */
++ efrm_eventq_reset(efx_state->vi_res, 0);
++ return;
++}
++EXPORT_SYMBOL(efx_vi_reset);
++
++static void
++efx_vi_eventq_callback(void *context, int is_timeout, struct efhw_nic *nic)
++{
++ struct efx_vi_state *efx_state = (struct efx_vi_state *)context;
++
++ EFRM_ASSERT(efx_state->callback_fn);
++
++ return efx_state->callback_fn(efx_state->callback_arg, is_timeout);
++}
++
++int
++efx_vi_eventq_register_callback(struct efx_vi_state *vih,
++ void (*callback)(void *context, int is_timeout),
++ void *context)
++{
++ struct efx_vi_state *efx_state = vih;
++
++ efx_state->callback_fn = callback;
++ efx_state->callback_arg = context;
++
++ /* Register the eventq timeout event callback */
++ efrm_eventq_register_callback(efx_state->vi_res,
++ efx_vi_eventq_callback, efx_state);
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_vi_eventq_register_callback);
++
++int efx_vi_eventq_kill_callback(struct efx_vi_state *vih)
++{
++ struct efx_vi_state *efx_state = vih;
++
++ if (efx_state->vi_res->evq_callback_fn)
++ efrm_eventq_kill_callback(efx_state->vi_res);
++
++ efx_state->callback_fn = NULL;
++ efx_state->callback_arg = NULL;
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_vi_eventq_kill_callback);
++
++struct efx_vi_dma_map_state {
++ struct efhw_buffer_table_allocation bt_handle;
++ int n_pages;
++ dma_addr_t *dma_addrs;
++};
++
++int
++efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages,
++ int n_pages, struct efx_vi_dma_map_state **dmh_out)
++{
++ struct efx_vi_state *efx_state = vih;
++ int order = fls(n_pages - 1), rc, i, evq_id;
++ dma_addr_t dma_addr;
++ struct efx_vi_dma_map_state *dm_state;
++
++ if (n_pages != (1 << order)) {
++ EFRM_WARN("%s: Can only allocate buffers in power of 2 "
++ "sizes (not %d)", __FUNCTION__, n_pages);
++ return -EINVAL;
++ }
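++	/* e.g. n_pages == 8: fls(7) == 3 and 1 << 3 == 8, so the check
++	 * passes; n_pages == 6: fls(5) == 3 but 1 << 3 != 6, so -EINVAL. */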
++
++ dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL);
++ if (!dm_state)
++ return -ENOMEM;
++
++ dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages,
++ GFP_KERNEL);
++ if (!dm_state->dma_addrs) {
++ kfree(dm_state);
++ return -ENOMEM;
++ }
++
++ rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle);
++ if (rc < 0) {
++ kfree(dm_state->dma_addrs);
++ kfree(dm_state);
++ return rc;
++ }
++
++ evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle);
++ for (i = 0; i < n_pages; i++) {
++ /* TODO do we need to get_page() here ? */
++
++ dma_addr = pci_map_page
++ (linux_efhw_nic(efrm_nic_table.nic[efx_state->nic_index])->
++ pci_dev, pages[i], 0, PAGE_SIZE, PCI_DMA_TODEVICE);
++
++ efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr,
++ evq_id);
++
++ dm_state->dma_addrs[i] = dma_addr;
++
++		/* It would be nice not to have to call commit each time, but
++		 * a comment says there are hardware restrictions on how long
++		 * you can go without it, so commit each time to be safe */
++ efrm_buffer_table_commit();
++ }
++
++ dm_state->n_pages = n_pages;
++
++ *dmh_out = dm_state;
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_vi_dma_map_pages);
++
++/* This function is needed because Xen can't get pages for grants in
++   dom0, but can get the DMA addresses */
++int
++efx_vi_dma_map_addrs(struct efx_vi_state *vih,
++ unsigned long long *bus_dev_addrs,
++ int n_pages, struct efx_vi_dma_map_state **dmh_out)
++{
++ struct efx_vi_state *efx_state = vih;
++ int order = fls(n_pages - 1), rc, i, evq_id;
++ dma_addr_t dma_addr;
++ struct efx_vi_dma_map_state *dm_state;
++
++ if (n_pages != (1 << order)) {
++ EFRM_WARN("%s: Can only allocate buffers in power of 2 "
++ "sizes (not %d)", __FUNCTION__, n_pages);
++ return -EINVAL;
++ }
++
++ dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL);
++ if (!dm_state)
++ return -ENOMEM;
++
++ dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages,
++ GFP_KERNEL);
++ if (!dm_state->dma_addrs) {
++ kfree(dm_state);
++ return -ENOMEM;
++ }
++
++ rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle);
++ if (rc < 0) {
++ kfree(dm_state->dma_addrs);
++ kfree(dm_state);
++ return rc;
++ }
++
++ evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle);
++#if 0
++ EFRM_WARN("%s: mapping %d pages to evq %d, bt_ids %d-%d\n",
++ __FUNCTION__, n_pages, evq_id,
++ dm_state->bt_handle.base,
++ dm_state->bt_handle.base + n_pages);
++#endif
++ for (i = 0; i < n_pages; i++) {
++
++ dma_addr = (dma_addr_t)bus_dev_addrs[i];
++
++ efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr,
++ evq_id);
++
++ dm_state->dma_addrs[i] = dma_addr;
++
++		/* It would be nice not to have to call commit each time, but
++		 * a comment says there are hardware restrictions on how long
++		 * you can go without it, so commit each time to be safe */
++ efrm_buffer_table_commit();
++ }
++
++ dm_state->n_pages = n_pages;
++
++ *dmh_out = dm_state;
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_vi_dma_map_addrs);
++
++void
++efx_vi_dma_unmap_pages(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh)
++{
++ struct efx_vi_state *efx_state = vih;
++ struct efx_vi_dma_map_state *dm_state =
++ (struct efx_vi_dma_map_state *)dmh;
++ int i;
++
++ efrm_buffer_table_free(&dm_state->bt_handle);
++
++ for (i = 0; i < dm_state->n_pages; ++i)
++ pci_unmap_page(linux_efhw_nic
++ (efrm_nic_table.nic[efx_state->nic_index])->pci_dev,
++ dm_state->dma_addrs[i], PAGE_SIZE, PCI_DMA_TODEVICE);
++
++ kfree(dm_state->dma_addrs);
++ kfree(dm_state);
++
++ return;
++}
++EXPORT_SYMBOL(efx_vi_dma_unmap_pages);
++
++void
++efx_vi_dma_unmap_addrs(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh)
++{
++ struct efx_vi_dma_map_state *dm_state =
++ (struct efx_vi_dma_map_state *)dmh;
++
++ efrm_buffer_table_free(&dm_state->bt_handle);
++
++ kfree(dm_state->dma_addrs);
++ kfree(dm_state);
++
++ return;
++}
++EXPORT_SYMBOL(efx_vi_dma_unmap_addrs);
++
++unsigned
++efx_vi_dma_get_map_addr(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh)
++{
++ struct efx_vi_dma_map_state *dm_state =
++ (struct efx_vi_dma_map_state *)dmh;
++
++ return EFHW_BUFFER_ADDR(dm_state->bt_handle.base, 0);
++}
++EXPORT_SYMBOL(efx_vi_dma_get_map_addr);
++
++#if EFX_VI_STATIC_FILTERS
++static int
++get_filter(struct efx_vi_state *efx_state,
++ efrm_resource_handle_t pthandle, struct filter_resource **fres_out)
++{
++ struct filter_list_t *flist;
++ if (efx_state->free_fres == NULL)
++ return -ENOMEM;
++ else {
++ flist = efx_state->free_fres;
++ efx_state->free_fres = flist->next;
++ flist->next = efx_state->used_fres;
++ efx_state->used_fres = flist;
++ *fres_out = flist->fres;
++ return 0;
++ }
++}
++#endif
++
++static void
++release_filter(struct efx_vi_state *efx_state, struct filter_resource *fres)
++{
++#if EFX_VI_STATIC_FILTERS
++ struct filter_list_t *flist = efx_state->used_fres, *prev = NULL;
++ while (flist) {
++ if (flist->fres == fres) {
++ if (prev)
++ prev->next = flist->next;
++ else
++ efx_state->used_fres = flist->next;
++ flist->next = efx_state->free_fres;
++ efx_state->free_fres = flist;
++ return;
++ }
++ prev = flist;
++ flist = flist->next;
++ }
++ EFRM_ERR("%s: couldn't find filter", __FUNCTION__);
++#else
++ return efrm_filter_resource_release(fres);
++#endif
++}
++
++int
++efx_vi_filter(struct efx_vi_state *vih, int protocol,
++ unsigned ip_addr_be32, int port_le16,
++ struct filter_resource_t **fh_out)
++{
++ struct efx_vi_state *efx_state = vih;
++ struct filter_resource *frs;
++ int rc;
++
++#if EFX_VI_STATIC_FILTERS
++ rc = get_filter(efx_state, efx_state->vi_res->rs.rs_handle, &frs);
++#else
++ rc = efrm_filter_resource_alloc(efx_state->vi_res, &frs);
++#endif
++ if (rc < 0)
++ return rc;
++
++ /* Add the hardware filter. We pass in the source port and address
++ * as 0 (wildcard) to minimise the number of filters needed. */
++ if (protocol == IPPROTO_TCP) {
++ rc = efrm_filter_resource_tcp_set(frs, 0, 0, ip_addr_be32,
++ port_le16);
++ } else {
++ rc = efrm_filter_resource_udp_set(frs, 0, 0, ip_addr_be32,
++ port_le16);
++ }
++
++ *fh_out = (struct filter_resource_t *)frs;
++
++ return rc;
++}
++EXPORT_SYMBOL(efx_vi_filter);
++
++int
++efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh)
++{
++ struct efx_vi_state *efx_state = vih;
++ struct filter_resource *frs = (struct filter_resource *)fh;
++ int rc;
++
++ rc = efrm_filter_resource_clear(frs);
++ release_filter(efx_state, frs);
++
++ return rc;
++}
++EXPORT_SYMBOL(efx_vi_filter_stop);
++
++int
++efx_vi_hw_resource_get_virt(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length)
++{
++ EFRM_NOTICE("%s: TODO!", __FUNCTION__);
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_vi_hw_resource_get_virt);
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++int
++efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length)
++{
++ struct efx_vi_state *efx_state = vih;
++ int i, ni = efx_state->nic_index;
++ struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_table.nic[ni]);
++ unsigned long phys = lnic->ctr_ap_pci_addr;
++ struct efrm_resource *ep_res = &efx_state->vi_res->rs;
++ unsigned ep_mmap_bytes;
++
++ if (*length < EFX_VI_HW_RESOURCE_MAXSIZE)
++ return -EINVAL;
++
++ mdata->version = 0;
++
++ mdata->nic_arch = efrm_nic_table.nic[ni]->devtype.arch;
++ mdata->nic_variant = efrm_nic_table.nic[ni]->devtype.variant;
++ mdata->nic_revision = efrm_nic_table.nic[ni]->devtype.revision;
++
++ mdata->evq_order =
++ efx_state->vi_res->nic_info[ni].evq_pages.iobuff.order;
++ mdata->evq_offs = efx_state->vi_res->nic_info[ni].evq_pages.iobuff_off;
++ mdata->evq_capacity = efx_vi_eventq_size;
++ mdata->instance = EFRM_RESOURCE_INSTANCE(ep_res->rs_handle);
++ mdata->rx_capacity = FALCON_DMA_Q_DEFAULT_RX_SIZE;
++ mdata->tx_capacity = FALCON_DMA_Q_DEFAULT_TX_SIZE;
++
++ ep_mmap_bytes = FALCON_DMA_Q_DEFAULT_MMAP;
++ EFRM_ASSERT(ep_mmap_bytes == PAGE_SIZE * 2);
++
++#ifndef NDEBUG
++ {
++ /* Sanity about doorbells */
++ unsigned long tx_dma_page_addr, rx_dma_page_addr;
++
++ /* get rx doorbell address */
++ rx_dma_page_addr =
++ phys + falcon_rx_dma_page_addr(mdata->instance);
++ /* get tx doorbell address */
++ tx_dma_page_addr =
++ phys + falcon_tx_dma_page_addr(mdata->instance);
++
++ /* Check the lower bits of the TX doorbell will be
++ * consistent. */
++ EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST &
++ FALCON_DMA_PAGE_MASK) ==
++ (TX_DESC_UPD_REG_PAGE123K_OFST &
++ FALCON_DMA_PAGE_MASK));
++
++ /* Check the lower bits of the RX doorbell will be
++ * consistent. */
++ EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST &
++ FALCON_DMA_PAGE_MASK) ==
++ (RX_DESC_UPD_REG_PAGE123K_OFST &
++ FALCON_DMA_PAGE_MASK));
++
++ /* Check that the doorbells will be in the same page. */
++ EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK) ==
++ (RX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK));
++
++ /* Check that the doorbells are in the same page. */
++ EFRM_ASSERT((tx_dma_page_addr & PAGE_MASK) ==
++ (rx_dma_page_addr & PAGE_MASK));
++
++ /* Check that the TX doorbell offset is correct. */
++ EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
++ (tx_dma_page_addr & ~PAGE_MASK));
++
++ /* Check that the RX doorbell offset is correct. */
++ EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) ==
++ (rx_dma_page_addr & ~PAGE_MASK));
++ }
++#endif
++
++ i = 0;
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_TXDMAQ;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address =
++ (unsigned long)efx_state->vi_res->nic_info[ni].
++ dmaq_pages[EFRM_VI_RM_DMA_QUEUE_TX].kva;
++
++ i++;
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_RXDMAQ;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address =
++ (unsigned long)efx_state->vi_res->nic_info[ni].
++ dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva;
++
++ /* NB EFX_VI_HW_RESOURCE_TXBELL not used on Falcon */
++ /* NB EFX_VI_HW_RESOURCE_RXBELL not used on Falcon */
++
++ i++;
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address =
++ (unsigned long)phys + falcon_timer_page_addr(mdata->instance);
++
++ /* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */
++
++ i++;
++ switch (efrm_nic_table.nic[ni]->devtype.variant) {
++ case 'A':
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address = (unsigned long)phys +
++ EVQ_RPTR_REG_OFST +
++ (FALCON_REGISTER128 * mdata->instance);
++ break;
++ case 'B':
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address =
++ (unsigned long)FALCON_EVQ_RPTR_REG_P0;
++ break;
++ default:
++ EFRM_ASSERT(0);
++ break;
++ }
++
++ i++;
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQMEMKVA;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_IOBUFFER;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address = (unsigned long)efx_state->vi_res->
++ nic_info[ni].evq_pages.iobuff.kva;
++
++ i++;
++ hw_res_array[i].type = EFX_VI_HW_RESOURCE_BELLPAGE;
++ hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL;
++ hw_res_array[i].more_to_follow = 0;
++ hw_res_array[i].length = PAGE_SIZE;
++ hw_res_array[i].address =
++ (unsigned long)(phys +
++ falcon_tx_dma_page_addr(mdata->instance))
++ >> PAGE_SHIFT;
++
++ i++;
++
++ EFRM_ASSERT(i <= *length);
++
++ *length = i;
++
++ return 0;
++}
++EXPORT_SYMBOL(efx_vi_hw_resource_get_phys);
++#endif
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/eventq.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/eventq.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/eventq.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/eventq.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,320 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains event queue support.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efhw/debug.h>
++#include <ci/efhw/iopage.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/eventq.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efhw/nic.h>
++
++#define KEVENTQ_MAGIC 0x07111974
++
++/*! Helper function to allocate the iobuffer needed by an eventq
++ * - it ensures the eventq has the correct alignment for the NIC
++ *
++ * \param nic NIC the iobuffer is allocated for
++ * \param h Event-queue hardware state to fill in
++ * \param evq_instance Event-queue instance (index)
++ * \param buf_bytes Requested size of eventq
++ * \param buf_bytes Requested size of eventq
++ * \return < 0 if iobuffer allocation fails
++ */
++int
++efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
++ struct eventq_resource_hardware *h,
++ int evq_instance, unsigned buf_bytes)
++{
++ unsigned int page_order;
++ int rc;
++
++ /* Allocate an iobuffer. */
++ page_order = get_order(buf_bytes);
++
++ h->iobuff_off = 0;
++
++ EFHW_TRACE("allocating eventq size %x",
++ 1u << (page_order + PAGE_SHIFT));
++ rc = efhw_iopages_alloc(nic, &h->iobuff, page_order);
++ if (rc < 0) {
++ EFHW_WARN("%s: failed to allocate %u pages",
++ __FUNCTION__, 1u << page_order);
++ return rc;
++ }
++
++ /* Set the eventq pages to match EFHW_CLEAR_EVENT() */
++ if (EFHW_CLEAR_EVENT_VALUE)
++ memset(efhw_iopages_ptr(&h->iobuff) + h->iobuff_off,
++ EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE);
++
++ EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order));
++
++	/* For Falcon the NIC is programmed with the base buffer address of a
++	 * contiguous region of buffer space. This means that event queues
++	 * larger than a page can be expected to allocate even when the
++	 * host's physical memory is fragmented. */
++ EFHW_ASSERT(efhw_nic_have_hw(nic));
++ EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order);
++
++ /* Initialise the buffer table entries. */
++ falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base,
++ efhw_iopages_dma_addr(&h->iobuff) +
++ h->iobuff_off, EFHW_NIC_PAGE_SIZE, 0,
++ 1 << page_order, 0);
++
++ if (evq_instance >= FALCON_EVQ_TBL_RESERVED)
++ falcon_nic_buffer_table_confirm(nic);
++ return 0;
++}
++
++/**********************************************************************
++ * Kernel event queue management.
++ */
++
++/* Values for [struct efhw_keventq::lock] field. */
++#define KEVQ_UNLOCKED 0
++#define KEVQ_LOCKED 1
++#define KEVQ_RECHECK 2
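++
++/* Reading aid for the cmpxchg protocol implemented by efhw_keventq_poll()
++ * below; all transitions are made atomically on [struct efhw_keventq::lock]:
++ *
++ *	UNLOCKED -> LOCKED	poller takes ownership and consumes events
++ *	LOCKED   -> RECHECK	concurrent caller asks the owner to re-poll
++ *	RECHECK  -> LOCKED	owner picks up the request and polls again
++ *	LOCKED   -> UNLOCKED	owner exits once there are no events and
++ *				no re-poll request
++ */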
++
++int
++efhw_keventq_ctor(struct efhw_nic *nic, int instance,
++ struct efhw_keventq *evq,
++ struct efhw_ev_handler *ev_handlers)
++{
++ int rc;
++ unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t);
++
++ evq->instance = instance;
++ evq->ev_handlers = ev_handlers;
++
++ /* allocate an IObuffer for the eventq */
++ rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance,
++ buf_bytes);
++ if (rc < 0)
++ return rc;
++
++	/* Zero the timer-value for this queue,
++	   and tell the NIC about the event queue. */
++ efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity,
++ efhw_iopages_dma_addr(&evq->hw.iobuff) +
++ evq->hw.iobuff_off,
++ evq->hw.buf_tbl_alloc.base);
++
++ evq->lock = KEVQ_UNLOCKED;
++ evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff) + evq->hw.iobuff_off;
++ evq->evq_ptr = 0;
++ evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u;
++
++ EFHW_TRACE("%s: [%d] base=%p end=%p", __FUNCTION__, evq->instance,
++ evq->evq_base, evq->evq_base + buf_bytes);
++
++ return 0;
++}
++
++void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq)
++{
++ EFHW_ASSERT(evq);
++
++ EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance);
++
++	/* Zero the timer-value for this queue,
++	   and tell the NIC to stop using this event queue. */
++ efhw_nic_event_queue_disable(nic, evq->instance, 0);
++
++ /* free the pages used by the eventq itself */
++ efhw_iopages_free(nic, &evq->hw.iobuff);
++}
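++
++/* Illustrative lifecycle (hypothetical caller code; the ctor appears to
++ * expect evq->hw.capacity and evq->hw.buf_tbl_alloc to be set up first):
++ *
++ *	rc = efhw_keventq_ctor(nic, instance, evq, handlers);
++ *	...
++ *	efhw_keventq_poll(nic, evq);	(from interrupt or timer context)
++ *	...
++ *	efhw_keventq_dtor(nic, evq);
++ */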
++
++void
++efhw_handle_txdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
++ efhw_event_t *evp)
++{
++ int instance = (int)FALCON_EVENT_TX_FLUSH_Q_ID(evp);
++ EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);
++
++ if (!h->dmaq_flushed_fn) {
++ EFHW_WARN("%s: no handler registered", __FUNCTION__);
++ return;
++ }
++
++ h->dmaq_flushed_fn(nic, instance, false);
++}
++
++void
++efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h,
++ efhw_event_t *evp)
++{
++ int instance = (int)FALCON_EVENT_RX_FLUSH_Q_ID(evp);
++ EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance);
++
++ if (!h->dmaq_flushed_fn) {
++ EFHW_WARN("%s: no handler registered", __FUNCTION__);
++ return;
++ }
++
++ h->dmaq_flushed_fn(nic, instance, true);
++}
++
++void
++efhw_handle_wakeup_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
++ efhw_event_t *evp)
++{
++ if (!h->wakeup_fn) {
++ EFHW_WARN("%s: no handler registered", __FUNCTION__);
++ return;
++ }
++
++ h->wakeup_fn(nic, evp);
++}
++
++void
++efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
++ efhw_event_t *evp)
++{
++ if (!h->timeout_fn) {
++ EFHW_WARN("%s: no handler registered", __FUNCTION__);
++ return;
++ }
++
++ h->timeout_fn(nic, evp);
++}
++
++/**********************************************************************
++ * Kernel event queue event handling.
++ */
++
++int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q)
++{
++ efhw_event_t *ev;
++ int l, count = 0;
++
++ EFHW_ASSERT(nic);
++ EFHW_ASSERT(q);
++ EFHW_ASSERT(q->ev_handlers);
++
++ /* Acquire the lock, or mark the queue as needing re-checking. */
++ for (;;) {
++ l = q->lock;
++ if (l == KEVQ_UNLOCKED) {
++ if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l)
++ break;
++ } else if (l == KEVQ_LOCKED) {
++ if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l)
++ return 0;
++ } else { /* already marked for re-checking */
++ EFHW_ASSERT(l == KEVQ_RECHECK);
++ return 0;
++ }
++ }
++
++ if (unlikely(EFHW_EVENT_OVERFLOW(q, q)))
++ goto overflow;
++
++ ev = EFHW_EVENT_PTR(q, q, 0);
++
++#ifndef NDEBUG
++ if (!EFHW_IS_EVENT(ev))
++ EFHW_TRACE("%s: %d NO EVENTS!", __FUNCTION__, q->instance);
++#endif
++
++ for (;;) {
++ /* Convention for return codes for handlers is:
++ ** 0 - no error, event consumed
++ ** 1 - no error, event not consumed
++ ** -ve - error, event not consumed
++ */
++ if (likely(EFHW_IS_EVENT(ev))) {
++ count++;
++
++ switch (FALCON_EVENT_CODE(ev)) {
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ case FALCON_EVENT_CODE_CHAR:
++ falcon_handle_char_event(nic, q->ev_handlers,
++ ev);
++ break;
++#endif
++
++ default:
++ EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED "
++ "EVENT:"FALCON_EVENT_FMT,
++ q->instance,
++ FALCON_EVENT_PRI_ARG(*ev));
++ }
++
++ EFHW_CLEAR_EVENT(ev);
++ EFHW_EVENTQ_NEXT(q);
++
++ ev = EFHW_EVENT_PTR(q, q, 0);
++ } else {
++ /* No events left. Release the lock (checking if we
++ * need to re-poll to avoid race). */
++ l = q->lock;
++ if (l == KEVQ_LOCKED) {
++ if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED)
++ == l) {
++ EFHW_TRACE
++ ("efhw_keventq_poll: %d clean exit",
++ q->instance);
++ goto clean_exit;
++ }
++ }
++
++ /* Potentially more work to do. */
++ l = q->lock;
++ EFHW_ASSERT(l == KEVQ_RECHECK);
++ EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l);
++ EFHW_TRACE("efhw_keventq_poll: %d re-poll required",
++ q->instance);
++ }
++ }
++
++ /* shouldn't get here */
++ EFHW_ASSERT(0);
++
++overflow:
++ /* ?? Oh dear. Should we poll everything that could have possibly
++ ** happened? Or merely cry out in anguish...
++ */
++ EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****",
++ q->instance, nic->index);
++
++ q->lock = KEVQ_UNLOCKED;
++ return count;
++
++clean_exit:
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ /* Ack the processed events so that this event queue can potentially
++ raise interrupts again */
++ falcon_nic_evq_ack(nic, q->instance,
++ (EFHW_EVENT_OFFSET(q, q, 0) / sizeof(efhw_event_t)),
++ false);
++#endif
++ return count;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/falcon.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/falcon.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/falcon.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/falcon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2758 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains Falcon hardware support.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/debug.h>
++#include <ci/efhw/iopage.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efhw/falcon_hash.h>
++#include <ci/efhw/nic.h>
++#include <ci/efhw/eventq.h>
++#include <ci/efhw/checks.h>
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Workarounds and options
++ *
++ *---------------------------------------------------------------------------*/
++
++/* on for debug builds */
++#ifndef NDEBUG
++# define FALCON_FULL_FILTER_CACHE 1 /* complete SW shadow of filter tbl */
++# define FALCON_VERIFY_FILTERS 0
++#else /* Also adds duplicate filter check */
++# define FALCON_FULL_FILTER_CACHE 1 /* keep this on for some security */
++# define FALCON_VERIFY_FILTERS 0
++#endif
++
++/* options */
++#define RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL 8 /* default search limit */
++#define RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD 8 /* default search limit */
++#define RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL 8 /* default search limit */
++#define RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD 8 /* default search limit */
++#define RX_FILTER_CTL_SRCH_FUDGE_WILD 3 /* increase the search limit */
++#define RX_FILTER_CTL_SRCH_FUDGE_FULL 1 /* increase the search limit */
++
++#define FALCON_MAC_SET_TYPE_BY_SPEED 1
++
++/* FIXME: We should detect mode at runtime. */
++#define FALCON_BUFFER_TABLE_FULL_MODE 1
++
++/*----------------------------------------------------------------------------
++ *
++ * Debug Macros
++ *
++ *---------------------------------------------------------------------------*/
++
++#ifndef __KERNEL__
++#define _DEBUG_SYM_ extern
++#else
++#define _DEBUG_SYM_ static inline
++#endif
++
++/*----------------------------------------------------------------------------
++ *
++ * Macros and forward declarations
++ *
++ *--------------------------------------------------------------------------*/
++
++#define FALCON_REGION_NUM 4 /* number of supported memory regions */
++
++#define FALCON_BUFFER_TBL_HALF_BYTES 4
++#define FALCON_BUFFER_TBL_FULL_BYTES 8
++
++/* Shadow buffer table - hack for testing only */
++#if FALCON_BUFFER_TABLE_FULL_MODE == 0
++# define FALCON_USE_SHADOW_BUFFER_TABLE 1
++#else
++# define FALCON_USE_SHADOW_BUFFER_TABLE 0
++#endif
++
++#if FALCON_USE_SHADOW_BUFFER_TABLE
++static uint64_t _falcon_buffer_table[FALCON_BUFFER_TBL_NUM];
++#endif
++
++/*----------------------------------------------------------------------------
++ *
++ * Header assertion checks
++ *
++ *---------------------------------------------------------------------------*/
++
++#define FALCON_ASSERT_VALID() /* nothing yet */
++
++/* Falcon has a 128bit register model but most registers have useful
++ defaults or only implement a small number of bits. Some registers
++ can be programmed 32bits UNLOCKED all others should be interlocked
++ against other threads within the same protection domain.
++
++ Aim is for software to perform the minimum number of writes and
++ also to minimise the read-modify-write activity (which generally
++ indicates a lack of clarity in the use model).
++
++ Registers which are programmed in this module are listed below
++ together with the method of access. Care must be taken to ensure
++   these remain adequate if the register spec changes.
++
++ All 128bits programmed
++ FALCON_BUFFER_TBL_HALF
++ RX_FILTER_TBL
++ TX_DESC_PTR_TBL
++ RX_DESC_PTR_TBL
++ DRV_EV_REG
++
++ All 64bits programmed
++ FALCON_BUFFER_TBL_FULL
++
++ 32 bits are programmed (UNLOCKED)
++ EVQ_RPTR_REG
++
++ Low 64bits programmed remainder are written with a random number
++ RX_DC_CFG_REG
++ TX_DC_CFG_REG
++ SRM_RX_DC_CFG_REG
++ SRM_TX_DC_CFG_REG
++ BUF_TBL_CFG_REG
++ BUF_TBL_UPD_REG
++ SRM_UPD_EVQ_REG
++ EVQ_PTR_TBL
++ TIMER_CMD_REG
++ TX_PACE_TBL
++ FATAL_INTR_REG
++ INT_EN_REG (When enabling interrupts)
++ TX_FLUSH_DESCQ_REG
++ RX_FLUSH_DESCQ
++
++ Read Modify Write on low 32bits remainder are written with a random number
++ INT_EN_REG (When sending a driver interrupt)
++ DRIVER_REGX
++
++ Read Modify Write on low 64bits remainder are written with a random number
++ SRM_CFG_REG_OFST
++ RX_CFG_REG_OFST
++ RX_FILTER_CTL_REG
++
++ Read Modify Write on full 128bits
++ TXDP_RESERVED_REG (aka TXDP_UNDOCUMENTED)
++ TX_CFG_REG
++
++*/
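++
++/* Illustrative sketch of the interlocked 128-bit access pattern used for
++ * the registers listed above (REG_OFST, q0 and q1 are placeholders; the
++ * lock and write macros are the ones used throughout this file):
++ *
++ *	FALCON_LOCK_DECL;
++ *	...
++ *	FALCON_LOCK_LOCK(nic);
++ *	falcon_write_qq(EFHW_KVA(nic) + REG_OFST, q0, q1);
++ *	mmiowb();
++ *	FALCON_LOCK_UNLOCK(nic);
++ */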
++
++/*----------------------------------------------------------------------------
++ *
++ * Filters static data
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Defaults are set here to support dma.c */
++static unsigned tcp_full_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL;
++static unsigned tcp_wild_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD;
++static unsigned udp_full_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL;
++static unsigned udp_wild_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD;
++
++#if FALCON_VERIFY_FILTERS
++static void _falcon_nic_ipfilter_sanity(struct efhw_nic *nic);
++#endif
++
++/*----------------------------------------------------------------------------
++ *
++ * Filters low-level register interface
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Build the filter entry */
++static void
++_falcon_nic_ipfilter_build(struct efhw_nic *nic,
++ int tcp, int full, int rss_b0, int scat_b0,
++ uint filter_i, uint dmaq_id,
++ unsigned saddr_le32, unsigned sport_le16,
++ unsigned daddr_le32, unsigned dport_le16,
++ uint64_t *q0, uint64_t *q1)
++{
++ uint64_t v1, v2, v3, v4;
++ int type = tcp << 4 | full;
++
++ v4 = (((!tcp) << __DW4(TCP_UDP_1_LBN)) |
++ (dmaq_id << __DW4(RXQ_ID_1_LBN)));
++
++ switch (nic->devtype.variant) {
++ case 'A':
++ EFHW_ASSERT(!rss_b0);
++ break;
++ case 'B':
++ v4 |= scat_b0 << __DW4(SCATTER_EN_1_B0_LBN);
++ v4 |= rss_b0 << __DW4(RSS_EN_1_B0_LBN);
++ break;
++ default:
++ EFHW_ASSERT(0);
++ break;
++ }
++
++ v3 = daddr_le32;
++
++ switch (type) {
++
++ case 0x11: /* TCP_FULL */
++ case 0x01: /* UDP_FULL */
++ v2 = ((dport_le16 << __DW2(DEST_PORT_TCP_1_LBN)) |
++ (__HIGH(saddr_le32, SRC_IP_1_LBN, SRC_IP_1_WIDTH)));
++ v1 = ((__LOW(saddr_le32, SRC_IP_1_LBN, SRC_IP_1_WIDTH)) |
++ (sport_le16 << SRC_TCP_DEST_UDP_1_LBN));
++ break;
++
++ case 0x10: /* TCP_WILD */
++ v2 = ((uint64_t) dport_le16 << __DW2(DEST_PORT_TCP_1_LBN));
++ v1 = 0;
++ break;
++
++ case 0x00: /* UDP_WILD */
++ v2 = 0;
++ v1 = ((uint64_t) dport_le16 << SRC_TCP_DEST_UDP_0_LBN);
++ break;
++
++ default:
++ EFHW_ASSERT(0);
++ v2 = 0;
++ v1 = 0;
++ }
++
++ *q0 = (v2 << 32) | v1;
++ *q1 = (v4 << 32) | v3;
++}
++
++static void
++_falcon_nic_ipfilter_set(struct efhw_nic *nic, int tcp,
++ int full, int rss_b0, int scat_b0,
++ uint filter_i, uint dmaq_id,
++ unsigned saddr_le32, unsigned sport_le16,
++ unsigned daddr_le32, unsigned dport_le16)
++{
++ uint64_t q0, q1;
++
++ /* wish you wouldn't do this */
++ EFHW_BUILD_ASSERT(RX_FILTER_TBL1_OFST ==
++ RX_FILTER_TBL0_OFST + FALCON_REGISTER128);
++ EFHW_BUILD_ASSERT(TCP_UDP_1_LBN == TCP_UDP_0_LBN);
++ EFHW_BUILD_ASSERT(RXQ_ID_1_LBN == RXQ_ID_0_LBN);
++ EFHW_BUILD_ASSERT(DEST_IP_1_LBN == DEST_IP_0_LBN);
++ EFHW_BUILD_ASSERT(DEST_PORT_TCP_1_LBN == DEST_PORT_TCP_0_LBN);
++ EFHW_BUILD_ASSERT(SRC_IP_1_LBN == SRC_IP_0_LBN);
++ EFHW_BUILD_ASSERT(SRC_TCP_DEST_UDP_1_LBN == SRC_TCP_DEST_UDP_0_LBN);
++ EFHW_BUILD_ASSERT(SCATTER_EN_1_B0_LBN == SCATTER_EN_0_B0_LBN);
++ EFHW_BUILD_ASSERT(RSS_EN_1_B0_LBN == RSS_EN_0_B0_LBN);
++
++ EFHW_BUILD_ASSERT(TCP_UDP_1_WIDTH == TCP_UDP_0_WIDTH);
++ EFHW_BUILD_ASSERT(RXQ_ID_1_WIDTH == RXQ_ID_0_WIDTH);
++ EFHW_BUILD_ASSERT(DEST_IP_1_WIDTH == DEST_IP_0_WIDTH);
++ EFHW_BUILD_ASSERT(DEST_PORT_TCP_1_WIDTH == DEST_PORT_TCP_0_WIDTH);
++ EFHW_BUILD_ASSERT(SRC_IP_1_WIDTH == SRC_IP_0_WIDTH);
++ EFHW_BUILD_ASSERT(SRC_TCP_DEST_UDP_1_WIDTH == SRC_TCP_DEST_UDP_0_WIDTH);
++ EFHW_BUILD_ASSERT(SCATTER_EN_1_B0_WIDTH == SCATTER_EN_0_B0_WIDTH);
++ EFHW_BUILD_ASSERT(RSS_EN_1_B0_WIDTH == RSS_EN_0_B0_WIDTH);
++
++ /* TODO: Use filter table 1 as well */
++ ulong offset = RX_FILTER_TBL0_OFST + filter_i * 2 * FALCON_REGISTER128;
++
++ EFHW_TRACE("%s[%x]: offset=%lx", __FUNCTION__, filter_i, offset);
++
++	EFHW_TRACE("%s[%x]: tcp %d full %d rxq %d src=%x:%x dest=%x:%x%s%s",
++ __FUNCTION__, filter_i, tcp, full, dmaq_id,
++ saddr_le32, sport_le16, daddr_le32, dport_le16,
++ rss_b0 ? " RSS" : "", scat_b0 ? " SCAT" : "");
++
++ EFHW_ASSERT(filter_i < nic->filter_tbl_size);
++
++ /* dword 4 */
++ __DW4CHCK(TCP_UDP_1_LBN, TCP_UDP_1_WIDTH);
++ __DW4CHCK(RXQ_ID_1_LBN, RXQ_ID_1_WIDTH);
++
++ __RANGECHCK(tcp, TCP_UDP_1_WIDTH);
++ __RANGECHCK(dmaq_id, RXQ_ID_1_WIDTH);
++
++ /* dword 3 */
++ __DW3CHCK(DEST_IP_1_LBN, DEST_IP_1_WIDTH);
++ __RANGECHCK(daddr_le32, DEST_IP_1_WIDTH);
++
++ /* dword 2 */
++ __DW2CHCK(DEST_PORT_TCP_1_LBN, DEST_PORT_TCP_1_WIDTH);
++ __LWCHK(SRC_IP_1_LBN, SRC_IP_1_WIDTH);
++ __RANGECHCK(saddr_le32, SRC_IP_1_WIDTH);
++
++ /* dword 1 */
++ __DWCHCK(SRC_TCP_DEST_UDP_1_LBN, SRC_TCP_DEST_UDP_1_WIDTH);
++ __RANGECHCK(sport_le16, SRC_TCP_DEST_UDP_1_WIDTH);
++ __RANGECHCK(dport_le16, SRC_TCP_DEST_UDP_1_WIDTH);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ _falcon_nic_ipfilter_build(nic, tcp, full, rss_b0, scat_b0,
++ filter_i, dmaq_id, saddr_le32, sport_le16,
++ daddr_le32, dport_le16, &q0, &q1);
++
++ EFHW_TRACE("%s[%x]@%p+%lx: %" PRIx64 " %" PRIx64, __FUNCTION__,
++ filter_i, EFHW_KVA(nic), offset, q0, q1);
++
++ falcon_write_qq(EFHW_KVA(nic) + offset, q0, q1);
++ mmiowb();
++
++#if FALCON_VERIFY_FILTERS
++ {
++ uint64_t q0read, q1read;
++
++		/* Read a different entry first - ensures BIU flushed shadow */
++ falcon_read_qq(EFHW_KVA(nic) + offset+0x10, &q0read, &q1read);
++ falcon_read_qq(EFHW_KVA(nic) + offset, &q0read, &q1read);
++ EFHW_ASSERT(q0read == q0);
++ EFHW_ASSERT(q1read == q1);
++
++ _falcon_nic_ipfilter_sanity(nic);
++ }
++#endif
++}
++
++static void _falcon_nic_ipfilter_clear(struct efhw_nic *nic, uint filter_i)
++{
++ /* TODO: Use filter table 1 as well */
++ ulong offset = RX_FILTER_TBL0_OFST + filter_i * 2 * FALCON_REGISTER128;
++
++ EFHW_ASSERT(filter_i < nic->filter_tbl_size);
++
++ EFHW_TRACE("%s[%x]", __FUNCTION__, filter_i);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ falcon_write_qq(EFHW_KVA(nic) + offset, 0, 0);
++ mmiowb();
++#if FALCON_VERIFY_FILTERS
++ {
++ uint64_t q0read, q1read;
++
++ /* Read a different entry first - entry BIU flushed shadow */
++ falcon_read_qq(EFHW_KVA(nic) + offset+0x10, &q0read, &q1read);
++ falcon_read_qq(EFHW_KVA(nic) + offset, &q0read, &q1read);
++ EFHW_ASSERT(q0read == 0);
++ EFHW_ASSERT(q1read == 0);
++
++ _falcon_nic_ipfilter_sanity(nic);
++ }
++#endif
++}
++
++/*----------------------------------------------------------------------------
++ *
++ * DMAQ low-level register interface
++ *
++ *---------------------------------------------------------------------------*/
++
++static unsigned dmaq_sizes[] = {
++ 512,
++ EFHW_1K,
++ EFHW_2K,
++ EFHW_4K,
++};
++
++#define N_DMAQ_SIZES (sizeof(dmaq_sizes) / sizeof(dmaq_sizes[0]))
++
++static inline ulong falcon_dma_tx_q_offset(struct efhw_nic *nic, unsigned dmaq)
++{
++ EFHW_ASSERT(dmaq < FALCON_DMAQ_NUM);
++ return TX_DESC_PTR_TBL_OFST + dmaq * FALCON_REGISTER128;
++}
++
++static inline uint falcon_dma_tx_q_size_index(uint dmaq_size)
++{
++ uint i;
++
++ /* size must be one of the various options, otherwise we assert */
++ for (i = 0; i < N_DMAQ_SIZES; i++) {
++ if (dmaq_size == dmaq_sizes[i])
++ break;
++ }
++ EFHW_ASSERT(i < N_DMAQ_SIZES);
++ return i;
++}
++
++static void
++falcon_dmaq_tx_q_init(struct efhw_nic *nic,
++ uint dmaq, uint evq_id, uint own_id,
++ uint tag, uint dmaq_size, uint buf_idx, uint flags)
++{
++ FALCON_LOCK_DECL;
++ uint index, desc_type;
++ uint64_t val1, val2, val3;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ /* Q attributes */
++ int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_TX_HDIG_EN) != 0);
++ int iscsi_ddig_en = ((flags & EFHW_VI_ISCSI_TX_DDIG_EN) != 0);
++ int csum_ip_dis = ((flags & EFHW_VI_TX_IP_CSUM_DIS) != 0);
++ int csum_tcp_dis = ((flags & EFHW_VI_TX_TCPUDP_CSUM_DIS) != 0);
++ int non_ip_drop_dis = ((flags & EFHW_VI_TX_TCPUDP_ONLY) == 0);
++
++ /* initialise the TX descriptor queue pointer table */
++
++ /* NB physical vs buffer addressing is determined by the Queue ID. */
++
++ offset = falcon_dma_tx_q_offset(nic, dmaq);
++ index = falcon_dma_tx_q_size_index(dmaq_size);
++
++ /* allow VI flag to override this queue's descriptor type */
++ desc_type = (flags & EFHW_VI_TX_PHYS_ADDR_EN) ? 0 : 1;
++
++ /* bug9403: It is dangerous to allow buffer-addressed queues to
++ * have owner_id=0. */
++ EFHW_ASSERT((own_id > 0) || desc_type == 0);
++
++ /* dword 1 */
++ __DWCHCK(TX_DESCQ_FLUSH_LBN, TX_DESCQ_FLUSH_WIDTH);
++ __DWCHCK(TX_DESCQ_TYPE_LBN, TX_DESCQ_TYPE_WIDTH);
++ __DWCHCK(TX_DESCQ_SIZE_LBN, TX_DESCQ_SIZE_WIDTH);
++ __DWCHCK(TX_DESCQ_LABEL_LBN, TX_DESCQ_LABEL_WIDTH);
++ __DWCHCK(TX_DESCQ_OWNER_ID_LBN, TX_DESCQ_OWNER_ID_WIDTH);
++
++ __LWCHK(TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH);
++
++ __RANGECHCK(1, TX_DESCQ_FLUSH_WIDTH);
++ __RANGECHCK(desc_type, TX_DESCQ_TYPE_WIDTH);
++ __RANGECHCK(index, TX_DESCQ_SIZE_WIDTH);
++ __RANGECHCK(tag, TX_DESCQ_LABEL_WIDTH);
++ __RANGECHCK(own_id, TX_DESCQ_OWNER_ID_WIDTH);
++ __RANGECHCK(evq_id, TX_DESCQ_EVQ_ID_WIDTH);
++
++ val1 = ((desc_type << TX_DESCQ_TYPE_LBN) |
++ (index << TX_DESCQ_SIZE_LBN) |
++ (tag << TX_DESCQ_LABEL_LBN) |
++ (own_id << TX_DESCQ_OWNER_ID_LBN) |
++ (__LOW(evq_id, TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH)));
++
++ /* dword 2 */
++ __DW2CHCK(TX_DESCQ_BUF_BASE_ID_LBN, TX_DESCQ_BUF_BASE_ID_WIDTH);
++ __RANGECHCK(buf_idx, TX_DESCQ_BUF_BASE_ID_WIDTH);
++
++ val2 = ((__HIGH(evq_id, TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH)) |
++ (buf_idx << __DW2(TX_DESCQ_BUF_BASE_ID_LBN)));
++
++ /* dword 3 */
++ __DW3CHCK(TX_ISCSI_HDIG_EN_LBN, TX_ISCSI_HDIG_EN_WIDTH);
++ __DW3CHCK(TX_ISCSI_DDIG_EN_LBN, TX_ISCSI_DDIG_EN_WIDTH);
++ __RANGECHCK(iscsi_hdig_en, TX_ISCSI_HDIG_EN_WIDTH);
++ __RANGECHCK(iscsi_ddig_en, TX_ISCSI_DDIG_EN_WIDTH);
++
++ val3 = ((iscsi_hdig_en << __DW3(TX_ISCSI_HDIG_EN_LBN)) |
++ (iscsi_ddig_en << __DW3(TX_ISCSI_DDIG_EN_LBN)) |
++ (1 << __DW3(TX_DESCQ_EN_LBN))); /* queue enable bit */
++
++ switch (nic->devtype.variant) {
++ case 'B':
++ __DW3CHCK(TX_NON_IP_DROP_DIS_B0_LBN,
++ TX_NON_IP_DROP_DIS_B0_WIDTH);
++ __DW3CHCK(TX_IP_CHKSM_DIS_B0_LBN, TX_IP_CHKSM_DIS_B0_WIDTH);
++ __DW3CHCK(TX_TCP_CHKSM_DIS_B0_LBN, TX_TCP_CHKSM_DIS_B0_WIDTH);
++
++ val3 |= ((non_ip_drop_dis << __DW3(TX_NON_IP_DROP_DIS_B0_LBN))|
++ (csum_ip_dis << __DW3(TX_IP_CHKSM_DIS_B0_LBN)) |
++ (csum_tcp_dis << __DW3(TX_TCP_CHKSM_DIS_B0_LBN)));
++ break;
++ case 'A':
++ if (csum_ip_dis || csum_tcp_dis || !non_ip_drop_dis)
++ EFHW_WARN
++ ("%s: bad settings for A1 csum_ip_dis=%d "
++ "csum_tcp_dis=%d non_ip_drop_dis=%d",
++ __FUNCTION__, csum_ip_dis,
++ csum_tcp_dis, non_ip_drop_dis);
++ break;
++ default:
++ EFHW_ASSERT(0);
++ break;
++ }
++
++ EFHW_TRACE("%s: txq %x evq %u tag %x id %x buf %x "
++ "%x:%x:%x->%" PRIx64 ":%" PRIx64 ":%" PRIx64,
++ __FUNCTION__,
++ dmaq, evq_id, tag, own_id, buf_idx, dmaq_size,
++ iscsi_hdig_en, iscsi_ddig_en, val1, val2, val3);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++static inline ulong
++falcon_dma_rx_q_offset(struct efhw_nic *nic, unsigned dmaq)
++{
++ EFHW_ASSERT(dmaq < FALCON_DMAQ_NUM);
++ return RX_DESC_PTR_TBL_OFST + dmaq * FALCON_REGISTER128;
++}
++
++static void
++falcon_dmaq_rx_q_init(struct efhw_nic *nic,
++ uint dmaq, uint evq_id, uint own_id,
++ uint tag, uint dmaq_size, uint buf_idx, uint flags)
++{
++ FALCON_LOCK_DECL;
++ uint i, desc_type = 1;
++ uint64_t val1, val2, val3;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ /* Q attributes */
++#if BUG5762_WORKAROUND
++ int jumbo = 1; /* Queues must not have mixed types */
++#else
++ int jumbo = ((flags & EFHW_VI_JUMBO_EN) != 0);
++#endif
++ int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_RX_HDIG_EN) != 0);
++ int iscsi_ddig_en = ((flags & EFHW_VI_ISCSI_RX_DDIG_EN) != 0);
++
++	/* initialise the RX descriptor queue pointer table */
++ offset = falcon_dma_rx_q_offset(nic, dmaq);
++
++ /* size must be one of the various options, otherwise we assert */
++ for (i = 0; i < N_DMAQ_SIZES; i++) {
++ if (dmaq_size == dmaq_sizes[i])
++ break;
++ }
++ EFHW_ASSERT(i < N_DMAQ_SIZES);
++
++ /* allow VI flag to override this queue's descriptor type */
++ desc_type = (flags & EFHW_VI_RX_PHYS_ADDR_EN) ? 0 : 1;
++
++ /* bug9403: It is dangerous to allow buffer-addressed queues to have
++ * owner_id=0 */
++ EFHW_ASSERT((own_id > 0) || desc_type == 0);
++
++ /* dword 1 */
++ __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH);
++ __DWCHCK(RX_DESCQ_JUMBO_LBN, RX_DESCQ_JUMBO_WIDTH);
++ __DWCHCK(RX_DESCQ_TYPE_LBN, RX_DESCQ_TYPE_WIDTH);
++ __DWCHCK(RX_DESCQ_SIZE_LBN, RX_DESCQ_SIZE_WIDTH);
++ __DWCHCK(RX_DESCQ_LABEL_LBN, RX_DESCQ_LABEL_WIDTH);
++ __DWCHCK(RX_DESCQ_OWNER_ID_LBN, RX_DESCQ_OWNER_ID_WIDTH);
++
++ __LWCHK(RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH);
++
++ __RANGECHCK(1, RX_DESCQ_EN_WIDTH);
++ __RANGECHCK(jumbo, RX_DESCQ_JUMBO_WIDTH);
++ __RANGECHCK(desc_type, RX_DESCQ_TYPE_WIDTH);
++ __RANGECHCK(i, RX_DESCQ_SIZE_WIDTH);
++ __RANGECHCK(tag, RX_DESCQ_LABEL_WIDTH);
++ __RANGECHCK(own_id, RX_DESCQ_OWNER_ID_WIDTH);
++ __RANGECHCK(evq_id, RX_DESCQ_EVQ_ID_WIDTH);
++
++ val1 = ((1 << RX_DESCQ_EN_LBN) |
++ (jumbo << RX_DESCQ_JUMBO_LBN) |
++ (desc_type << RX_DESCQ_TYPE_LBN) |
++ (i << RX_DESCQ_SIZE_LBN) |
++ (tag << RX_DESCQ_LABEL_LBN) |
++ (own_id << RX_DESCQ_OWNER_ID_LBN) |
++ (__LOW(evq_id, RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH)));
++
++ /* dword 2 */
++ __DW2CHCK(RX_DESCQ_BUF_BASE_ID_LBN, RX_DESCQ_BUF_BASE_ID_WIDTH);
++ __RANGECHCK(buf_idx, RX_DESCQ_BUF_BASE_ID_WIDTH);
++
++ val2 = ((__HIGH(evq_id, RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH)) |
++ (buf_idx << __DW2(RX_DESCQ_BUF_BASE_ID_LBN)));
++
++ /* dword 3 */
++ __DW3CHCK(RX_ISCSI_HDIG_EN_LBN, RX_ISCSI_HDIG_EN_WIDTH);
++ __DW3CHCK(RX_ISCSI_DDIG_EN_LBN, RX_ISCSI_DDIG_EN_WIDTH);
++ __RANGECHCK(iscsi_hdig_en, RX_ISCSI_HDIG_EN_WIDTH);
++ __RANGECHCK(iscsi_ddig_en, RX_ISCSI_DDIG_EN_WIDTH);
++
++ val3 = (iscsi_hdig_en << __DW3(RX_ISCSI_HDIG_EN_LBN)) |
++ (iscsi_ddig_en << __DW3(RX_ISCSI_DDIG_EN_LBN));
++
++ EFHW_TRACE("%s: rxq %x evq %u tag %x id %x buf %x %s "
++ "%x:%x:%x -> %" PRIx64 ":%" PRIx64 ":%" PRIx64,
++ __FUNCTION__,
++ dmaq, evq_id, tag, own_id, buf_idx,
++ jumbo ? "jumbo" : "normal", dmaq_size,
++ iscsi_hdig_en, iscsi_ddig_en, val1, val2, val3);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++static void falcon_dmaq_tx_q_disable(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ uint64_t val1, val2, val3;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++	/* disable the TX descriptor queue via its pointer table entry */
++
++ offset = falcon_dma_tx_q_offset(nic, dmaq);
++
++ /* dword 1 */
++ __DWCHCK(TX_DESCQ_TYPE_LBN, TX_DESCQ_TYPE_WIDTH);
++
++ val1 = ((uint64_t) 1 << TX_DESCQ_TYPE_LBN);
++
++ /* dword 2 */
++ val2 = 0;
++
++ /* dword 3 */
++ val3 = (0 << __DW3(TX_DESCQ_EN_LBN)); /* queue enable bit */
++
++ EFHW_TRACE("%s: %x->%" PRIx64 ":%" PRIx64 ":%" PRIx64,
++ __FUNCTION__, dmaq, val1, val2, val3);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++static void falcon_dmaq_rx_q_disable(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ uint64_t val1, val2, val3;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++	/* disable the RX descriptor queue via its pointer table entry */
++ offset = falcon_dma_rx_q_offset(nic, dmaq);
++
++ /* dword 1 */
++ __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH);
++ __DWCHCK(RX_DESCQ_TYPE_LBN, RX_DESCQ_TYPE_WIDTH);
++
++ val1 = ((0 << RX_DESCQ_EN_LBN) | (1 << RX_DESCQ_TYPE_LBN));
++
++ /* dword 2 */
++ val2 = 0;
++
++ /* dword 3 */
++ val3 = 0;
++
++ EFHW_TRACE("falcon_dmaq_rx_q_disable: %x->%"
++ PRIx64 ":%" PRIx64 ":%" PRIx64,
++ dmaq, val1, val2, val3);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Buffer Table low-level register interface
++ *
++ *---------------------------------------------------------------------------*/
++
++/*! Convert a (potentially) 64-bit physical address to 32-bits. Every use
++** of this function is a place where we're not 64-bit clean.
++*/
++static inline uint32_t dma_addr_to_u32(dma_addr_t addr)
++{
++ /* Top bits had better be zero! */
++ EFHW_ASSERT(addr == (addr & 0xffffffff));
++ return (uint32_t) addr;
++}
++
++static inline uint32_t
++falcon_nic_buffer_table_entry32_mk(dma_addr_t dma_addr, int own_id)
++{
++ uint32_t dma_addr32 = FALCON_BUFFER_4K_PAGE(dma_addr_to_u32(dma_addr));
++
++ /* don't do this to me */
++ EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32);
++ EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN ==
++ BUF_OWNER_ID_HBUF_EVEN_LBN + 32);
++
++ EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_WIDTH ==
++ BUF_OWNER_ID_HBUF_EVEN_WIDTH);
++ EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_WIDTH == BUF_ADR_HBUF_EVEN_WIDTH);
++
++ __DWCHCK(BUF_ADR_HBUF_EVEN_LBN, BUF_ADR_HBUF_EVEN_WIDTH);
++ __DWCHCK(BUF_OWNER_ID_HBUF_EVEN_LBN, BUF_OWNER_ID_HBUF_EVEN_WIDTH);
++
++ __RANGECHCK(dma_addr32, BUF_ADR_HBUF_EVEN_WIDTH);
++ __RANGECHCK(own_id, BUF_OWNER_ID_HBUF_EVEN_WIDTH);
++
++ return ((dma_addr32 << BUF_ADR_HBUF_EVEN_LBN) |
++ (own_id << BUF_OWNER_ID_HBUF_EVEN_LBN));
++}
++
++static inline uint64_t
++falcon_nic_buffer_table_entry64_mk(dma_addr_t dma_addr,
++ int bufsz, /* bytes */
++ int region, int own_id)
++{
++ __DW2CHCK(IP_DAT_BUF_SIZE_LBN, IP_DAT_BUF_SIZE_WIDTH);
++ __DW2CHCK(BUF_ADR_REGION_LBN, BUF_ADR_REGION_WIDTH);
++ __LWCHK(BUF_ADR_FBUF_LBN, BUF_ADR_FBUF_WIDTH);
++ __DWCHCK(BUF_OWNER_ID_FBUF_LBN, BUF_OWNER_ID_FBUF_WIDTH);
++
++ EFHW_ASSERT((bufsz == EFHW_4K) || (bufsz == EFHW_8K));
++
++ dma_addr = (dma_addr >> 12) & __FALCON_MASK64(BUF_ADR_FBUF_WIDTH);
++
++ __RANGECHCK(dma_addr, BUF_ADR_FBUF_WIDTH);
++ __RANGECHCK(1, IP_DAT_BUF_SIZE_WIDTH);
++ __RANGECHCK(region, BUF_ADR_REGION_WIDTH);
++ __RANGECHCK(own_id, BUF_OWNER_ID_FBUF_WIDTH);
++
++ return (((uint64_t) (bufsz == EFHW_8K) << IP_DAT_BUF_SIZE_LBN) |
++ ((uint64_t) region << BUF_ADR_REGION_LBN) |
++ ((uint64_t) dma_addr << BUF_ADR_FBUF_LBN) |
++ ((uint64_t) own_id << BUF_OWNER_ID_FBUF_LBN));
++}
++
++static inline void
++_falcon_nic_buffer_table_set32(struct efhw_nic *nic,
++ dma_addr_t dma_addr, uint bufsz,
++ uint region, /* not used */
++ int own_id, int buffer_id)
++{
++ /* programming the half table needs to be done in pairs. */
++ uint64_t entry, val, shift;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ efhw_ioaddr_t offset;
++
++ EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32);
++ EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN ==
++ BUF_OWNER_ID_HBUF_EVEN_LBN + 32);
++
++ shift = (buffer_id & 1) ? 32 : 0;
++
++ offset = (efhw_kva + BUF_HALF_TBL_OFST +
++ ((buffer_id & ~1) * FALCON_BUFFER_TBL_HALF_BYTES));
++
++ entry = falcon_nic_buffer_table_entry32_mk(dma_addr_to_u32(dma_addr),
++ own_id);
++
++#if FALCON_USE_SHADOW_BUFFER_TABLE
++ val = _falcon_buffer_table[buffer_id & ~1];
++#else
++ /* This will not work unless we've completed
++ * the buffer table updates */
++ falcon_read_q(offset, &val);
++#endif
++ val &= ~(((uint64_t) 0xffffffff) << shift);
++ val |= (entry << shift);
++
++ EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt ":%x:%" PRIx64 "->%x = %"
++ PRIx64, __FUNCTION__, buffer_id, dma_addr, own_id, entry,
++ (unsigned)(offset - efhw_kva), val);
++
++ /* Falcon requires that access to this register is serialised */
++ falcon_write_q(offset, val);
++
++ /* NB. No mmiowb(). Caller should do that e.g by calling commit */
++
++#if FALCON_USE_SHADOW_BUFFER_TABLE
++ _falcon_buffer_table[buffer_id & ~1] = val;
++#endif
++
++ /* Confirm the entry if the event queues haven't been set up. */
++ if (!nic->irq_handler) {
++ uint64_t new_val;
++ int count = 0;
++ while (1) {
++ mmiowb();
++ falcon_read_q(offset, &new_val);
++ if (new_val == val)
++ break;
++ count++;
++ if (count > 1000) {
++				EFHW_WARN("%s: poll timeout", __FUNCTION__);
++ break;
++ }
++ udelay(1);
++ }
++ }
++}
++
++static inline void
++_falcon_nic_buffer_table_set64(struct efhw_nic *nic,
++ dma_addr_t dma_addr, uint bufsz,
++ uint region, int own_id, int buffer_id)
++{
++ efhw_ioaddr_t offset;
++ uint64_t entry;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ EFHW_ASSERT(region < FALCON_REGION_NUM);
++
++ EFHW_ASSERT((bufsz == EFHW_4K) ||
++ (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE));
++
++ offset = (efhw_kva + BUF_FULL_TBL_OFST +
++ (buffer_id * FALCON_BUFFER_TBL_FULL_BYTES));
++
++ entry = falcon_nic_buffer_table_entry64_mk(dma_addr, bufsz, region,
++ own_id);
++
++ EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt
++ ":bufsz=%x:region=%x:ownid=%x",
++ __FUNCTION__, buffer_id, dma_addr, bufsz, region, own_id);
++
++ EFHW_TRACE("%s: BUF[%x]:NIC[%x]->%" PRIx64,
++ __FUNCTION__, buffer_id,
++ (unsigned int)(offset - efhw_kva), entry);
++
++ /* Falcon requires that access to this register is serialised */
++ falcon_write_q(offset, entry);
++
++ /* NB. No mmiowb(). Caller should do that e.g by calling commit */
++
++ /* Confirm the entry if the event queues haven't been set up. */
++ if (!nic->irq_handler) {
++ uint64_t new_entry;
++ int count = 0;
++ while (1) {
++ mmiowb();
++ falcon_read_q(offset, &new_entry);
++ if (new_entry == entry)
++ return;
++ count++;
++ if (count > 1000) {
++				EFHW_WARN("%s: poll timeout waiting for "
++ "value %"PRIx64
++ " (last was %"PRIx64")",
++ __FUNCTION__, entry, new_entry);
++ break;
++ }
++ udelay(1);
++ }
++ }
++}
++
++#if FALCON_BUFFER_TABLE_FULL_MODE
++#define _falcon_nic_buffer_table_set _falcon_nic_buffer_table_set64
++#else
++#define _falcon_nic_buffer_table_set _falcon_nic_buffer_table_set32
++#endif
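++
++/* A note on the two encodings above: in full mode each buffer-table entry
++ * occupies FALCON_BUFFER_TBL_FULL_BYTES (8) and can be written on its own,
++ * whereas in half mode two 32-bit entries share one 64-bit word - which is
++ * why _falcon_nic_buffer_table_set32() reads back (or shadows) the word and
++ * rewrites both halves. */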
++
++static inline void _falcon_nic_buffer_table_commit(struct efhw_nic *nic)
++{
++ /* MUST be called holding the FALCON_LOCK */
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ uint64_t cmd;
++
++ EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST);
++
++ __DW2CHCK(BUF_UPD_CMD_LBN, BUF_UPD_CMD_WIDTH);
++ __RANGECHCK(1, BUF_UPD_CMD_WIDTH);
++
++ cmd = ((uint64_t) 1 << BUF_UPD_CMD_LBN);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ falcon_write_qq(efhw_kva + BUF_TBL_UPD_REG_OFST,
++ cmd, FALCON_ATOMIC_UPD_REG);
++ mmiowb();
++
++ nic->buf_commit_outstanding++;
++ EFHW_TRACE("COMMIT REQ out=%d", nic->buf_commit_outstanding);
++}
++
++static void falcon_nic_buffer_table_commit(struct efhw_nic *nic)
++{
++ /* nothing to do */
++}
++
++static inline void
++_falcon_nic_buffer_table_clear(struct efhw_nic *nic, int buffer_id, int num)
++{
++ uint64_t cmd;
++ uint64_t start_id = buffer_id;
++ uint64_t end_id = buffer_id + num - 1;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ efhw_ioaddr_t offset = (efhw_kva + BUF_TBL_UPD_REG_OFST);
++
++ EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST);
++
++#if !FALCON_BUFFER_TABLE_FULL_MODE
++ /* buffer_ids in half buffer mode reference pairs of buffers */
++	EFHW_ASSERT(buffer_id % 2 == 0);
++	EFHW_ASSERT(num % 2 == 0);
++ start_id = start_id >> 1;
++ end_id = end_id >> 1;
++#endif
++
++ EFHW_ASSERT(num >= 1);
++
++ __DWCHCK(BUF_CLR_START_ID_LBN, BUF_CLR_START_ID_WIDTH);
++ __DW2CHCK(BUF_CLR_END_ID_LBN, BUF_CLR_END_ID_WIDTH);
++
++ __DW2CHCK(BUF_CLR_CMD_LBN, BUF_CLR_CMD_WIDTH);
++ __RANGECHCK(1, BUF_CLR_CMD_WIDTH);
++
++ __RANGECHCK(start_id, BUF_CLR_START_ID_WIDTH);
++ __RANGECHCK(end_id, BUF_CLR_END_ID_WIDTH);
++
++ cmd = (((uint64_t) 1 << BUF_CLR_CMD_LBN) |
++ (start_id << BUF_CLR_START_ID_LBN) |
++ (end_id << BUF_CLR_END_ID_LBN));
++
++ /* Falcon requires 128 bit atomic access for this register */
++ falcon_write_qq(offset, cmd, FALCON_ATOMIC_UPD_REG);
++ mmiowb();
++
++ nic->buf_commit_outstanding++;
++ EFHW_TRACE("COMMIT CLEAR out=%d", nic->buf_commit_outstanding);
++}
++
++/*----------------------------------------------------------------------------
++ *
++ * Events low-level register interface
++ *
++ *---------------------------------------------------------------------------*/
++
++static unsigned eventq_sizes[] = {
++ 512,
++ EFHW_1K,
++ EFHW_2K,
++ EFHW_4K,
++ EFHW_8K,
++ EFHW_16K,
++ EFHW_32K
++};
++
++#define N_EVENTQ_SIZES (sizeof(eventq_sizes) / sizeof(eventq_sizes[0]))
++
++static inline void falcon_nic_srm_upd_evq(struct efhw_nic *nic, int evq)
++{
++ /* set up the eventq which will receive events from the SRAM module.
++	 * i.e. buffer table updates and clears, TX and RX aperture table
++ * updates */
++
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ EFHW_BUILD_ASSERT(SRM_UPD_EVQ_REG_OFST == SRM_UPD_EVQ_REG_KER_OFST);
++
++ EFHW_ASSERT((evq == FALCON_EVQ_KERNEL0) || (evq == FALCON_EVQ_CHAR) ||
++ (evq == FALCON_EVQ_NONIRQ));
++
++ __DWCHCK(SRM_UPD_EVQ_ID_LBN, SRM_UPD_EVQ_ID_WIDTH);
++ __RANGECHCK(evq, SRM_UPD_EVQ_ID_WIDTH);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + SRM_UPD_EVQ_REG_OFST,
++ ((uint64_t) evq << SRM_UPD_EVQ_ID_LBN),
++ FALCON_ATOMIC_SRPM_UDP_EVQ_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++static inline void
++falcon_nic_evq_ptr_tbl(struct efhw_nic *nic,
++ uint evq, /* evq id */
++ uint enable, /* 1 to enable, 0 to disable */
++ uint buf_base_id,/* Buffer table base for EVQ */
++ uint evq_size /* Number of events */ )
++{
++ FALCON_LOCK_DECL;
++ uint i, val;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ /* size must be one of the various options, otherwise we assert */
++ for (i = 0; i < N_EVENTQ_SIZES; i++) {
++ if (evq_size <= eventq_sizes[i])
++ break;
++ }
++ EFHW_ASSERT(i < N_EVENTQ_SIZES);
++
++ __DWCHCK(EVQ_BUF_BASE_ID_LBN, EVQ_BUF_BASE_ID_WIDTH);
++ __DWCHCK(EVQ_SIZE_LBN, EVQ_SIZE_WIDTH);
++ __DWCHCK(EVQ_EN_LBN, EVQ_EN_WIDTH);
++
++ __RANGECHCK(i, EVQ_SIZE_WIDTH);
++ __RANGECHCK(buf_base_id, EVQ_BUF_BASE_ID_WIDTH);
++ __RANGECHCK(1, EVQ_EN_WIDTH);
++
++ /* if !enable then only evq needs to be correct, although valid
++ * values need to be passed in for other arguments to prevent
++ * assertions */
++
++ val = ((i << EVQ_SIZE_LBN) | (buf_base_id << EVQ_BUF_BASE_ID_LBN) |
++ (enable ? (1 << EVQ_EN_LBN) : 0));
++
++ EFHW_ASSERT(evq < FALCON_EVQ_TBL_NUM);
++
++ offset = EVQ_PTR_TBL_CHAR_OFST;
++ offset += evq * FALCON_REGISTER128;
++
++ EFHW_TRACE("%s: evq %u en=%x:buf=%x:size=%x->%x at %lx",
++ __FUNCTION__, evq, enable, buf_base_id, evq_size, val,
++ offset);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, val, FALCON_ATOMIC_PTR_TBL_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++
++ /* caller must wait for an update done event before writing any more
++ table entries */
++
++ return;
++}
++
++void
++falcon_nic_evq_ack(struct efhw_nic *nic,
++ uint evq, /* evq id */
++ uint rptr, /* new read pointer update */
++ bool wakeup /* request a wakeup event if ptr's != */
++ )
++{
++ uint val;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
++
++ __DWCHCK(EVQ_RPTR_LBN, EVQ_RPTR_WIDTH);
++ __RANGECHCK(rptr, EVQ_RPTR_WIDTH);
++
++ val = (rptr << EVQ_RPTR_LBN);
++
++ EFHW_ASSERT(evq < FALCON_EVQ_TBL_NUM);
++
++ if (evq < FALCON_EVQ_CHAR) {
++ offset = EVQ_RPTR_REG_KER_OFST;
++ offset += evq * FALCON_REGISTER128;
++
++ EFHW_ASSERT(!wakeup); /* don't try this at home */
++ } else {
++ offset = EVQ_RPTR_REG_OFST + (FALCON_EVQ_CHAR *
++ FALCON_REGISTER128);
++ offset += (evq - FALCON_EVQ_CHAR) * FALCON_REGISTER128;
++
++ /* nothing to do for interruptless event queues which do
++ * not want a wakeup */
++ if (evq != FALCON_EVQ_CHAR && !wakeup)
++ return;
++ }
++
++ EFHW_TRACE("%s: %x %x %x->%x", __FUNCTION__, evq, rptr, wakeup, val);
++
++ writel(val, efhw_kva + offset);
++ mmiowb();
++}
++
++/*----------------------------------------------------------------------------
++ *
++ * Helper for evq mapping
++ *
++ * idx = 0 && char => hw eventq[4]
++ * idx = 0 && net => hw eventq[0]
++ * 0 < idx < 5 => hw eventq[idx] (5 is non-interrupting)
++ *
++ *
++ *---------------------------------------------------------------------------*/
++
++int falcon_idx_to_evq(struct efhw_nic *nic, uint idx)
++{
++ EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
++ EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ);
++ return (idx > 0) ? idx : FALCON_EVQ_CHAR;
++}
++
++static inline int falcon_evq_is_interrupting(struct efhw_nic *nic, uint idx)
++{
++ EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4);
++ EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ);
++
++ /* only the first CHAR driver event queue is interrupting */
++ return (idx == FALCON_EVQ_CHAR);
++}
++
++static inline void
++falcon_drv_ev(struct efhw_nic *nic, uint64_t data, uint qid)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ /* send an event from one driver to the other */
++ EFHW_BUILD_ASSERT(DRV_EV_REG_KER_OFST == DRV_EV_REG_OFST);
++ EFHW_BUILD_ASSERT(DRV_EV_DATA_LBN == 0);
++ EFHW_BUILD_ASSERT(DRV_EV_DATA_WIDTH == 64);
++ EFHW_BUILD_ASSERT(DRV_EV_QID_LBN == 64);
++ EFHW_BUILD_ASSERT(DRV_EV_QID_WIDTH == 12);
++
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + DRV_EV_REG_OFST, data, qid);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++_DEBUG_SYM_ void
++falcon_timer_cmd(struct efhw_nic *nic,
++ uint evq, /* timer id */
++ uint mode, /* mode bits */
++ uint countdown /* counting value to set */ )
++{
++ FALCON_LOCK_DECL;
++ uint val;
++ ulong offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ EFHW_BUILD_ASSERT(TIMER_VAL_LBN == 0);
++
++ __DWCHCK(TIMER_MODE_LBN, TIMER_MODE_WIDTH);
++ __DWCHCK(TIMER_VAL_LBN, TIMER_VAL_WIDTH);
++
++ __RANGECHCK(mode, TIMER_MODE_WIDTH);
++ __RANGECHCK(countdown, TIMER_VAL_WIDTH);
++
++ val = ((mode << TIMER_MODE_LBN) | (countdown << TIMER_VAL_LBN));
++
++ if (evq < FALCON_EVQ_CHAR) {
++ offset = TIMER_CMD_REG_KER_OFST;
++ offset += evq * EFHW_8K; /* PAGE mapped register */
++ } else {
++ offset = TIMER_TBL_OFST;
++ offset += evq * FALCON_REGISTER128;
++ }
++ EFHW_ASSERT(evq < FALCON_EVQ_TBL_NUM);
++
++ EFHW_TRACE("%s: evq %u mode %x (%s) time %x -> %08x",
++ __FUNCTION__, evq, mode,
++ mode == 0 ? "DISABLE" :
++ mode == 1 ? "IMMED" :
++ mode == 2 ? (evq < 5 ? "HOLDOFF" : "RX_TRIG") :
++ "<BAD>", countdown, val);
++
++ /* Falcon requires 128 bit atomic access for this register when
++ * accessed from the driver. User access to timers is paged mapped
++ */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, val, FALCON_ATOMIC_TIMER_CMD_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Rate pacing - Low level interface
++ *
++ *--------------------------------------------------------------------*/
++void falcon_nic_pace(struct efhw_nic *nic, uint dmaq, uint pace)
++{
++	/* Pace is specified as an exponent: the minimum additional delay
++	   imposed over and above the IPG is 2^pace (in units of
++	   microseconds).
++
++	   Pacing is only available on the virtual interfaces.
++	 */
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ ulong offset;
++
++ if (pace > 20)
++		pace = 20;	/* max supported value */
++
++ __DWCHCK(TX_PACE_LBN, TX_PACE_WIDTH);
++ __RANGECHCK(pace, TX_PACE_WIDTH);
++
++ switch (nic->devtype.variant) {
++ case 'A':
++ EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_A1);
++ offset = TX_PACE_TBL_A1_OFST;
++ offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_A1) * 16;
++ break;
++ case 'B':
++ /* Would be nice to assert this, but as dmaq is unsigned and
++ * TX_PACE_TBL_FIRST_QUEUE_B0 is 0, it makes no sense
++ * EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_B0);
++ */
++ offset = TX_PACE_TBL_B0_OFST;
++ offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_B0) * 16;
++ break;
++ default:
++ EFHW_ASSERT(0);
++ offset = 0;
++ break;
++ }
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + offset, pace, FALCON_ATOMIC_PACE_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++
++ EFHW_TRACE("%s: txq %d offset=%lx pace=2^%x",
++ __FUNCTION__, dmaq, offset, pace);
++}
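++
++/* Illustrative use (hypothetical values): request a minimum extra
++ * inter-packet gap of 2^10 on a VI's transmit queue; values above 20
++ * are clamped by the function above:
++ *
++ *	falcon_nic_pace(nic, dmaq, 10);
++ */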
++
++/*--------------------------------------------------------------------
++ *
++ * Interrupt - Low level interface
++ *
++ *--------------------------------------------------------------------*/
++
++static void falcon_nic_handle_fatal_int(struct efhw_nic *nic)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ uint64_t val;
++
++ offset = (efhw_kva + FATAL_INTR_REG_OFST);
++
++ /* Falcon requires 32 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ val = readl(offset);
++ FALCON_LOCK_UNLOCK(nic);
++
++ /* ?? BUG3249 - need to disable illegal address interrupt */
++ /* ?? BUG3114 - need to backport interrupt storm protection code */
++ EFHW_ERR("fatal interrupt: %s%s%s%s%s%s%s%s%s%s%s%s[%" PRIx64 "]",
++ val & (1 << PCI_BUSERR_INT_CHAR_LBN) ? "PCI-bus-error " : "",
++ val & (1 << SRAM_OOB_INT_CHAR_LBN) ? "SRAM-oob " : "",
++ val & (1 << BUFID_OOB_INT_CHAR_LBN) ? "bufid-oob " : "",
++ val & (1 << MEM_PERR_INT_CHAR_LBN) ? "int-parity " : "",
++ val & (1 << RBUF_OWN_INT_CHAR_LBN) ? "rx-bufid-own " : "",
++ val & (1 << TBUF_OWN_INT_CHAR_LBN) ? "tx-bufid-own " : "",
++ val & (1 << RDESCQ_OWN_INT_CHAR_LBN) ? "rx-desc-own " : "",
++ val & (1 << TDESCQ_OWN_INT_CHAR_LBN) ? "tx-desc-own " : "",
++ val & (1 << EVQ_OWN_INT_CHAR_LBN) ? "evq-own " : "",
++ val & (1 << EVFF_OFLO_INT_CHAR_LBN) ? "evq-fifo " : "",
++ val & (1 << ILL_ADR_INT_CHAR_LBN) ? "ill-addr " : "",
++ val & (1 << SRM_PERR_INT_CHAR_LBN) ? "sram-parity " : "", val);
++}
++
++static void falcon_nic_interrupt_hw_enable(struct efhw_nic *nic)
++{
++ FALCON_LOCK_DECL;
++ uint val;
++ efhw_ioaddr_t offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_WIDTH == 1);
++
++ if (nic->flags & NIC_FLAG_NO_INTERRUPT)
++ return;
++
++ offset = (efhw_kva + INT_EN_REG_CHAR_OFST);
++ val = 1 << DRV_INT_EN_CHAR_LBN;
++
++ EFHW_NOTICE("%s: %x -> %x", __FUNCTION__, (int)(offset - efhw_kva),
++ val);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(offset, val, FALCON_ATOMIC_INT_EN_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++static void falcon_nic_interrupt_hw_disable(struct efhw_nic *nic)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_WIDTH == 1);
++ EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == 0);
++ EFHW_BUILD_ASSERT(SRAM_PERR_INT_CHAR_WIDTH == 1);
++ EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_LBN == 0);
++ EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_LBN == SRAM_PERR_INT_CHAR_LBN);
++ EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == DRV_INT_EN_CHAR_LBN);
++
++ if (nic->flags & NIC_FLAG_NO_INTERRUPT)
++ return;
++
++ offset = (efhw_kva + INT_EN_REG_CHAR_OFST);
++
++ EFHW_NOTICE("%s: %x -> 0", __FUNCTION__, (int)(offset - efhw_kva));
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(offset, 0, FALCON_ATOMIC_INT_EN_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++#ifndef __ci_ul_driver__
++
++static void falcon_nic_irq_addr_set(struct efhw_nic *nic, dma_addr_t dma_addr)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t offset;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++
++ offset = (efhw_kva + INT_ADR_REG_CHAR_OFST);
++
++ EFHW_NOTICE("%s: %x -> " DMA_ADDR_T_FMT, __FUNCTION__,
++ (int)(offset - efhw_kva), dma_addr);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(offset, dma_addr, FALCON_ATOMIC_INT_ADR_REG);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++#endif
++
++
++/*--------------------------------------------------------------------
++ *
++ * RXDP - low level interface
++ *
++ *--------------------------------------------------------------------*/
++
++void
++falcon_nic_set_rx_usr_buf_size(struct efhw_nic *nic, int usr_buf_bytes)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ uint64_t val, val2, usr_buf_size = usr_buf_bytes / 32;
++ int rubs_lbn, rubs_width, roec_lbn;
++
++ EFHW_BUILD_ASSERT(RX_CFG_REG_OFST == RX_CFG_REG_KER_OFST);
++
++ switch (nic->devtype.variant) {
++ default:
++ EFHW_ASSERT(0);
++ /* Fall-through to avoid compiler warnings. */
++ case 'A':
++ rubs_lbn = RX_USR_BUF_SIZE_A1_LBN;
++ rubs_width = RX_USR_BUF_SIZE_A1_WIDTH;
++ roec_lbn = RX_OWNERR_CTL_A1_LBN;
++ break;
++ case 'B':
++ rubs_lbn = RX_USR_BUF_SIZE_B0_LBN;
++ rubs_width = RX_USR_BUF_SIZE_B0_WIDTH;
++ roec_lbn = RX_OWNERR_CTL_B0_LBN;
++ break;
++ }
++
++ __DWCHCK(rubs_lbn, rubs_width);
++ __QWCHCK(roec_lbn, 1);
++ __RANGECHCK(usr_buf_size, rubs_width);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_qq(efhw_kva + RX_CFG_REG_OFST, &val, &val2);
++
++ val &= ~((__FALCON_MASK64(rubs_width)) << rubs_lbn);
++ val |= (usr_buf_size << rubs_lbn);
++
++ /* shouldn't be needed for a production driver */
++ val |= ((uint64_t) 1 << roec_lbn);
++
++ falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, val, val2);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++EXPORT_SYMBOL(falcon_nic_set_rx_usr_buf_size);
++
++void
++falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full,
++ uint32_t *tcp_wild,
++ uint32_t *udp_full, uint32_t *udp_wild)
++{
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ FALCON_LOCK_DECL;
++ uint64_t val;
++
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_q(efhw_kva + RX_FILTER_CTL_REG_OFST, &val);
++ FALCON_LOCK_UNLOCK(nic);
++
++ *tcp_full = (uint32_t)((val >> TCP_FULL_SRCH_LIMIT_LBN) &
++ (__FALCON_MASK64(TCP_FULL_SRCH_LIMIT_WIDTH)));
++
++ *tcp_wild = (uint32_t)((val >> TCP_WILD_SRCH_LIMIT_LBN) &
++ (__FALCON_MASK64(TCP_WILD_SRCH_LIMIT_WIDTH)));
++
++ *udp_full = (uint32_t)((val >> UDP_FULL_SRCH_LIMIT_LBN) &
++ (__FALCON_MASK64(UDP_FULL_SRCH_LIMIT_WIDTH)));
++
++ *udp_wild = (uint32_t)((val >> UDP_WILD_SRCH_LIMIT_LBN) &
++ (__FALCON_MASK64(UDP_WILD_SRCH_LIMIT_WIDTH)));
++}
++EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_get);
++
++void
++falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full,
++ uint32_t tcp_wild,
++ uint32_t udp_full, uint32_t udp_wild)
++{
++ uint64_t val, val2;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ FALCON_LOCK_DECL;
++
++ EFHW_ASSERT(tcp_full < nic->filter_tbl_size);
++ EFHW_ASSERT(tcp_wild < nic->filter_tbl_size);
++ EFHW_ASSERT(udp_full < nic->filter_tbl_size);
++ EFHW_ASSERT(udp_wild < nic->filter_tbl_size);
++
++ /* until we implement a dynamic scaling of search limits we wish to
++ * maintain the same limits set up by default in the net driver
++ * when we initialize the char driver */
++ tcp_full_srch_limit = tcp_full;
++ tcp_wild_srch_limit = tcp_wild;
++ udp_full_srch_limit = udp_full;
++ udp_wild_srch_limit = udp_wild;
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, &val, &val2);
++
++ /* Search limits */
++ val &= ~((__FALCON_MASK64(TCP_FULL_SRCH_LIMIT_WIDTH))
++ << TCP_FULL_SRCH_LIMIT_LBN);
++
++ val |= ((uint64_t)tcp_full + RX_FILTER_CTL_SRCH_FUDGE_FULL)
++ << TCP_FULL_SRCH_LIMIT_LBN;
++
++ val &= ~((__FALCON_MASK64(TCP_WILD_SRCH_LIMIT_WIDTH))
++ << TCP_WILD_SRCH_LIMIT_LBN);
++
++ val |= ((uint64_t)tcp_wild + RX_FILTER_CTL_SRCH_FUDGE_WILD)
++ << TCP_WILD_SRCH_LIMIT_LBN;
++
++ val &= ~((__FALCON_MASK64(UDP_FULL_SRCH_LIMIT_WIDTH))
++ << UDP_FULL_SRCH_LIMIT_LBN);
++
++ val |= ((uint64_t)udp_full + RX_FILTER_CTL_SRCH_FUDGE_FULL)
++ << UDP_FULL_SRCH_LIMIT_LBN;
++
++ val &= ~((__FALCON_MASK64(UDP_WILD_SRCH_LIMIT_WIDTH))
++ << UDP_WILD_SRCH_LIMIT_LBN);
++
++ val |= ((uint64_t)udp_wild + RX_FILTER_CTL_SRCH_FUDGE_WILD)
++ << UDP_WILD_SRCH_LIMIT_LBN;
++
++ falcon_write_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, val, val2);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_set);
++
++/*--------------------------------------------------------------------
++ *
++ * TXDP - low level interface
++ *
++ *--------------------------------------------------------------------*/
++
++_DEBUG_SYM_ void falcon_nic_tx_cfg(struct efhw_nic *nic, int unlocked)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ uint64_t val1, val2;
++
++ EFHW_BUILD_ASSERT(TX_CFG_REG_OFST == TX_CFG_REG_KER_OFST);
++ __DWCHCK(TX_OWNERR_CTL_LBN, TX_OWNERR_CTL_WIDTH);
++ __DWCHCK(TX_NON_IP_DROP_DIS_LBN, TX_NON_IP_DROP_DIS_WIDTH);
++
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_qq(efhw_kva + TX_CFG_REG_OFST, &val1, &val2);
++
++ /* Will flag fatal interrupts on owner id errors. This should not be
++ on for production code because there is otherwise a denial of
++	   service attack possible */
++ val1 |= (1 << TX_OWNERR_CTL_LBN);
++
++ /* Setup user queue TCP/UDP only packet security */
++ if (unlocked)
++ val1 |= (1 << TX_NON_IP_DROP_DIS_LBN);
++ else
++ val1 &= ~(1 << TX_NON_IP_DROP_DIS_LBN);
++
++ falcon_write_qq(efhw_kva + TX_CFG_REG_OFST, val1, val2);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Random thresholds - Low level interface (Would like these to be op
++ * defaults wherever possible)
++ *
++ *--------------------------------------------------------------------*/
++
++static void falcon_nic_pace_cfg(struct efhw_nic *nic)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ unsigned offset = 0;
++ uint64_t val;
++
++ val = 0xa81682; /* !!!! */
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ switch (nic->devtype.variant) {
++ case 'A': offset = TX_PACE_REG_A1_OFST; break;
++ case 'B': offset = TX_PACE_REG_B0_OFST; break;
++ default: EFHW_ASSERT(0); break;
++ }
++ falcon_write_qq(efhw_kva + offset, val, 0);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++/**********************************************************************
++ * Supporting modules. ************************************************
++ **********************************************************************/
++
++/*--------------------------------------------------------------------
++ *
++ * Filter support
++ *
++ *--------------------------------------------------------------------*/
++
++/*! \TODO this table should be per nic */
++struct falcon_cached_ipfilter {
++#if FALCON_FULL_FILTER_CACHE
++ unsigned dmaq;
++ unsigned saddr_le32;
++ unsigned daddr_le32;
++ unsigned sport_le16;
++ unsigned dport_le16;
++ unsigned tcp:1;
++ unsigned full:1;
++ unsigned rss_b0:1;
++ unsigned scat_b0:1;
++#endif
++ unsigned addr_valid:1;
++
++};
++
++
++/* TODO: Dynamically allocate this and store in struct efhw_nic. */
++static struct falcon_cached_ipfilter
++ host_ipfilter_cache[EFHW_MAX_NR_DEVS][FALCON_FILTER_TBL_NUM];
++
++
++static inline void host_ipfilter_cache_init(struct efhw_nic *nic)
++{
++ memset(host_ipfilter_cache[nic->index], 0,
++ sizeof(host_ipfilter_cache[0][0]) * nic->filter_tbl_size);
++}
++
++static inline int host_ipfilter_cache_active(struct efhw_nic *nic, uint idx)
++{
++ EFHW_ASSERT(nic->index < EFHW_MAX_NR_DEVS);
++ EFHW_ASSERT(idx < nic->filter_tbl_size);
++
++ return (host_ipfilter_cache[nic->index][idx].addr_valid);
++
++}
++
++static inline void host_ipfilter_cache_flush(struct efhw_nic *nic, uint idx)
++{
++ EFHW_ASSERT(nic->index < EFHW_MAX_NR_DEVS);
++ EFHW_ASSERT(idx < nic->filter_tbl_size);
++
++ memset(&host_ipfilter_cache[nic->index][idx], 0,
++ sizeof(struct falcon_cached_ipfilter));
++ mmiowb();
++}
++
++static inline void
++host_ipfilter_cache_set_addr(struct efhw_nic *nic, uint idx, uint dmaq,
++ unsigned tcp, unsigned full,
++ unsigned rss_b0, unsigned scat_b0,
++ unsigned saddr_le32, unsigned sport_le16,
++ unsigned daddr_le32, unsigned dport_le16)
++{
++ unsigned nic_i = nic->index;
++
++ EFHW_ASSERT(nic_i < EFHW_MAX_NR_DEVS);
++ EFHW_ASSERT(idx < nic->filter_tbl_size);
++ EFHW_ASSERT(!host_ipfilter_cache[nic_i][idx].addr_valid);
++
++ __RANGECHCK(sport_le16, SRC_TCP_DEST_UDP_1_WIDTH);
++ __RANGECHCK(dport_le16, SRC_TCP_DEST_UDP_1_WIDTH);
++
++#if FALCON_FULL_FILTER_CACHE
++ host_ipfilter_cache[nic_i][idx].dmaq = dmaq;
++ host_ipfilter_cache[nic_i][idx].saddr_le32 = saddr_le32;
++ host_ipfilter_cache[nic_i][idx].daddr_le32 = daddr_le32;
++ host_ipfilter_cache[nic_i][idx].sport_le16 = sport_le16;
++ host_ipfilter_cache[nic_i][idx].dport_le16 = dport_le16;
++ host_ipfilter_cache[nic_i][idx].tcp = tcp;
++ host_ipfilter_cache[nic_i][idx].full = full;
++ host_ipfilter_cache[nic_i][idx].rss_b0 = rss_b0;
++ host_ipfilter_cache[nic_i][idx].scat_b0 = scat_b0;
++#endif
++ host_ipfilter_cache[nic_i][idx].addr_valid = 1;
++ mmiowb();
++}
++
++#if FALCON_VERIFY_FILTERS
++/* Check that all active filters still exist by reading from H/W */
++static void _falcon_nic_ipfilter_sanity(struct efhw_nic *nic)
++{
++ unsigned i;
++ struct falcon_cached_ipfilter *f;
++ uint64_t q0_expect, q1_expect, q0_got, q1_got;
++
++ for (i = 0; i < nic->filter_tbl_size; i++) {
++ f = host_ipfilter_cache[nic->index] + i;
++ if (!f->addr_valid)
++ continue;
++
++ _falcon_nic_ipfilter_build(nic, f->tcp, f->full,
++ f->rss_b0, f->scat_b0, i, f->dmaq,
++ f->saddr_le32, f->sport_le16,
++ f->daddr_le32, f->dport_le16,
++ &q0_expect, &q1_expect);
++
++ falcon_read_qq(EFHW_KVA(nic) + RX_FILTER_TBL0_OFST +
++ i * 2 * FALCON_REGISTER128,
++ &q0_got, &q1_got);
++
++ if ((q0_got != q0_expect) || (q1_got != q1_expect)) {
++ falcon_write_qq(EFHW_KVA(nic) + 0x300,
++ q0_got, q1_got);
++ EFHW_ERR("ERROR: RX-filter[%d][%d] was "
++ "%"PRIx64":%" PRIx64" expected "
++ "%"PRIx64":%"PRIx64,
++ nic->index, i, q0_got, q1_got,
++ q0_expect, q1_expect);
++ }
++ }
++}
++#endif /* FALCON_VERIFY_FILTERS */
++
++#if FALCON_FULL_FILTER_CACHE
++static inline int
++host_ipfilter_cache_check_not(uint nic, uint idx, int tcp, int full,
++ unsigned saddr_le32, unsigned sport_le16,
++ unsigned daddr_le32, unsigned dport_le16)
++{
++ return ((host_ipfilter_cache[nic][idx].saddr_le32 != saddr_le32) ||
++ (host_ipfilter_cache[nic][idx].daddr_le32 != daddr_le32) ||
++ (host_ipfilter_cache[nic][idx].sport_le16 != sport_le16) ||
++ (host_ipfilter_cache[nic][idx].dport_le16 != dport_le16) ||
++ (host_ipfilter_cache[nic][idx].tcp != tcp) ||
++ (host_ipfilter_cache[nic][idx].full != full));
++}
++#endif
++
++#define host_ipfilter_cache_saddr_le32(nic, idx) \
++ host_ipfilter_cache[nic][idx].saddr_le32
++#define host_ipfilter_cache_daddr_le32(nic, idx) \
++ host_ipfilter_cache[nic][idx].daddr_le32
++#define host_ipfilter_cache_sport_le16(nic, idx) \
++ host_ipfilter_cache[nic][idx].sport_le16
++#define host_ipfilter_cache_dport_le16(nic, idx) \
++ host_ipfilter_cache[nic][idx].dport_le16
++#define host_ipfilter_cache_tcp(nic, idx) \
++ host_ipfilter_cache[nic][idx].tcp
++#define host_ipfilter_cache_full(nic, idx) \
++ host_ipfilter_cache[nic][idx].full
++
++/**********************************************************************
++ * Implementation of the HAL. ********************************************
++ **********************************************************************/
++
++/*----------------------------------------------------------------------------
++ *
++ * Initialisation and configuration discovery
++ *
++ *---------------------------------------------------------------------------*/
++
++#ifdef __ci_ul_driver__
++
++static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable)
++{
++ EFHW_ERR("%s: not implemented for ul driver", __FUNCTION__);
++ return -EOPNOTSUPP;
++}
++
++#else
++
++static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable)
++{
++ /* create a buffer for the irq channel */
++ int rc;
++
++ if (enable) {
++ rc = efhw_iopage_alloc(nic, &nic->irq_iobuff);
++ if (rc < 0)
++ return rc;
++
++ falcon_nic_irq_addr_set(nic,
++ efhw_iopage_dma_addr(&nic->irq_iobuff));
++ } else {
++ if (efhw_iopage_is_valid(&nic->irq_iobuff))
++ efhw_iopage_free(nic, &nic->irq_iobuff);
++
++ efhw_iopage_mark_invalid(&nic->irq_iobuff);
++ falcon_nic_irq_addr_set(nic, 0);
++ }
++
++ EFHW_TRACE("%s: " ci_dma_addr_fmt " %sable", __FUNCTION__,
++ efhw_iopage_dma_addr(&nic->irq_iobuff), enable ?
++ "en" : "dis");
++
++ return 0;
++}
++
++#endif
++
++static void falcon_nic_close_hardware(struct efhw_nic *nic)
++{
++ /* check we are in possession of some hardware */
++ if (!efhw_nic_have_hw(nic))
++ return;
++
++ falcon_nic_init_irq_channel(nic, 0);
++
++ EFHW_NOTICE("%s:", __FUNCTION__);
++}
++
++#ifdef __ci_ul_driver__
++extern
++#else
++static
++#endif
++int falcon_nic_get_mac_config(struct efhw_nic *nic)
++{
++ efhw_ioaddr_t efhw_kva = nic->bar_ioaddr;
++ int is_mac_type_1g;
++ uint32_t strap, altera;
++ uint64_t rx_cfg, r;
++
++ altera = readl(efhw_kva + ALTERA_BUILD_REG_OFST);
++ strap = readl(efhw_kva + STRAP_REG_KER_OFST) & 0x7;
++
++ switch (nic->devtype.variant) {
++ case 'A':
++ if ((altera & 0x0fff0000) == 0x1130000) {
++ strap = 2; /* FPGA - PCI-X 2G */
++ } else if ((altera & 0x00ff0000) == 0x140000) {
++ /* should be 114 */
++ strap = 4; /* FPGA - PCI-X 4G */
++ } else if (strap < 2 || strap > 5) {
++			EFHW_ERR("Invalid strap option %d altera_build_ver=%x",
++ strap, altera);
++ return -EINVAL;
++ }
++ is_mac_type_1g = (0 != (strap & 2));
++ break;
++ case 'B':
++		/* Runtime check that the hardware and software agree about
++		 * the size of the RXFIFO. Write binary 11 across the
++		 * left-most bit, and assert we get 1 back.
++		 */
++ r = 1LL << RX_TOEP_TCP_SUPPRESS_B0_LBN;
++ r |= (r << 1);
++
++ /* Save the original value */
++ falcon_read_q(efhw_kva + RX_CFG_REG_OFST, &rx_cfg);
++
++		/* Write and read back the dummy value */
++ falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, r, 0);
++ falcon_read_q(efhw_kva + RX_CFG_REG_OFST, &r);
++
++ /* Restore the original value */
++ falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, rx_cfg, 0);
++
++ if (r != (1LL << RX_TOEP_TCP_SUPPRESS_B0_LBN)) {
++ EFHW_ERR("The FPGA build (%x) RXFIFO size does not "
++ "match the software", altera);
++ return -EINVAL;
++ }
++ is_mac_type_1g = (0 != (strap & 2));
++#if FALCON_MAC_SET_TYPE_BY_SPEED
++ /* Check the selected strap pins against the MAC speed -
++ * and adjust if necessary.
++ */
++ {
++ int speed;
++ speed = readl(efhw_kva + MAC0_CTRL_REG_OFST) & 0x3;
++ is_mac_type_1g = (speed <= 2);
++ }
++#endif
++ break;
++ default:
++ EFHW_ASSERT(0);
++ is_mac_type_1g = 0;
++ break;
++ }
++
++ nic->fpga_version = altera;
++
++ /* We can now set the MAC type correctly based on the strap pins. */
++ if (is_mac_type_1g) {
++ nic->flags &= ~NIC_FLAG_10G;
++ } else {
++ /* strap & 4 must be set according to checks above */
++ nic->flags |= NIC_FLAG_10G;
++ }
++ EFHW_NOTICE("Board has %s MAC: strap=%d",
++ 0 != (nic->flags & NIC_FLAG_10G) ? "10G" : "1G", strap);
++ return 0;
++}
++
++static int
++falcon_nic_init_hardware(struct efhw_nic *nic,
++ struct efhw_ev_handler *ev_handlers,
++ const uint8_t *mac_addr)
++{
++ int rc;
++
++ /* header sanity checks */
++ FALCON_ASSERT_VALID();
++
++ rc = falcon_nic_get_mac_config(nic);
++ if (rc < 0)
++ return rc;
++
++ /* Initialise supporting modules */
++ falcon_nic_ipfilter_ctor(nic);
++
++#if FALCON_USE_SHADOW_BUFFER_TABLE
++ CI_ZERO_ARRAY(_falcon_buffer_table, FALCON_BUFFER_TBL_NUM);
++#endif
++
++ /* Initialise the top level hardware blocks */
++ memcpy(nic->mac_addr, mac_addr, ETH_ALEN);
++
++ EFHW_TRACE("%s:", __FUNCTION__);
++
++ /* nic.c:efhw_nic_init marks all the interrupt units as unused.
++
++ ?? TODO we should be able to request the non-interrupting event
++ queue and the net driver's (for a net driver that is using libefhw)
++ additional RSS queues here.
++
++	   Result would be that the net driver could call
++ nic.c:efhw_nic_allocate_common_hardware_resources() and that the
++ IFDEF FALCON's can be removed from
++ nic.c:efhw_nic_allocate_common_hardware_resources()
++ */
++ nic->irq_unit[0] = INT_EN_REG_CHAR_OFST;
++
++ /*****************************************************************
++	 * The rest of this function deals with initialization of the NIC's
++ * hardware (as opposed to the initialization of the
++ * struct efhw_nic data structure */
++
++ /* char driver grabs SRM events onto the non interrupting
++ * event queue */
++ falcon_nic_srm_upd_evq(nic, FALCON_EVQ_NONIRQ);
++
++ /* RXDP tweaks */
++
++ /* ?? bug2396 rx_cfg should be ok so long as the net driver
++ * always pushes buffers big enough for the link MTU */
++
++ /* set the RX buffer cutoff size to be the same as PAGE_SIZE.
++ * Use this value when we think that there will be a lot of
++ * jumbo frames.
++ *
++	 * The default value 1600 is useful when packets are small,
++	 * but would mean that jumbo frame RX queues would need more
++	 * descriptors pushed */
++ falcon_nic_set_rx_usr_buf_size(nic, FALCON_RX_USR_BUF_SIZE);
++
++ /* TXDP tweaks */
++ /* ?? bug2396 looks ok */
++ falcon_nic_tx_cfg(nic, /*unlocked(for non-UDP/TCP)= */ 0);
++ falcon_nic_pace_cfg(nic);
++
++ /* ?? bug2396
++ * netdriver must load first or else must RMW this register */
++ falcon_nic_rx_filter_ctl_set(nic, RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL,
++ RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD,
++ RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL,
++ RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD);
++
++ if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
++ rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR, &nic->evq[0],
++ ev_handlers);
++ if (rc < 0) {
++ EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d",
++ __FUNCTION__, rc, FALCON_EVQ_CHAR);
++ return rc;
++ }
++ }
++ rc = efhw_keventq_ctor(nic, FALCON_EVQ_NONIRQ,
++ &nic->evq[FALCON_EVQ_NONIRQ], NULL);
++ if (rc < 0) {
++ EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d",
++ __FUNCTION__, rc, FALCON_EVQ_NONIRQ);
++ return rc;
++ }
++
++ /* allocate IRQ channel */
++ rc = falcon_nic_init_irq_channel(nic, 1);
++ /* ignore failure at user-level for eftest */
++ if ((rc < 0) && !(nic->options & NIC_OPT_EFTEST))
++ return rc;
++
++ return 0;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Interrupt
++ *
++ *--------------------------------------------------------------------*/
++
++static void
++falcon_nic_interrupt_enable(struct efhw_nic *nic, unsigned idx)
++{
++ int evq;
++
++ if (idx || (nic->flags & NIC_FLAG_NO_INTERRUPT))
++ return;
++
++ /* Enable driver interrupts */
++ EFHW_NOTICE("%s: enable master interrupt", __FUNCTION__);
++ falcon_nic_interrupt_hw_enable(nic);
++
++	/* An interrupting eventq must ack its read pointer at start of day */
++ evq = falcon_idx_to_evq(nic, idx);
++
++ if (falcon_evq_is_interrupting(nic, evq)) {
++ struct efhw_keventq *q = &nic->evq[idx];
++ unsigned rdptr =
++ EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t);
++ falcon_nic_evq_ack(nic, evq, rdptr, false);
++ EFHW_NOTICE("%s: ACK evq[%d]:%x", __FUNCTION__, evq, rdptr);
++ }
++}
++
++static void falcon_nic_interrupt_disable(struct efhw_nic *nic, uint idx)
++{
++ /* NB. No need to check for NIC_FLAG_NO_INTERRUPT, as
++ ** falcon_nic_interrupt_hw_disable() will do it. */
++ if (idx)
++ return;
++ falcon_nic_interrupt_hw_disable(nic);
++}
++
++static void
++falcon_nic_set_interrupt_moderation(struct efhw_nic *nic, uint idx,
++ uint32_t val)
++{
++ falcon_timer_cmd(nic, falcon_idx_to_evq(nic, idx),
++ TIMER_MODE_INT_HLDOFF, val / 5);
++}
++
++static inline void legacy_irq_ack(struct efhw_nic *nic)
++{
++ EFHW_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT));
++
++ if (!(nic->flags & NIC_FLAG_MSI)) {
++ writel(1, EFHW_KVA(nic) + INT_ACK_REG_CHAR_A1_OFST);
++ mmiowb();
++ /* ?? FIXME: We should be doing a read here to ensure IRQ is
++ * thoroughly acked before we return from ISR. */
++ }
++}
++
++static int falcon_nic_interrupt(struct efhw_nic *nic)
++{
++ volatile uint32_t *syserr_ptr =
++ (uint32_t *) efhw_iopage_ptr(&nic->irq_iobuff);
++ int handled = 0;
++ int done_ack = 0;
++
++ EFHW_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT));
++ EFHW_ASSERT(syserr_ptr);
++
++ /* FIFO fill level interrupt - just log it. */
++ if (unlikely(*(syserr_ptr + (DW0_OFST / 4)))) {
++ EFHW_WARN("%s: *** FIFO *** %x", __FUNCTION__,
++ *(syserr_ptr + (DW0_OFST / 4)));
++ *(syserr_ptr + (DW0_OFST / 4)) = 0;
++ handled++;
++ }
++
++ /* Fatal interrupts. */
++ if (unlikely(*(syserr_ptr + (DW2_OFST / 4)))) {
++ *(syserr_ptr + (DW2_OFST / 4)) = 0;
++ falcon_nic_handle_fatal_int(nic);
++ handled++;
++ }
++
++ /* Event queue interrupt. For legacy interrupts we have to check
++ * that the interrupt is for us, because it could be shared. */
++ if (*(syserr_ptr + (DW1_OFST / 4))) {
++ *(syserr_ptr + (DW1_OFST / 4)) = 0;
++ /* ACK must come before callback to handler fn. */
++ legacy_irq_ack(nic);
++ done_ack = 1;
++ handled++;
++ if (nic->irq_handler)
++ nic->irq_handler(nic, 0);
++ }
++
++ if (unlikely(!done_ack)) {
++ if (!handled)
++ /* Shared interrupt line (hopefully). */
++ return 0;
++ legacy_irq_ack(nic);
++ }
++
++ EFHW_TRACE("%s: handled %d", __FUNCTION__, handled);
++ return 1;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Event Management - and SW event posting
++ *
++ *--------------------------------------------------------------------*/
++
++static void
++falcon_nic_event_queue_enable(struct efhw_nic *nic, uint evq, uint evq_size,
++ dma_addr_t q_base_addr, /* not used */
++ uint buf_base_id)
++{
++ EFHW_ASSERT(nic);
++
++ /*!\ TODO we can be more efficient if we know whether or not there
++ * is a timer attached */
++ falcon_timer_cmd(nic, evq, 0 /* disable */ , 0);
++
++ falcon_nic_evq_ptr_tbl(nic, evq, 1, buf_base_id, evq_size);
++ EFHW_TRACE("%s: enable evq %u size %u", __FUNCTION__, evq, evq_size);
++}
++
++static void
++falcon_nic_event_queue_disable(struct efhw_nic *nic, uint evq, int timer_only)
++{
++ EFHW_ASSERT(nic);
++
++ /*!\ TODO we can be more efficient if we know whether or not there
++ * is a timer attached */
++ falcon_timer_cmd(nic, evq, 0 /* disable */ , 0);
++
++ if (!timer_only)
++ falcon_nic_evq_ptr_tbl(nic, evq, 0, 0, 0);
++	EFHW_TRACE("%s: disable evq %u", __FUNCTION__, evq);
++}
++
++static void
++falcon_nic_wakeup_request(struct efhw_nic *nic, dma_addr_t q_base_addr,
++ int next_i, int evq)
++{
++ EFHW_ASSERT(evq > FALCON_EVQ_CHAR);
++ falcon_nic_evq_ack(nic, evq, next_i, true);
++ EFHW_TRACE("%s: evq %d next_i %d", __FUNCTION__, evq, next_i);
++}
++
++static void falcon_nic_sw_event(struct efhw_nic *nic, int data, int evq)
++{
++ uint64_t ev_data = data;
++
++ ev_data &= ~FALCON_EVENT_CODE_MASK;
++ ev_data |= FALCON_EVENT_CODE_SW;
++
++ falcon_drv_ev(nic, ev_data, evq);
++ EFHW_NOTICE("%s: evq[%d]->%x", __FUNCTION__, evq, data);
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Filter support - TODO vary the depth of the search
++ *
++ *--------------------------------------------------------------------*/
++
++void
++falcon_nic_ipfilter_ctor(struct efhw_nic *nic)
++{
++ if (nic->devtype.variant == 'B' && nic->fpga_version)
++ nic->filter_tbl_size = 8 * 1024;
++ else
++ nic->filter_tbl_size = 16 * 1024;
++
++ host_ipfilter_cache_init(nic);
++}
++
++
++static int
++falcon_nic_ipfilter_set(struct efhw_nic *nic, int type, int *_filter_idx,
++ int dmaq,
++ unsigned saddr_be32, unsigned sport_be16,
++ unsigned daddr_be32, unsigned dport_be16)
++{
++ FALCON_LOCK_DECL;
++ int tcp;
++ int full;
++ int rss_b0;
++ int scat_b0;
++ int key, hash1, hash2, idx = -1;
++ int k;
++ int rc = 0;
++ unsigned max_srch = -1;
++
++ /* oh joy of joys .. maybe one day we'll optimise */
++ unsigned int saddr = ntohl(saddr_be32);
++ unsigned int daddr = ntohl(daddr_be32);
++ unsigned int sport = ntohs(sport_be16);
++ unsigned int dport = ntohs(dport_be16);
++
++ __RANGECHCK(sport, SRC_TCP_DEST_UDP_1_WIDTH);
++ __RANGECHCK(dport, SRC_TCP_DEST_UDP_1_WIDTH);
++
++ tcp = ((type & EFHW_IP_FILTER_TYPE_TCP_MASK) != 0) ? 1 : 0;
++ full = ((type & EFHW_IP_FILTER_TYPE_FULL_MASK) != 0) ? 1 : 0;
++ rss_b0 = ((type & EFHW_IP_FILTER_TYPE_RSS_B0_MASK) != 0) ? 1 : 0;
++ scat_b0 = ((type & EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK) != 0) ? 0 : 1;
++ if (tcp && full)
++ max_srch = tcp_full_srch_limit;
++ else if (tcp && !full)
++ max_srch = tcp_wild_srch_limit;
++ else if (!tcp && full)
++ max_srch = udp_full_srch_limit;
++ else if (!tcp && !full)
++ max_srch = udp_wild_srch_limit;
++
++ EFHW_TRACE("%s: %x tcp %d full %d max_srch=%d",
++ __FUNCTION__, type, tcp, full, max_srch);
++
++ /* The second hash function is simply
++ * h2(key) = 13 LSB of (key * 2 - 1)
++ * And the index(k), or the filter table address for kth search is
++ * index(k) = 13 LSB of (h1(key) + k * h2(key))
++ */
++ key = falcon_hash_get_key(saddr, sport, daddr, dport, tcp, full);
++ hash1 = falcon_hash_function1(key, nic->filter_tbl_size);
++ hash2 = falcon_hash_function2(key, nic->filter_tbl_size);
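++
++	/* Worked example (illustrative): with the default 16K-entry filter
++	 * table the per-bank mask is 0x1fff, so the kth probe visits
++	 * (hash1 + k * hash2) & 0x1fff; the 8K FPGA table uses a 12-bit
++	 * mask instead.  See falcon_hash_iterator(). */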
++
++ /* Avoid race to claim a filter entry */
++ FALCON_LOCK_LOCK(nic);
++
++ for (k = 0; (unsigned)k < max_srch; k++) {
++ idx = falcon_hash_iterator(hash1, hash2, k,
++ nic->filter_tbl_size);
++
++ EFHW_TRACE("ipfilter_set[%d:%d:%d]: src=%x:%d dest=%x:%d %s",
++ *_filter_idx, idx, k,
++ saddr, sport, daddr, dport,
++ host_ipfilter_cache_active(nic, idx) ?
++ "Active" : "Clear");
++
++ if (!host_ipfilter_cache_active(nic, idx))
++ break;
++
++#if FALCON_FULL_FILTER_CACHE
++ /* Check that we are not duplicating the filter */
++ if (!host_ipfilter_cache_check_not(nic->index, idx, tcp, full,
++ saddr, sport, daddr,
++ dport)) {
++ EFHW_WARN("%s: ERROR: duplicate filter (disabling "
++ "interrupts)", __FUNCTION__);
++ FALCON_LOCK_UNLOCK(nic);
++ falcon_nic_interrupt_hw_disable(nic);
++ return -EINVAL;
++ }
++#endif
++
++ }
++ if (k == max_srch) {
++ rc = -EADDRINUSE;
++ idx = -1;
++ goto fail1;
++ }
++
++ EFHW_ASSERT(idx < (int)nic->filter_tbl_size);
++
++ host_ipfilter_cache_set_addr(nic, idx, dmaq, tcp, full, rss_b0,
++ scat_b0, saddr, sport, daddr, dport);
++
++ _falcon_nic_ipfilter_set(nic, tcp, full, rss_b0,
++ scat_b0, idx, dmaq,
++ saddr, sport, daddr, dport);
++
++ *_filter_idx = idx;
++
++ EFHW_TRACE("%s: filter %x rxq %d src " NIPQUAD_FMT
++ ":%d dest " NIPQUAD_FMT ":%d set in %d",
++ __FUNCTION__, idx, dmaq,
++ NIPQUAD(&saddr), sport, NIPQUAD(&daddr), dport, k);
++
++fail1:
++ FALCON_LOCK_UNLOCK(nic);
++ return rc;
++}
++
++static void
++falcon_nic_ipfilter_attach(struct efhw_nic *nic, int filter_idx, int dmaq_idx)
++{
++ /* Intentionally empty - Falcon attaches and sets the filter
++ * in filter_set */
++ EFHW_TRACE("%s: attach filter %x with rxq %d - ignored",
++ __FUNCTION__, filter_idx, dmaq_idx);
++}
++
++static void falcon_nic_ipfilter_detach(struct efhw_nic *nic, int filter_idx)
++{
++ /* Intentionally empty - Falcon attaches and sets the filter
++ * in filter_clear */
++ EFHW_TRACE("%s: detach filter %x from rxq - ignored",
++ __FUNCTION__, filter_idx);
++}
++
++static void falcon_nic_ipfilter_clear(struct efhw_nic *nic, int filter_idx)
++{
++ FALCON_LOCK_DECL;
++
++ EFHW_TRACE("%s: filter %x", __FUNCTION__, filter_idx);
++
++ /* In case the filter has already been freed */
++ if (filter_idx == -1)
++ return;
++
++ FALCON_LOCK_LOCK(nic);
++
++ /* if we flush a chained hash then all we need to do is zero it out */
++ host_ipfilter_cache_flush(nic, filter_idx);
++ _falcon_nic_ipfilter_clear(nic, filter_idx);
++
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Buffer table - helpers
++ *
++ *--------------------------------------------------------------------*/
++
++#define FALCON_LAZY_COMMIT_HWM (FALCON_BUFFER_UPD_MAX - 16)
++
++/* Note re.:
++ * falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic)
++ * falcon_nic_buffer_table_update_poll(struct efhw_nic *nic)
++ * falcon_nic_buffer_table_confirm(struct efhw_nic *nic)
++ * -- these are no-ops in the user-level driver because it would need to
++ * coordinate with the real driver on the number of outstanding commits.
++ *
++ * An exception is made for eftest apps, which manage the hardware without
++ * using the char driver.
++ */
++
++static inline void falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic)
++{
++#if defined(__ci_ul_driver__)
++ if (!(nic->options & NIC_OPT_EFTEST))
++ return;
++#endif
++
++ /* Do nothing if operating in synchronous mode. */
++ if (!nic->irq_handler)
++ return;
++}
++
++static inline void falcon_nic_buffer_table_update_poll(struct efhw_nic *nic)
++{
++ FALCON_LOCK_DECL;
++ int count = 0, rc = 0;
++
++#if defined(__ci_ul_driver__)
++ if (!(nic->options & NIC_OPT_EFTEST))
++ return;
++#endif
++
++	/* We can be called here early in initialisation */
++ if (!nic->irq_handler)
++ return;
++
++ /* If we need to gather buffer update events then poll the
++ non-interrupting event queue */
++
++ /* For each _buffer_table_commit there will be an update done
++ event. We don't keep track of how many buffers each commit has
++ committed, just make sure that all the expected events have been
++ gathered */
++ FALCON_LOCK_LOCK(nic);
++
++ EFHW_TRACE("%s: %d", __FUNCTION__, nic->buf_commit_outstanding);
++
++ while (nic->buf_commit_outstanding > 0) {
++ /* we're not expecting to handle any events that require
++ * upcalls into the core driver */
++ struct efhw_ev_handler handler;
++ memset(&handler, 0, sizeof(handler));
++ nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = &handler;
++ rc = efhw_keventq_poll(nic, &nic->evq[FALCON_EVQ_NONIRQ]);
++ nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = NULL;
++
++ if (rc < 0) {
++ EFHW_ERR("%s: poll ERROR (%d:%d) ***** ",
++ __FUNCTION__, rc,
++ nic->buf_commit_outstanding);
++ goto out;
++ }
++
++ FALCON_LOCK_UNLOCK(nic);
++
++ if (count++)
++ udelay(1);
++
++ if (count > 1000) {
++ EFHW_WARN("%s: poll Timeout ***** (%d)", __FUNCTION__,
++ nic->buf_commit_outstanding);
++ nic->buf_commit_outstanding = 0;
++ return;
++ }
++ FALCON_LOCK_LOCK(nic);
++ }
++
++out:
++ FALCON_LOCK_UNLOCK(nic);
++ return;
++}
++
++void falcon_nic_buffer_table_confirm(struct efhw_nic *nic)
++{
++	/* confirm buffer table updates - should be used for items where
++	   loss of data would be unacceptable, e.g. for the buffers that
++	   back an event or DMA queue */
++ FALCON_LOCK_DECL;
++
++#if defined(__ci_ul_driver__)
++ if (!(nic->options & NIC_OPT_EFTEST))
++ return;
++#endif
++
++ /* Do nothing if operating in synchronous mode. */
++ if (!nic->irq_handler)
++ return;
++
++ FALCON_LOCK_LOCK(nic);
++
++ _falcon_nic_buffer_table_commit(nic);
++
++ FALCON_LOCK_UNLOCK(nic);
++
++ falcon_nic_buffer_table_update_poll(nic);
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Buffer table - API
++ *
++ *--------------------------------------------------------------------*/
++
++static void
++falcon_nic_buffer_table_clear(struct efhw_nic *nic, int buffer_id, int num)
++{
++ FALCON_LOCK_DECL;
++ FALCON_LOCK_LOCK(nic);
++ _falcon_nic_buffer_table_clear(nic, buffer_id, num);
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++static void
++falcon_nic_buffer_table_set(struct efhw_nic *nic, dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int own_id, int buffer_id)
++{
++ FALCON_LOCK_DECL;
++
++ EFHW_ASSERT(region < FALCON_REGION_NUM);
++
++ EFHW_ASSERT((bufsz == EFHW_4K) ||
++ (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE));
++
++ falcon_nic_buffer_table_update_poll(nic);
++
++ FALCON_LOCK_LOCK(nic);
++
++ _falcon_nic_buffer_table_set(nic, dma_addr, bufsz, region, own_id,
++ buffer_id);
++
++ falcon_nic_buffer_table_lazy_commit(nic);
++
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++void
++falcon_nic_buffer_table_set_n(struct efhw_nic *nic, int buffer_id,
++ dma_addr_t dma_addr, uint bufsz, uint region,
++ int n_pages, int own_id)
++{
++ /* used to set up a contiguous range of buffers */
++ FALCON_LOCK_DECL;
++
++ EFHW_ASSERT(region < FALCON_REGION_NUM);
++
++ EFHW_ASSERT((bufsz == EFHW_4K) ||
++ (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE));
++
++ while (n_pages--) {
++
++ falcon_nic_buffer_table_update_poll(nic);
++
++ FALCON_LOCK_LOCK(nic);
++
++ _falcon_nic_buffer_table_set(nic, dma_addr, bufsz, region,
++ own_id, buffer_id++);
++
++ falcon_nic_buffer_table_lazy_commit(nic);
++
++ FALCON_LOCK_UNLOCK(nic);
++
++ dma_addr += bufsz;
++ }
++}
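++
++/* Typical usage (an illustrative sketch; base_id/own_id stand in for
++ * whatever instances the caller allocated): map a contiguous run of 4K
++ * pages, then confirm, since such buffers commonly back an event or
++ * DMA queue:
++ *
++ *	falcon_nic_buffer_table_set_n(nic, base_id, dma_addr, EFHW_4K,
++ *				      0, n_pages, own_id);
++ *	falcon_nic_buffer_table_confirm(nic);
++ */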
++
++/*--------------------------------------------------------------------
++ *
++ * DMA Queues - mid level API
++ *
++ *--------------------------------------------------------------------*/
++
++#if BUG5302_WORKAROUND
++
++/* Tx queues can get stuck if the software write pointer is set to an index
++ * beyond the configured size of the queue, such that they will not flush.
++ * This code can be run before attempting a flush; it will detect the bogus
++ * value and reset it. This fixes most instances of this problem, although
++ * sometimes it does not work, or we may not detect it in the first place,
++ * if the out-of-range value was replaced by an in-range value earlier.
++ * (In those cases we have to apply a bigger hammer later, if we see that
++ * the queue is still not flushing.)
++ */
++static void
++falcon_check_for_bogus_tx_dma_wptr(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ uint64_t val_low64, val_high64;
++ uint64_t size, hwptr, swptr, val;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64);
++ FALCON_LOCK_UNLOCK(nic);
++
++ size = (val_low64 >> TX_DESCQ_SIZE_LBN)
++ & __FALCON_MASK64(TX_DESCQ_SIZE_WIDTH);
++ size = (1 << size) * 512;
++ hwptr = (val_high64 >> __DW3(TX_DESCQ_HW_RPTR_LBN))
++ & __FALCON_MASK64(TX_DESCQ_HW_RPTR_WIDTH);
++ swptr = (val_low64 >> TX_DESCQ_SW_WPTR_LBN)
++ & __FALCON_MASK64(__LW2(TX_DESCQ_SW_WPTR_LBN));
++	val = val_high64 &
++	      __FALCON_MASK64(__DW3(TX_DESCQ_SW_WPTR_LBN +
++				    TX_DESCQ_SW_WPTR_WIDTH));
++	val <<= __LW2(TX_DESCQ_SW_WPTR_LBN);
++	swptr |= val;
++
++ if (swptr >= size) {
++ EFHW_WARN("Resetting bad write pointer for TXQ[%d]", dmaq);
++ writel((uint32_t) ((hwptr + 0) & (size - 1)),
++ efhw_kva + falcon_tx_dma_page_addr(dmaq) + 12);
++ mmiowb();
++ }
++}
++
++/* Here's that "bigger hammer": we reset all the pointers (hardware read,
++ * hardware descriptor cache read, software write) to zero.
++ */
++void falcon_clobber_tx_dma_ptrs(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ uint64_t val_low64, val_high64;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
++
++ EFHW_WARN("Recovering stuck TXQ[%d]", dmaq);
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64);
++ val_high64 &= ~(__FALCON_MASK64(TX_DESCQ_HW_RPTR_WIDTH)
++ << __DW3(TX_DESCQ_HW_RPTR_LBN));
++ val_high64 &= ~(__FALCON_MASK64(TX_DC_HW_RPTR_WIDTH)
++ << __DW3(TX_DC_HW_RPTR_LBN));
++ falcon_write_qq(efhw_kva + offset, val_low64, val_high64);
++ mmiowb();
++ writel(0, efhw_kva + falcon_tx_dma_page_addr(dmaq) + 12);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++}
++
++#endif
++
++static inline int
++__falcon_really_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ uint val;
++
++ EFHW_BUILD_ASSERT(TX_FLUSH_DESCQ_REG_KER_OFST ==
++ TX_FLUSH_DESCQ_REG_OFST);
++
++ __DWCHCK(TX_FLUSH_DESCQ_CMD_LBN, TX_FLUSH_DESCQ_CMD_WIDTH);
++ __DWCHCK(TX_FLUSH_DESCQ_LBN, TX_FLUSH_DESCQ_WIDTH);
++ __RANGECHCK(dmaq, TX_FLUSH_DESCQ_WIDTH);
++
++ val = ((1 << TX_FLUSH_DESCQ_CMD_LBN) | (dmaq << TX_FLUSH_DESCQ_LBN));
++
++ EFHW_TRACE("TX DMA flush[%d]", dmaq);
++
++#if BUG5302_WORKAROUND
++ falcon_check_for_bogus_tx_dma_wptr(nic, dmaq);
++#endif
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + TX_FLUSH_DESCQ_REG_OFST,
++ val, FALCON_ATOMIC_TX_FLUSH_DESCQ);
++
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return 0;
++}
++
++static inline int
++__falcon_is_tx_dma_channel_flushed(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ uint64_t val_low64, val_high64;
++ uint64_t enable, flush_pending;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ ulong offset = falcon_dma_tx_q_offset(nic, dmaq);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64);
++ FALCON_LOCK_UNLOCK(nic);
++
++ /* should see one of three values for these 2 bits
++ * 1, queue enabled no flush pending
++ * - i.e. first flush request
++ * 2, queue enabled, flush pending
++ * - i.e. request to reflush before flush finished
++ * 3, queue disabled (no flush pending)
++ * - flush complete
++ */
++ __DWCHCK(TX_DESCQ_FLUSH_LBN, TX_DESCQ_FLUSH_WIDTH);
++ __DW3CHCK(TX_DESCQ_EN_LBN, TX_DESCQ_EN_WIDTH);
++ enable = val_high64 & (1 << __DW3(TX_DESCQ_EN_LBN));
++ flush_pending = val_low64 & (1 << TX_DESCQ_FLUSH_LBN);
++
++ if (enable && !flush_pending)
++ return 0;
++
++ EFHW_TRACE("%d, %s: %s, %sflush pending", dmaq, __FUNCTION__,
++ enable ? "enabled" : "disabled",
++ flush_pending ? "" : "NO ");
++ /* still in progress */
++ if (enable && flush_pending)
++ return -EALREADY;
++
++ return -EAGAIN;
++}
++
++static int falcon_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq)
++{
++ int rc;
++ rc = __falcon_is_tx_dma_channel_flushed(nic, dmaq);
++ if (rc < 0) {
++ EFHW_WARN("%s: failed %d", __FUNCTION__, rc);
++ return rc;
++ }
++ return __falcon_really_flush_tx_dma_channel(nic, dmaq);
++}
++
++static int
++__falcon_really_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ uint val;
++
++ EFHW_BUILD_ASSERT(RX_FLUSH_DESCQ_REG_KER_OFST ==
++ RX_FLUSH_DESCQ_REG_OFST);
++
++ __DWCHCK(RX_FLUSH_DESCQ_CMD_LBN, RX_FLUSH_DESCQ_CMD_WIDTH);
++ __DWCHCK(RX_FLUSH_DESCQ_LBN, RX_FLUSH_DESCQ_WIDTH);
++ __RANGECHCK(dmaq, RX_FLUSH_DESCQ_WIDTH);
++
++ val = ((1 << RX_FLUSH_DESCQ_CMD_LBN) | (dmaq << RX_FLUSH_DESCQ_LBN));
++
++ EFHW_TRACE("RX DMA flush[%d]", dmaq);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_write_qq(efhw_kva + RX_FLUSH_DESCQ_REG_OFST, val,
++ FALCON_ATOMIC_RX_FLUSH_DESCQ);
++ mmiowb();
++ FALCON_LOCK_UNLOCK(nic);
++ return 0;
++}
++
++static inline int
++__falcon_is_rx_dma_channel_flushed(struct efhw_nic *nic, uint dmaq)
++{
++ FALCON_LOCK_DECL;
++ uint64_t val;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ ulong offset = falcon_dma_rx_q_offset(nic, dmaq);
++
++ /* Falcon requires 128 bit atomic access for this register */
++ FALCON_LOCK_LOCK(nic);
++ falcon_read_q(efhw_kva + offset, &val);
++ FALCON_LOCK_UNLOCK(nic);
++
++ __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH);
++
++ /* is it enabled? */
++ return (val & (1 << RX_DESCQ_EN_LBN))
++ ? 0 : -EAGAIN;
++}
++
++static int falcon_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq)
++{
++ int rc;
++ rc = __falcon_is_rx_dma_channel_flushed(nic, dmaq);
++ if (rc < 0) {
++ EFHW_ERR("%s: failed %d", __FUNCTION__, rc);
++ return rc;
++ }
++ return __falcon_really_flush_rx_dma_channel(nic, dmaq);
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Falcon specific event callbacks
++ *
++ *--------------------------------------------------------------------*/
++
++int
++falcon_handle_char_event(struct efhw_nic *nic, struct efhw_ev_handler *h,
++ efhw_event_t *ev)
++{
++ EFHW_TRACE("DRIVER EVENT: "FALCON_EVENT_FMT,
++ FALCON_EVENT_PRI_ARG(*ev));
++
++ switch (FALCON_EVENT_DRIVER_SUBCODE(ev)) {
++
++ case TX_DESCQ_FLS_DONE_EV_DECODE:
++ EFHW_TRACE("TX[%d] flushed",
++ (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev));
++#if !defined(__ci_ul_driver__)
++ efhw_handle_txdmaq_flushed(nic, h, ev);
++#endif
++ break;
++
++ case RX_DESCQ_FLS_DONE_EV_DECODE:
++ EFHW_TRACE("RX[%d] flushed",
++ (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev));
++#if !defined(__ci_ul_driver__)
++ efhw_handle_rxdmaq_flushed(nic, h, ev);
++#endif
++ break;
++
++ case SRM_UPD_DONE_EV_DECODE:
++ nic->buf_commit_outstanding =
++ max(0, nic->buf_commit_outstanding - 1);
++ EFHW_TRACE("COMMIT DONE %d", nic->buf_commit_outstanding);
++ break;
++
++ case EVQ_INIT_DONE_EV_DECODE:
++ EFHW_TRACE("EVQ INIT");
++ break;
++
++ case WAKE_UP_EV_DECODE:
++ EFHW_TRACE("WAKE UP");
++ efhw_handle_wakeup_event(nic, h, ev);
++ break;
++
++ case TIMER_EV_DECODE:
++ EFHW_TRACE("TIMER");
++ efhw_handle_timeout_event(nic, h, ev);
++ break;
++
++ case RX_DESCQ_FLSFF_OVFL_EV_DECODE:
++ /* This shouldn't happen. */
++ EFHW_ERR("%s: RX flush fifo overflowed", __FUNCTION__);
++ return -EINVAL;
++
++ default:
++		EFHW_TRACE("UNKNOWN DRIVER EVENT: " FALCON_EVENT_FMT,
++ FALCON_EVENT_PRI_ARG(*ev));
++ break;
++ }
++ return 0;
++}
++
++/*--------------------------------------------------------------------
++ *
++ * Abstraction Layer Hooks
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_func_ops falcon_char_functional_units = {
++ falcon_nic_close_hardware,
++ falcon_nic_init_hardware,
++ falcon_nic_interrupt,
++ falcon_nic_interrupt_enable,
++ falcon_nic_interrupt_disable,
++ falcon_nic_set_interrupt_moderation,
++ falcon_nic_event_queue_enable,
++ falcon_nic_event_queue_disable,
++ falcon_nic_wakeup_request,
++ falcon_nic_sw_event,
++ falcon_nic_ipfilter_set,
++ falcon_nic_ipfilter_attach,
++ falcon_nic_ipfilter_detach,
++ falcon_nic_ipfilter_clear,
++ falcon_dmaq_tx_q_init,
++ falcon_dmaq_rx_q_init,
++ falcon_dmaq_tx_q_disable,
++ falcon_dmaq_rx_q_disable,
++ falcon_flush_tx_dma_channel,
++ falcon_flush_rx_dma_channel,
++ falcon_nic_buffer_table_set,
++ falcon_nic_buffer_table_set_n,
++ falcon_nic_buffer_table_clear,
++ falcon_nic_buffer_table_commit,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/falcon_hash.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/falcon_hash.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/falcon_hash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/falcon_hash.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,178 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains the EtherFabric NIC hash algorithm implementations.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efhw/debug.h>
++#include <ci/driver/efab/hardware.h>
++
++
++/* this mask is per filter bank hence /2 */
++#define FILTER_MASK(n) ((n) / 2u - 1u)
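++/* e.g. FILTER_MASK(16 * 1024) == 0x1fff: each of the two banks of a
++ * 16K-entry table holds 8K entries, hence the 13-bit index. */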
++
++/*
++ * Main Functions related to the Hash Table Generation
++ * Author: Srinivasaih, Nataraj
++ * Created: Thu May 13:32:41 PDT 2004
++ * $Id: falcon_hash.c,v 1.20 2008/01/29 08:28:56 ok_sasha Exp $
++ */
++/***************************************************************************
++Class Maximum number of Valid address ranges
++ hosts per network
++A 16777214 1.0.0.1 through 9.255.255.254
++ 11.0.0.1 through 126.255.255.254
++B 65534 128.0.0.1 through 172.15.255.254
++ 172.32.0.1 through 191.255.255.254
++C 254 192.0.0.1 through 192.167.255.254
++ 192.169.0.1 through 223.255.255.254
++P 16777214 10.0.0.1 through 10.255.255.254 (10/8)
++ 1048574 172.16.0.1 through 172.31.255.254 (172.16/12)
++ 65534 192.168.0.1 through 192.168.255.254 (192.168/16)
++
++R - 0.0.0.0 through 0.255.255.255
++ (used if host will be assigned a
++ valid address dynamically)
++ 127.0.0.0 through 127.255.255.255
++ (loopback addresses)
++
++P : Private internets only
++R : Reserved
++****************************************************************************/
++
++/* All LE parameters */
++unsigned int
++falcon_hash_get_key(unsigned int src_ip, unsigned int src_port,
++ unsigned int dest_ip, unsigned int dest_port,
++ int tcp, int full)
++{
++
++ unsigned int result = 0;
++ int net_type;
++
++ EFHW_ASSERT(tcp == 0 || tcp == 1);
++ EFHW_ASSERT(full == 0 || full == 1);
++
++ net_type = tcp << 4 | full;
++
++	/* Note that src_ip and src_port are ignored for a wildcard filter */
++ switch (net_type) {
++ case 0x01: /* UDP Full */
++ result = ((dest_ip & 0xfffffffe) | (!(dest_ip & 1))) ^
++ (((dest_port << 16) & 0xFFFF0000) |
++ ((src_ip >> 16) & 0x0000FFFF)) ^
++ (((src_ip << 16) & 0xFFFF0000) |
++ ((src_port & 0x0000FFFF)));
++ EFHW_TRACE("falcon_hash_get_key: UDP Full %x", result);
++ break;
++ case 0x00: /* UDP Wild Card */
++ result = ((dest_ip & 0xfffffffe) | (!(dest_ip & 1))) ^
++ (((dest_port << 16) & 0x00000000) |
++ ((src_ip >> 16) & 0x00000000)) ^
++ (((src_ip << 16) & 0x00000000) |
++ ((dest_port & 0x0000FFFF)));
++ EFHW_TRACE("falcon_hash_get_key: UDP Wildcard %x", result);
++ break;
++ case 0x10: /* TCP Wild Card */
++ result = (dest_ip) ^
++ (((dest_port << 16) & 0xFFFF0000) |
++ ((src_ip >> 16) & 0x00000000)) ^
++ (((src_ip << 16) & 0x00000000) |
++ ((src_port & 0x00000000)));
++ EFHW_TRACE("falcon_hash_get_key: TCP Wildcard %x", result);
++ break;
++ case 0x11: /* TCP Full */
++ result = (dest_ip) ^
++ (((dest_port << 16) & 0xFFFF0000) |
++ ((src_ip >> 16) & 0x0000FFFF)) ^
++ (((src_ip << 16) & 0xFFFF0000) |
++ ((src_port & 0x0000FFFF)));
++ EFHW_TRACE("falcon_hash_get_key: TCP Full %x", result);
++ break;
++ default:
++ EFHW_ASSERT(0);
++
++ }
++ return (result);
++}
++
++/* This function generates the First Hash key */
++unsigned int falcon_hash_function1(unsigned int key, unsigned int nfilters)
++{
++
++ unsigned short int lfsr_reg;
++ unsigned int tmp_key;
++ int index;
++
++ unsigned short int lfsr_input;
++ unsigned short int single_bit_key;
++ unsigned short int bit16_lfsr;
++ unsigned short int bit3_lfsr;
++
++ lfsr_reg = 0xFFFF;
++ tmp_key = key;
++
++ /* For Polynomial equation X^16+X^3+1 */
++ for (index = 0; index < 32; index++) {
++ /* Get the bit from key and shift the key */
++ single_bit_key = (tmp_key & 0x80000000) >> 31;
++ tmp_key = tmp_key << 1;
++
++ /* get the Tap bits to XOR operation */
++ bit16_lfsr = (lfsr_reg & 0x8000) >> 15;
++ bit3_lfsr = (lfsr_reg & 0x0004) >> 2;
++
++ /* Get the Input value to the LFSR */
++ lfsr_input = ((bit16_lfsr ^ bit3_lfsr) ^ single_bit_key);
++
++		/* Shift the LFSR and feed in the new input bit */
++ lfsr_reg = lfsr_reg << 1;
++ lfsr_reg = lfsr_reg | (lfsr_input & 0x0001);
++
++ }
++
++ lfsr_reg = lfsr_reg & FILTER_MASK(nfilters);
++
++ return lfsr_reg;
++}
++
++/* This function generates the Second Hash */
++unsigned int
++falcon_hash_function2(unsigned int key, unsigned int nfilters)
++{
++ return (unsigned int)(((unsigned long long)key * 2 - 1) &
++ FILTER_MASK(nfilters));
++}
++
++/* This function iterates through the hash table */
++unsigned int
++falcon_hash_iterator(unsigned int hash1, unsigned int hash2,
++ unsigned int n_search, unsigned int nfilters)
++{
++ return ((hash1 + (n_search * hash2)) & FILTER_MASK(nfilters));
++}
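++
++/* Putting it together (an illustrative sketch only, hence not built):
++ * the probe sequence that a caller such as falcon_nic_ipfilter_set()
++ * walks for a TCP full-match filter, assuming the usual 16K-entry
++ * filter table. */
++#if 0
++static unsigned int
++example_tcp_full_probe(unsigned int saddr_le32, unsigned int sport_le16,
++		       unsigned int daddr_le32, unsigned int dport_le16,
++		       unsigned int n_search)
++{
++	const unsigned int nfilters = 16 * 1024;
++	unsigned int key, hash1, hash2;
++
++	key = falcon_hash_get_key(saddr_le32, sport_le16,
++				  daddr_le32, dport_le16,
++				  1 /* tcp */, 1 /* full */);
++	hash1 = falcon_hash_function1(key, nfilters);
++	hash2 = falcon_hash_function2(key, nfilters);
++	return falcon_hash_iterator(hash1, hash2, n_search, nfilters);
++}
++#endif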
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/falcon_mac.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/falcon_mac.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/falcon_mac.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/falcon_mac.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,171 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains MAC (Mentor MAC & GDACT10) support for Falcon.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efhw/falcon.h>
++#include <ci/driver/efab/hardware.h>
++
++/********************************************************************
++ * Mentor MAC
++ */
++
++#define _PRE(x) GM##x
++
++/*--------------------------------------------------------------------
++ *
++ * Debug Support
++ *
++ *--------------------------------------------------------------------*/
++
++#define MENTOR_MAC_ASSERT_VALID() \
++ EFHW_ASSERT(nic); \
++ EFHW_ASSERT(EFHW_KVA(nic)); \
++ EFHW_ASSERT(_PRE(_CFG1_REG_OFST) == _PRE(_CFG1_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_CFG2_REG_OFST) == _PRE(_CFG2_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_IPG_REG_OFST) == _PRE(_IPG_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_HD_REG_OFST) == _PRE(_HD_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_MAX_FLEN_REG_OFST) == _PRE(_MAX_FLEN_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_TEST_REG_OFST) == _PRE(_TEST_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_ADR1_REG_OFST) == _PRE(_ADR1_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(_ADR2_REG_OFST) == _PRE(_ADR2_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(F_CFG0_REG_OFST) == _PRE(F_CFG0_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(F_CFG1_REG_OFST) == _PRE(F_CFG1_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(F_CFG2_REG_OFST) == _PRE(F_CFG2_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(F_CFG3_REG_OFST) == _PRE(F_CFG3_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(F_CFG4_REG_OFST) == _PRE(F_CFG4_REG_KER_OFST)); \
++ EFHW_ASSERT(_PRE(F_CFG5_REG_OFST) == _PRE(F_CFG5_REG_KER_OFST));
++
++/*! Get the MAC's current address - i.e. not necessarily the one in the EEPROM */
++static inline void mentormac_get_mac_addr(struct efhw_nic *nic)
++{
++ efhw_ioaddr_t mac_kva;
++ uint val1, val2;
++
++ MENTOR_MAC_ASSERT_VALID();
++
++ mac_kva = GM_P0_BASE + EFHW_KVA(nic);
++
++ val1 = readl(mac_kva + _PRE(_ADR1_REG_OFST));
++ val2 = readl(mac_kva + _PRE(_ADR2_REG_OFST));
++
++#if 0
++ nic->mac_addr[0] = (val1 & 0xff000000) >> 24;
++ nic->mac_addr[1] = (val1 & 0x00ff0000) >> 16;
++ nic->mac_addr[2] = (val1 & 0x0000ff00) >> 8;
++ nic->mac_addr[3] = (val1 & 0x000000ff) >> 0;
++ nic->mac_addr[4] = (val2 & 0xff000000) >> 24;
++ nic->mac_addr[5] = (val2 & 0x00ff0000) >> 16;
++#else
++ nic->mac_addr[5] = (val1 & 0xff000000) >> 24;
++ nic->mac_addr[4] = (val1 & 0x00ff0000) >> 16;
++ nic->mac_addr[3] = (val1 & 0x0000ff00) >> 8;
++ nic->mac_addr[2] = (val1 & 0x000000ff) >> 0;
++ nic->mac_addr[1] = (val2 & 0xff000000) >> 24;
++ nic->mac_addr[0] = (val2 & 0x00ff0000) >> 16;
++#endif
++}
++
++
++/********************************************************************
++ * GDACT10 MAC
++ */
++
++/*--------------------------------------------------------------------
++ *
++ * Debug Support
++ *
++ *--------------------------------------------------------------------*/
++
++#define GDACT10_MAC_ASSERT_VALID() \
++ EFHW_ASSERT(nic); \
++ EFHW_ASSERT(EFHW_KVA(nic)); \
++ EFHW_ASSERT(XM_GLB_CFG_REG_P0_OFST == XM_GLB_CFG_REG_KER_P0_OFST); \
++ EFHW_ASSERT(XM_TX_CFG_REG_P0_OFST == XM_TX_CFG_REG_KER_P0_OFST); \
++ EFHW_ASSERT(XM_RX_CFG_REG_P0_OFST == XM_RX_CFG_REG_KER_P0_OFST); \
++ EFHW_ASSERT(MAC0_SPEED_LBN == MAC1_SPEED_LBN); \
++ EFHW_ASSERT(MAC0_SPEED_WIDTH == MAC1_SPEED_WIDTH); \
++ EFHW_ASSERT(MAC0_LINK_STATUS_LBN == MAC1_LINK_STATUS_LBN); \
++ EFHW_ASSERT(MAC0_LINK_STATUS_WIDTH == MAC1_LINK_STATUS_WIDTH); \
++ EFHW_ASSERT(MAC1_BCAD_ACPT_LBN == MAC0_BCAD_ACPT_LBN); \
++ EFHW_ASSERT(MAC1_UC_PROM_LBN == MAC0_UC_PROM_LBN); \
++ EFHW_ASSERT(MAC0_CTRL_REG_KER_OFST == MAC0_CTRL_REG_OFST); \
++ EFHW_ASSERT(MAC1_CTRL_REG_KER_OFST == MAC1_CTRL_REG_OFST); \
++ EFHW_ASSERT(XM_ADR_LO_REG_KER_P0_OFST == XM_ADR_LO_REG_P0_OFST); \
++ EFHW_ASSERT(XM_ADR_HI_REG_KER_P0_OFST == XM_ADR_HI_REG_P0_OFST); \
++ EFHW_ASSERT(XM_RX_PARAM_REG_KER_P0_OFST == XM_RX_PARAM_REG_P0_OFST);
++
++/*--------------------------------------------------------------------
++ *
++ * Information gathering
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Get the MAC's current address - i.e. not necessarily the one in the EEPROM */
++static inline void GDACT10mac_get_mac_addr(struct efhw_nic *nic)
++{
++ uint val1, val2;
++ efhw_ioaddr_t efhw_kva = EFHW_KVA(nic);
++ FALCON_LOCK_DECL;
++
++ GDACT10_MAC_ASSERT_VALID();
++
++ EFHW_ASSERT(XM_ADR_LO_LBN == 0);
++ EFHW_ASSERT(XM_ADR_LO_WIDTH == 32);
++ EFHW_ASSERT(XM_ADR_HI_LBN == 0);
++ EFHW_ASSERT(XM_ADR_HI_WIDTH == 16);
++
++ FALCON_LOCK_LOCK(nic);
++
++ val1 = readl(efhw_kva + XM_ADR_LO_REG_P0_OFST);
++ val2 = readl(efhw_kva + XM_ADR_HI_REG_P0_OFST);
++
++ FALCON_LOCK_UNLOCK(nic);
++
++ /* The HW scores no points for consistency */
++ nic->mac_addr[5] = (val2 & 0x0000ff00) >> 8;
++ nic->mac_addr[4] = (val2 & 0x000000ff) >> 0;
++ nic->mac_addr[3] = (val1 & 0xff000000) >> 24;
++ nic->mac_addr[2] = (val1 & 0x00ff0000) >> 16;
++ nic->mac_addr[1] = (val1 & 0x0000ff00) >> 8;
++ nic->mac_addr[0] = (val1 & 0x000000ff) >> 0;
++}
++
++
++/********************************************************************
++ * Call one or another function
++ */
++
++void falcon_get_mac_addr(struct efhw_nic *nic)
++{
++ if (nic->flags & NIC_FLAG_10G)
++ GDACT10mac_get_mac_addr(nic);
++ else
++ mentormac_get_mac_addr(nic);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/filter_resource.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/filter_resource.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/filter_resource.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/filter_resource.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,317 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains filters support.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efrm/vi_resource_manager.h>
++#include <ci/efrm/private.h>
++#include <ci/efrm/filter.h>
++#include <ci/efrm/buffer_table.h>
++
++struct filter_resource_manager {
++ struct efrm_resource_manager rm;
++ struct kfifo *free_ids;
++};
++
++static struct filter_resource_manager *efrm_filter_manager;
++
++void efrm_filter_resource_free(struct filter_resource *frs)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++ int id;
++
++ EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 1);
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle));
++
++ /* if we have a PT endpoint */
++ if (NULL != frs->pt) {
++ /* Detach the filter */
++ EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic)
++ efhw_nic_ipfilter_detach(nic, frs->filter_idx);
++
++ /* Release our ref to the PT resource. */
++ EFRM_TRACE("%s: releasing PT resource reference",
++ __FUNCTION__);
++ efrm_vi_resource_release(frs->pt);
++ }
++
++ /* Disable the filter. */
++ EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic)
++ efhw_nic_ipfilter_clear(nic, frs->filter_idx);
++
++ /* Free this filter. */
++ id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle);
++ EFRM_VERIFY_EQ(kfifo_put(efrm_filter_manager->free_ids,
++ (unsigned char *)&id, sizeof(id)),
++ sizeof(id));
++
++ EFRM_DO_DEBUG(memset(frs, 0, sizeof(*frs)));
++ kfree(frs);
++}
++EXPORT_SYMBOL(efrm_filter_resource_free);
++
++static void filter_rm_dtor(struct efrm_resource_manager *rm)
++{
++ EFRM_TRACE("filter_rm_dtor");
++
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
++ EFRM_ASSERT(&efrm_filter_manager->rm == rm);
++
++ kfifo_vfree(efrm_filter_manager->free_ids);
++ EFRM_TRACE("filter_rm_dtor: done");
++}
++
++/**********************************************************************/
++/**********************************************************************/
++/**********************************************************************/
++
++int efrm_create_filter_resource_manager(struct efrm_resource_manager **rm_out)
++{
++ int rc;
++
++ EFRM_ASSERT(rm_out);
++
++ efrm_filter_manager =
++ kmalloc(sizeof(struct filter_resource_manager), GFP_KERNEL);
++ if (efrm_filter_manager == NULL)
++ return -ENOMEM;
++ memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager));
++
++ rc = efrm_resource_manager_ctor(&efrm_filter_manager->rm,
++ filter_rm_dtor, "FILTER",
++ EFRM_RESOURCE_FILTER, 0);
++ if (rc < 0)
++ goto fail1;
++
++ /* Create a pool of free instances */
++ rc = efrm_kfifo_id_ctor(&efrm_filter_manager->free_ids,
++ 0, EFHW_IP_FILTER_NUM,
++ &efrm_filter_manager->rm.rm_lock);
++ if (rc != 0)
++ goto fail2;
++
++ *rm_out = &efrm_filter_manager->rm;
++ EFRM_TRACE("%s: filter resources created - %d IDs",
++ __FUNCTION__, kfifo_len(efrm_filter_manager->free_ids));
++ return 0;
++
++fail2:
++ efrm_resource_manager_dtor(&efrm_filter_manager->rm);
++fail1:
++ memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager));
++ kfree(efrm_filter_manager);
++ return rc;
++
++}
++
++/*--------------------------------------------------------------------
++ *!
++ * Called to set/change the PT endpoint of a filter
++ *
++ * An example of use is the TCP helper, when it finds that a wildcard
++ * IP filter needs to change which application it delivers traffic to
++ *
++ * \param frs filter resource
++ * \param pt_handle handle of new PT endpoint
++ *
++ * \return standard error codes
++ *
++ *--------------------------------------------------------------------*/
++int
++efrm_filter_resource_set_ptresource(struct filter_resource *frs,
++ struct vi_resource *ptrs)
++{
++ int rc, pti, nic_i;
++ struct efhw_nic *nic;
++
++ EFRM_ASSERT(frs);
++
++ /* if filter is attached to a valid PT endpoint */
++ if (NULL != frs->pt) {
++
++ EFRM_TRACE("%s: detaching PT resource " EFRM_RESOURCE_FMT
++ " from filter ",
++ __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle));
++ /* Detach the filter */
++ EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic)
++ efhw_nic_ipfilter_detach(nic, frs->filter_idx);
++
++ /* release reference */
++ efrm_vi_resource_release(frs->pt);
++ frs->pt = NULL;
++ }
++
++ if (ptrs != NULL) {
++ /* get PT endpoint index */
++ EFRM_RESOURCE_ASSERT_VALID(&ptrs->rs, 0);
++ EFRM_ASSERT(EFRM_RESOURCE_TYPE(ptrs->rs.rs_handle) ==
++ EFRM_RESOURCE_VI);
++ pti = EFRM_RESOURCE_INSTANCE(ptrs->rs.rs_handle);
++ if (pti == 0) {
++ EFRM_ERR("%s: cannot filter for channel 0",
++ __FUNCTION__);
++ rc = -EINVAL;
++ goto fail2;
++ }
++ frs->pt = ptrs;
++ EFRM_TRACE("%s: attaching PT resource " EFRM_RESOURCE_FMT
++ " to filter",
++ __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(frs->pt->rs.rs_handle));
++ EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic)
++ efhw_nic_ipfilter_attach(nic, frs->filter_idx, pti);
++ efrm_vi_resource_ref(frs->pt);
++ }
++ return 0;
++
++fail2:
++ /* Nothing to unwind here: any previous PT reference was released
++ * above and frs->pt is NULL by the time this label is reached, so
++ * releasing it again would be a bug. */
++ return rc;
++}
++EXPORT_SYMBOL(efrm_filter_resource_set_ptresource);
++
++int efrm_filter_resource_clear(struct filter_resource *frs)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++
++ EFRM_ASSERT(frs);
++ EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic)
++ efhw_nic_ipfilter_clear(nic, frs->filter_idx);
++
++ return 0;
++}
++EXPORT_SYMBOL(efrm_filter_resource_clear);
++
++int
++__efrm_filter_resource_set(struct filter_resource *frs, int type,
++ unsigned saddr, uint16_t sport,
++ unsigned daddr, uint16_t dport)
++{
++ struct efhw_nic *nic;
++ int nic_i, rc = 0;
++ unsigned instance = EFRM_RESOURCE_INSTANCE(frs->pt->rs.rs_handle);
++
++ EFRM_ASSERT(frs);
++ EFRM_ASSERT(frs->pt);
++
++ if (efrm_nic_table.a_nic->devtype.variant >= 'B') {
++ /* Scatter setting must match the setting for
++ * the corresponding RX queue */
++ if (!(frs->pt->flags & EFHW_VI_JUMBO_EN))
++ type |= EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK;
++ }
++
++ EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic)
++ if (rc >= 0)
++ rc = efhw_nic_ipfilter_set(nic, type, &frs->filter_idx,
++ instance,
++ saddr, sport, daddr, dport);
++
++ return rc;
++}
++EXPORT_SYMBOL(__efrm_filter_resource_set);
++
++int
++efrm_filter_resource_alloc(struct vi_resource *vi_parent,
++ struct filter_resource **frs_out)
++{
++ struct efhw_nic *nic;
++ int nic_i, rc, instance;
++ struct filter_resource *frs;
++
++ EFRM_ASSERT(frs_out);
++ EFRM_ASSERT(efrm_filter_manager);
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
++ EFRM_ASSERT(vi_parent == NULL ||
++ EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) ==
++ EFRM_RESOURCE_VI);
++
++ /* Allocate resource data structure. */
++ frs = kmalloc(sizeof(struct filter_resource), GFP_KERNEL);
++ if (!frs)
++ return -ENOMEM;
++ efrm_nic_set_clear(&frs->nic_set);
++
++ /* Allocate an instance. */
++ rc = kfifo_get(efrm_filter_manager->free_ids,
++ (unsigned char *)&instance, sizeof(instance));
++ if (rc != sizeof(instance)) {
++ EFRM_TRACE("%s: out of instances", __FUNCTION__);
++ EFRM_ASSERT(rc == 0);
++ rc = -EBUSY;
++ goto fail1;
++ }
++
++ /* Initialise the resource DS. */
++ efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance);
++ frs->pt = vi_parent;
++ if (frs->pt)
++ efrm_vi_resource_ref(frs->pt);
++ frs->filter_idx = -1;
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ efrm_nic_set_write(&frs->nic_set, nic_i, true);
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " Q %d idx %x",
++ __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle),
++ vi_parent == NULL ? -1 :
++ EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle),
++ frs->filter_idx);
++
++ /* Put it into the resource manager's table. */
++ rc = efrm_resource_manager_insert(&frs->rs);
++ if (rc != 0) {
++ if (atomic_dec_and_test(&frs->rs.rs_ref_count))
++ efrm_filter_resource_free(frs);
++ return rc;
++ }
++
++ *frs_out = frs;
++ return 0;
++
++fail1:
++ memset(frs, 0, sizeof(*frs));
++ kfree(frs);
++ return rc;
++}
++EXPORT_SYMBOL(efrm_filter_resource_alloc);
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/iobufset_resource.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/iobufset_resource.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/iobufset_resource.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/iobufset_resource.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,373 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains non-contiguous I/O buffers support.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/efhw/iopage.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efrm/private.h>
++#include <ci/efrm/iobufset.h>
++#include <ci/efrm/vi_resource_manager.h>
++#include <ci/efrm/buffer_table.h>
++
++#define EFRM_IOBUFSET_MAX_NUM_INSTANCES 0x00010000
++
++struct iobufset_resource_manager {
++ struct efrm_resource_manager rm;
++ struct kfifo *free_ids;
++};
++
++struct iobufset_resource_manager *efrm_iobufset_manager;
++
++#define iobsrs(rs1) iobufset_resource(rs1)
++
++/* Returns size of iobufset resource data structure. */
++static inline size_t iobsrs_size(int no_pages)
++{
++ return offsetof(struct iobufset_resource, bufs) +
++ no_pages * sizeof(efhw_iopage_t);
++}
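++
++/* Sizing sketch (illustrative; assumes 4K pages and an efhw_iopage_t
++ * of roughly 16 bytes): sets of up to ~250 pages keep iobsrs_size()
++ * under PAGE_SIZE and so can be kmalloc'ed even in atomic context;
++ * anything bigger falls back to vmalloc in the alloc routine below. */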
++
++void efrm_iobufset_resource_free(struct iobufset_resource *rs)
++{
++ unsigned int no_pages;
++ unsigned int i;
++ int id;
++
++ EFRM_RESOURCE_ASSERT_VALID(&rs->rs, 1);
++ no_pages = rs->n_bufs;
++
++ if (rs->buf_tbl_alloc.base != (unsigned)-1)
++ efrm_buffer_table_free(&rs->buf_tbl_alloc);
++
++ /* see comment on call to efhw_iopage_alloc in the alloc routine below
++ for discussion on use of efrm_nic_table.a_nic here */
++ EFRM_ASSERT(efrm_nic_table.a_nic);
++ if (rs->order == 0) {
++ for (i = 0; i < rs->n_bufs; ++i)
++ efhw_iopage_free(efrm_nic_table.a_nic, &rs->bufs[i]);
++ } else {
++ /* it is important that this is executed in increasing page
++ * order because some implementations of
++ * efhw_iopages_init_from_iopage() assume this */
++ for (i = 0; i < rs->n_bufs;
++ i += rs->pages_per_contiguous_chunk) {
++ efhw_iopages_t iopages;
++ efhw_iopages_init_from_iopage(&iopages, &rs->bufs[i],
++ rs->order);
++ efhw_iopages_free(efrm_nic_table.a_nic, &iopages);
++ }
++ }
++
++ /* free the instance number */
++ id = EFRM_RESOURCE_INSTANCE(rs->rs.rs_handle);
++ EFRM_VERIFY_EQ(kfifo_put(efrm_iobufset_manager->free_ids,
++ (unsigned char *)&id, sizeof(id)), sizeof(id));
++
++ efrm_vi_resource_release(rs->evq);
++
++ EFRM_DO_DEBUG(memset(rs, 0, sizeof(*rs)));
++ if (iobsrs_size(no_pages) < PAGE_SIZE) {
++ kfree(rs);
++ } else {
++ vfree(rs);
++ }
++}
++EXPORT_SYMBOL(efrm_iobufset_resource_free);
++
++int
++efrm_iobufset_resource_alloc(int32_t n_pages,
++ int32_t pages_per_contiguous_chunk,
++ struct vi_resource *vi_evq,
++ bool phys_addr_mode,
++ uint32_t faultonaccess,
++ struct iobufset_resource **iobrs_out)
++{
++ struct iobufset_resource *iobrs;
++ int rc, instance, object_size;
++ unsigned int i;
++
++ EFRM_ASSERT(iobrs_out);
++ EFRM_ASSERT(efrm_iobufset_manager);
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_iobufset_manager->rm);
++ EFRM_RESOURCE_ASSERT_VALID(&vi_evq->rs, 0);
++ EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_evq->rs.rs_handle) ==
++ EFRM_RESOURCE_VI);
++ EFRM_ASSERT(efrm_nic_table.a_nic);
++
++ /* allocate the resource data structure. */
++ object_size = iobsrs_size(n_pages);
++ if (object_size < PAGE_SIZE) {
++ /* this should be OK from a tasklet */
++ /* Necessary to do atomic alloc() as this
++ can be called from a weird-ass iSCSI context that is
++ !in_interrupt but is in_atomic - See BUG3163 */
++ iobrs = kmalloc(object_size, GFP_ATOMIC);
++ } else { /* can't do this within a tasklet */
++#ifndef NDEBUG
++ if (in_interrupt() || in_atomic()) {
++ EFRM_ERR("%s(): alloc->u.iobufset.in_n_pages=%d",
++ __FUNCTION__, n_pages);
++ EFRM_ASSERT(!in_interrupt());
++ EFRM_ASSERT(!in_atomic());
++ }
++#endif
++ iobrs = (struct iobufset_resource *) vmalloc(object_size);
++ }
++ if (iobrs == NULL)
++ rc = -ENOMEM;
++ goto fail1;
++ }
++
++ /* Allocate an instance number. */
++ rc = kfifo_get(efrm_iobufset_manager->free_ids,
++ (unsigned char *)&instance, sizeof(instance));
++ if (rc != sizeof(instance)) {
++ EFRM_TRACE("%s: out of instances", __FUNCTION__);
++ EFRM_ASSERT(rc == 0);
++ rc = -EBUSY;
++ goto fail3;
++ }
++
++ efrm_resource_init(&iobrs->rs, EFRM_RESOURCE_IOBUFSET, instance);
++
++ iobrs->evq = vi_evq;
++ efrm_vi_resource_ref(iobrs->evq);
++
++ iobrs->n_bufs = n_pages;
++ iobrs->pages_per_contiguous_chunk = pages_per_contiguous_chunk;
++ iobrs->order = fls(iobrs->pages_per_contiguous_chunk - 1);
++ iobrs->faultonaccess = faultonaccess;
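++ /* Note: the order computed above rounds the chunk size up to a
++ * power of two: e.g. (illustrative) pages_per_contiguous_chunk == 4
++ * gives order 2 (fls(3) == 2), and a 1-page chunk gives order 0. */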
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %u pages", __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle), iobrs->n_bufs);
++
++ /* Allocate the iobuffers. */
++ if (iobrs->order == 0) {
++ /* make sure iobufs are in a known state in case we don't
++ * finish our allocation */
++ for (i = 0; i < iobrs->n_bufs; ++i)
++ memset(&iobrs->bufs[i], 0, sizeof(iobrs->bufs[i]));
++
++ for (i = 0; i < iobrs->n_bufs; ++i) {
++ /* due to bug2426 we have to specify a NIC when
++ * allocating a DMAable page, which is a bit messy.
++ * For now we assume that if the page is suitable
++ * (e.g. DMAable) by one nic (efrm_nic_table.a_nic),
++ * it is suitable for all NICs.
++ * XXX I bet that breaks in Solaris.
++ */
++ rc = efhw_iopage_alloc(efrm_nic_table.a_nic,
++ &iobrs->bufs[i]);
++ if (rc < 0) {
++ EFRM_ERR("%s: failed (rc %d) to allocate "
++ "page (i=%u)", __FUNCTION__, rc, i);
++ goto fail4;
++ }
++ }
++ } else {
++ efhw_iopages_t iopages;
++ unsigned j;
++
++ /* make sure iobufs are in a known state in case we don't
++ * finish our allocation */
++ for (i = 0; i < iobrs->n_bufs; ++i)
++ memset(&iobrs->bufs[i], 0, sizeof(iobrs->bufs[i]));
++
++ for (i = 0; i < iobrs->n_bufs;
++ i += iobrs->pages_per_contiguous_chunk) {
++ rc = efhw_iopages_alloc(efrm_nic_table.a_nic,
++ &iopages, iobrs->order);
++ if (rc < 0) {
++ EFRM_ERR("%s: failed (rc %d) to allocate "
++ "pages (i=%u order %d)",
++ __FUNCTION__, rc, i, iobrs->order);
++ goto fail4;
++ }
++ for (j = 0; j < iobrs->pages_per_contiguous_chunk;
++ j++) {
++ /* some implementations of
++ * efhw_iopage_init_from_iopages() rely on
++ * this function being called for
++ * _all_ pages in the chunk */
++ efhw_iopage_init_from_iopages(
++ &iobrs->bufs[i + j],
++ &iopages, j);
++ }
++ }
++ }
++
++ iobrs->buf_tbl_alloc.base = (unsigned)-1;
++
++ if (!phys_addr_mode) {
++ unsigned instance = EFAB_VI_RESOURCE_INSTANCE(iobrs->evq);
++ /* Allocate space in the NIC's buffer table. */
++ rc = efrm_buffer_table_alloc(fls(iobrs->n_bufs - 1),
++ &iobrs->buf_tbl_alloc);
++ if (rc < 0) {
++ EFRM_ERR("%s: failed (%d) to alloc %d buffer table "
++ "entries", __FUNCTION__, rc, iobrs->n_bufs);
++ goto fail5;
++ }
++ EFRM_ASSERT(((unsigned)1 << iobrs->buf_tbl_alloc.order) >=
++ (unsigned)iobrs->n_bufs);
++
++ /* Initialise the buffer table entries. */
++ for (i = 0; i < iobrs->n_bufs; ++i) {
++ /*\ ?? \TODO burst them! */
++ efrm_buffer_table_set(&iobrs->buf_tbl_alloc, i,
++ efhw_iopage_dma_addr(&iobrs->
++ bufs[i]),
++ instance);
++ }
++ efrm_buffer_table_commit();
++ }
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %d pages @ "
++ EFHW_BUFFER_ADDR_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle),
++ iobrs->n_bufs, EFHW_BUFFER_ADDR(iobrs->buf_tbl_alloc.base,
++ 0));
++
++ /* Put it into the resource manager's table. */
++ rc = efrm_resource_manager_insert(&iobrs->rs);
++ if (rc != 0) {
++ if (atomic_dec_and_test(&iobrs->rs.rs_ref_count))
++ efrm_iobufset_resource_free(iobrs);
++ return rc;
++ }
++
++ *iobrs_out = iobrs;
++ return 0;
++
++fail5:
++ i = iobrs->n_bufs;
++fail4:
++ /* see comment on call to efhw_iopage_alloc above for a discussion
++ * on use of efrm_nic_table.a_nic here */
++ if (iobrs->order == 0) {
++ while (i--) {
++ efhw_iopage_t *page = &iobrs->bufs[i];
++ efhw_iopage_free(efrm_nic_table.a_nic, page);
++ }
++ } else {
++ unsigned int j;
++ for (j = 0; j < i; j += iobrs->pages_per_contiguous_chunk) {
++ efhw_iopages_t iopages;
++
++ EFRM_ASSERT(j % iobrs->pages_per_contiguous_chunk
++ == 0);
++ /* it is important that this is executed in increasing
++ * page order because some implementations of
++ * efhw_iopages_init_from_iopage() assume this */
++ efhw_iopages_init_from_iopage(&iopages,
++ &iobrs->bufs[j],
++ iobrs->order);
++ efhw_iopages_free(efrm_nic_table.a_nic, &iopages);
++ }
++ }
++ efrm_vi_resource_release(iobrs->evq);
++fail3:
++ if (object_size < PAGE_SIZE) {
++ kfree(iobrs);
++ } else {
++ vfree(iobrs);
++ }
++fail1:
++ return rc;
++}
++EXPORT_SYMBOL(efrm_iobufset_resource_alloc);
++
++static void iobufset_rm_dtor(struct efrm_resource_manager *rm)
++{
++ EFRM_ASSERT(&efrm_iobufset_manager->rm == rm);
++ kfifo_vfree(efrm_iobufset_manager->free_ids);
++}
++
++int
++efrm_create_iobufset_resource_manager(struct efrm_resource_manager **rm_out)
++{
++ int rc, max;
++
++ EFRM_ASSERT(rm_out);
++
++ efrm_iobufset_manager =
++ kmalloc(sizeof(*efrm_iobufset_manager), GFP_KERNEL);
++ if (efrm_iobufset_manager == NULL)
++ return -ENOMEM;
++ memset(efrm_iobufset_manager, 0, sizeof(*efrm_iobufset_manager));
++
++ /*
++ * Bug 1145, 1370: We need to set the initial size of both the
++ * resource table and the instance id table so that they never need
++ * to grow, as we want to be able to allocate new iobufsets at
++ * tasklet time. Let's make a pessimistic guess at the maximum
++ * number of iobufsets possible. It could be less because
++ * - jumbo frames have the same number of packets per iobufset BUT
++ * more pages per buffer
++ * - buffer table entries are used independently of iobufsets by
++ * sendfile
++ *
++ * Based on TCP/IP stack setting of PKTS_PER_SET_S=5 ...
++ * - can't use this define here as it breaks the layering.
++ */
++#define MIN_PAGES_PER_IOBUFSET (1 << 4)
++
++ max = efrm_buffer_table_size() / MIN_PAGES_PER_IOBUFSET;
++ max = min_t(int, max, EFRM_IOBUFSET_MAX_NUM_INSTANCES);
++
++ rc = efrm_kfifo_id_ctor(&efrm_iobufset_manager->free_ids,
++ 0, max, &efrm_iobufset_manager->rm.rm_lock);
++ if (rc != 0)
++ goto fail1;
++
++ rc = efrm_resource_manager_ctor(&efrm_iobufset_manager->rm,
++ iobufset_rm_dtor, "IOBUFSET",
++ EFRM_RESOURCE_IOBUFSET, max);
++ if (rc < 0)
++ goto fail2;
++
++ *rm_out = &efrm_iobufset_manager->rm;
++ return 0;
++
++fail2:
++ kfifo_vfree(efrm_iobufset_manager->free_ids);
++fail1:
++ EFRM_DO_DEBUG(memset(efrm_iobufset_manager, 0,
++ sizeof(*efrm_iobufset_manager)));
++ kfree(efrm_iobufset_manager);
++ return rc;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/iopage.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/iopage.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/iopage.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/iopage.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,101 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides Linux-specific implementation for iopage API used
++ * from efhw library.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/driver/resource/linux_efhw_nic.h>
++#include "kernel_compat.h"
++#include <ci/efhw/common_sysdep.h> /* for dma_addr_t */
++
++int efhw_iopage_alloc(struct efhw_nic *nic, efhw_iopage_t *p)
++{
++ struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++ dma_addr_t handle;
++ void *kva;
++
++ kva = efrm_pci_alloc_consistent(lnic->pci_dev, PAGE_SIZE,
++ &handle);
++ if (kva == NULL)
++ return -ENOMEM;
++
++ EFHW_ASSERT((handle & ~PAGE_MASK) == 0);
++
++ memset((void *)kva, 0, PAGE_SIZE);
++ efhw_page_init_from_va(&p->p, kva);
++
++ p->dma_addr = handle;
++
++ return 0;
++}
++
++void efhw_iopage_free(struct efhw_nic *nic, efhw_iopage_t *p)
++{
++ struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++ EFHW_ASSERT(efhw_page_is_valid(&p->p));
++
++ efrm_pci_free_consistent(lnic->pci_dev, PAGE_SIZE,
++ efhw_iopage_ptr(p), p->dma_addr);
++}
++
++int efhw_iopages_alloc(struct efhw_nic *nic, efhw_iopages_t *p, unsigned order)
++{
++ unsigned bytes = 1u << (order + PAGE_SHIFT);
++ struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++ dma_addr_t handle;
++ caddr_t addr;
++ int gfp_flag;
++
++ /* Set __GFP_COMP if available to make reference counting work.
++ * This is recommended here:
++ * http://www.forbiddenweb.org/viewtopic.php?id=83167&page=4#348331
++ */
++ gfp_flag = ((in_atomic() ? GFP_ATOMIC : GFP_KERNEL) | __GFP_COMP);
++ addr = efrm_dma_alloc_coherent(&lnic->pci_dev->dev, bytes, &handle,
++ gfp_flag);
++ if (addr == NULL)
++ return -ENOMEM;
++
++ EFHW_ASSERT((handle & ~PAGE_MASK) == 0);
++
++ p->order = order;
++ p->dma_addr = handle;
++ p->kva = addr;
++
++ return 0;
++}
++
++void efhw_iopages_free(struct efhw_nic *nic, efhw_iopages_t *p)
++{
++ unsigned bytes = 1u << (p->order + PAGE_SHIFT);
++ struct linux_efhw_nic *lnic = linux_efhw_nic(nic);
++
++ efrm_dma_free_coherent(&lnic->pci_dev->dev, bytes,
++ (void *)p->kva, p->dma_addr);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/kernel_compat.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kernel_compat.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/kernel_compat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kernel_compat.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,584 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides compatibility layer for various Linux kernel versions
++ * (starting from 2.6.9 RHEL kernel).
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#define IN_KERNEL_COMPAT_C
++#include <linux/types.h>
++#include <ci/efrm/debug.h>
++#include "kernel_compat.h"
++
++/* Set this to 1 to enable very basic counting of iopage(s) allocations, then
++ * call dump_iopage_counts() to show the number of current allocations of
++ * orders 0-7.
++ */
++#define EFRM_IOPAGE_COUNTS_ENABLED 0
++
++
++
++/* I admit that it's a bit ugly going straight to the field, but it
++ * seems easiest given that get_page followed by put_page on a page
++ * with PG_reserved set will increment the ref count on 2.6.14 and
++ * below, but not 2.6.15. Also, RedHat have hidden put_page_testzero
++ * in a header file which produces warnings when compiled. This
++ * doesn't agree with our use of -Werror.
++ */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5)
++# define page_count_field(pg) ((pg)->count)
++#else
++# define page_count_field(pg) ((pg)->_count)
++#endif
++
++#define inc_page_count(page) atomic_inc(&page_count_field(page))
++#define dec_page_count(page) atomic_dec(&page_count_field(page))
++
++/* Bug 5531: set_page_count doesn't work if the new page count is an
++ * expression. */
++#define ci_set_page_count(page, n) set_page_count(page, (n))
++
++ /* Bug 3965: Gak! Reference counts just don't work on memory
++ * allocated through pci_alloc_consistent. Different versions and
++ * architectures do different things. There are several interacting
++ * bugs/features which have been described below and then summarised
++ * in a table for each kernel version. For each feature, there is a
++ * question, a short description, a hint at how to examine the
++ * kernel code for this feature and a description of the keys in the
++ * table.
++ *
++ * A. Is PG_compound set on multi-page allocations?
++ *
++ * When a multi-page allocation succeeds, the kernel sets the
++ * reference count of the first page to one and the count of the
++ * remaining pages to zero. This is an immediate problem because
++ * if these pages are mapped into user space, the VM will do
++ * get_page followed by put_page, at which point the reference
++ * count will return to zero and the page will be freed.
++ * PG_compound was introduced in 2.6.0 and back-ported to rhel3
++ * kernels. When it is set, all the pages have a pointer to the
++ * first page so that they can share the reference count. If
++ * PG_compound is set, calling get_page(pg+1) can change
++ * page_count(pg). It was originally set on all multi-page
++ * allocations, but later only set if the __GFP_COMP flag was
++ * provided to the allocator.
++ *
++ * See mm/page_alloc.c
++ * Does prep_compound_page get called when __GFP_COMP not set?
++ *
++ * Keys:
++ * NotDef - prep_compound_page and PG_compound are not defined.
++ * Comp - prep_compound_page is called for any multi-page allocation.
++ * Opt - prep_compound_page is only called if __GFP_COMP is set.
++ * OptInv - prep_compound_page is only called if __GFP_NO_COMP is not set.
++ *
++ * B. Are bounce buffers ever used to satisfy pci_alloc_consistent?
++ * (x86_64 only)
++ *
++ * 2.6 kernels introduced bounce buffers on x86_64 machines to access
++ * memory above 4G when using the DMA mapping API. At some point,
++ * code was added to allow pci_alloc_consistent/dma_alloc_coherent to
++ * allocate memory from the bounce buffers if the general purpose
++ * allocator produced memory which wasn't suitable. Such memory can
++ * be recognised by the PG_reserved bit being set. At a later point,
++ * the __GFP_DMA32 flag was added and used to restrict the allocator
++ * to below 4G. The effect of this later change was that 4G capable
++ * cards would no longer get memory from the bounce buffers, although
++ * a card which can address less than 4G might get memory from the
++ * bounce buffers.
++ *
++ * See dma_alloc_coherent or pci_alloc_consistent in
++ * arch/x86_64/kernel/pci-gart.c or arch/x86/kernel/pci-dma_64.c
++ * Is (gfp |= GFP_DMA32) before dma_alloc_pages?
++ * Is swiotlb_alloc_coherent called?
++ *
++ * Keys:
++ * NU - bounce buffers are Never Used
++ * Used - bounce buffers are sometimes used
++ *
++ * C. Does munmap decrement the reference count of a PG_reserved page?
++ *
++ * Originally, the munmap code would not decrement the reference count
++ * of a page which had PG_reserved set. At some point in the 2.6
++ * series, VM_PFNMAP was introduced and could be set on a vma to
++ * indicate that no pages in that vma should have the reference count
++ * decremented (unless they are copy-on-write copies). At that point,
++ * the check for PG_reserved pages in the munmap code path was
++ * removed. Some hackery in vm_normal_page means that a VM_PFNMAP vma
++ * must map contiguous physical pages. As a result, such pages should
++ * be mapped during mmap using remap_pfn_range (for an example, see
++ * drivers/char/mem.c).
++ *
++ * In 2.6 kernels: See release_pages in mm/swap.c
++ * Does PageReserved get tested?
++ * In 2.6 kernels: See mm/memory.c
++ * Is VM_PFNMAP used?
++ * In 2.4 kernels: See __free_pte in mm/memory.c
++ * Does PageReserved get tested?
++ * In 2.4 kernels: See __free_pages in mm/page_alloc.c
++ * Does PageReserved get tested?
++ *
++ * Keys:
++ * resv - The reference count is not touched for PG_reserved pages.
++ * pfnmap - The VM_PFNMAP flag is checked instead of PG_reserved.
++ *
++ * D. Does munmap honour the PG_compound bit?
++ *
++ * When PG_compound was originally introduced, the munmap code path
++ * didn't check it before decrementing the reference count on the
++ * page. As a result, the wrong reference count would be updated if a
++ * PG_compound page was ever mapped into user space.
++ *
++ * In 2.6 kernels: See release_pages in mm/swap.c
++ * Does PageCompound get tested?
++ * In 2.4 kernels: See __free_pages in mm/page_alloc.c
++ * Does PageCompound get tested?
++ *
++ * Keys:
++ * NotHon - The PG_compound bit isn't honoured by munmap.
++ * Hon - The PG_compound bit is honoured by munmap.
++ *
++ * OS A B C D
++ * 2.4.18 NotDef NU resv NotHon
++ * 2.4.29 NotDef NU resv NotHon
++ * 2.4.20-31.9 rhl9 NotDef NU resv NotHon
++ *
++ * 2.4.21-4.EL rhel3 Comp NU resv Hon
++ * 2.4.21-15.EL rhel3 Comp NU resv Hon
++ * 2.4.21-32.EL rhel3 Comp NU resv Hon
++ * 2.4.21-40.EL rhel3 Comp NU resv Hon
++ *
++ * 2.6.0 Comp NU resv NotHon
++ *
++ * 2.6.5-7.97 sles9 OptInv NU resv NotHon
++ * 2.6.9 rhel4 Opt NU resv NotHon
++ *
++ * 2.6.11 fc4 ? ? ? ?
++ * 2.6.12 fc4 Opt Used resv NotHon
++ * 2.6.13 Opt Used resv NotHon
++ *
++ * 2.6.15 Opt NU pfnmap NotHon
++ *
++ * 2.6.16 Opt NU pfnmap Hon
++ * 2.6.16.9 Opt NU pfnmap Hon
++ * 2.6.17.2 Opt NU pfnmap Hon
++ * 2.6.24-rc7 k.org Opt NU pfnmap Hon
++ *
++ * This LKML thread gives some low down on mapping pages into user
++ * space and using DMA.
++ * http://www.forbiddenweb.org/viewtopic.php?id=83167&page=1
++ *
++ * There is no problem with single page allocations (until some
++ * kernel hands us a PG_reserved page and expects us to use
++ * VM_PFNMAP on the vma).
++ *
++ * Bug 5450: Most kernels set the reference count to one on the
++ * first sub-page of a high-order page allocation and zero on
++ * subsequent sub-pages. Some kernels, however, set the page count
++ * to one on all the sub-pages. The SLES 9 range are affected, as
++ * are kernels built without CONFIG_MMU defined.
++ *
++ * Possible strategies for multi-page allocations:
++ *
++ * EFRM_MMAP_USE_COMPOUND
++ * 1. Allocate a compound page. Reference counting should then work
++ * on the whole allocation. This is a good theory, but is broken
++ * by bug/feature D (above).
++ *
++ * EFRM_MMAP_USE_SPLIT
++ * 2. Convert the multi-page allocation to many single page
++ * allocations. This involves incrementing the reference counts
++ * and clearing PG_compound on all the pages (including the
++ * first). The references should be released _after_ calling
++ * pci_free_consistent so that that call doesn't release the
++ * memory.
++ *
++ * EFRM_MMAP_USE_INCREMENT
++ * 3. Increment the reference count on all the pages after
++ * allocating and decrement them again before freeing. This gets
++ * round the zero reference count problem. It doesn't handle the
++ * case where someone else is holding a reference to one of our
++ * pages when we free the pages, but we think VM_IO stops this
++ * from happening.
++ */
++
++/* Should we use strategy 1? This can be forced on us by the OS. */
++#if defined(PG_compound)
++#define EFRM_MMAP_USE_COMPOUND 1
++#else
++#define EFRM_MMAP_USE_COMPOUND 0
++#endif
++
++/* Should we use strategy 2? This can be used even if strategy 1 is
++ * used. */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++#define EFRM_MMAP_USE_SPLIT 1
++#else
++#define EFRM_MMAP_USE_SPLIT 0
++#endif
++
++/* Should we use strategy 3? There's no point doing this if either
++ * strategy 1 or strategy 2 is used. */
++#if !EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT
++#error "We shouldn't have to use this strategy."
++#define EFRM_MMAP_USE_INCREMENT 1
++#else
++#define EFRM_MMAP_USE_INCREMENT 0
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++#define EFRM_MMAP_RESET_REFCNT 1
++#else
++#define EFRM_MMAP_RESET_REFCNT 0
++#endif
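++
++/* On the 2.6.18 kernels this patch targets these should resolve to
++ * EFRM_MMAP_USE_COMPOUND == 1 (PG_compound is defined),
++ * EFRM_MMAP_USE_SPLIT == 0, EFRM_MMAP_USE_INCREMENT == 0 and
++ * EFRM_MMAP_RESET_REFCNT == 0, i.e. strategy 1 alone. */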
++
++/* NB. 2.6.17 has renamed SetPageCompound to __SetPageCompound and
++ * ClearPageCompound to __ClearPageCompound. */
++#if ((defined(PageCompound) != defined(PG_compound)) || \
++ (defined(SetPageCompound) != defined(PG_compound) && \
++ defined(__SetPageCompound) != defined(PG_compound)) || \
++ (defined(ClearPageCompound) != defined(PG_compound) && \
++ defined(__ClearPageCompound) != defined(PG_compound)) || \
++ (defined(__GFP_COMP) && !defined(PG_compound)))
++#error Mismatch of defined page-flags.
++#endif
++
++extern int use_pci_alloc; /* Use pci_alloc_consistent to alloc iopages */
++
++/****************************************************************************
++ *
++ * allocate a buffer suitable for DMA to/from the NIC
++ *
++ ****************************************************************************/
++
++static inline void pci_mmap_pages_hack_after_alloc(caddr_t kva, unsigned order)
++{
++ unsigned pfn = __pa(kva) >> PAGE_SHIFT;
++ struct page *start_pg = pfn_to_page(pfn);
++#if !defined(NDEBUG) || EFRM_MMAP_USE_SPLIT
++ struct page *end_pg = start_pg + (1 << order);
++ struct page *pg;
++#endif
++
++ /* Compound pages don't get created for order 0 pages, so no
++ * fixing up needs to be done. */
++ if (order == 0)
++ return;
++
++ /* If we've been given a reserved page then it must have come from
++ * the bounce buffer pool. */
++ if (PageReserved(start_pg)) {
++#if defined(VM_PFNMAP) || !defined(__x86_64__)
++ /* Kernel allocated reserved pages when not expected */
++ BUG();
++#endif
++ return;
++ }
++
++ /* Check the page count and PG_compound bit. */
++#ifndef NDEBUG
++# if defined(PG_compound)
++ EFRM_ASSERT(PageCompound(start_pg) == EFRM_MMAP_USE_COMPOUND);
++# endif
++ EFRM_ASSERT(page_count(start_pg) == 1);
++
++ {
++ /* Some kernels have the page count field hold (ref_count-1)
++ * rather than (ref_count). This is so that decrementing the
++ * reference count to "zero" causes the internal value to change
++ * from 0 to -1 which sets the carry flag. Other kernels store
++ * the real reference count value in the obvious way. We handle
++ * this here by reading the reference count field of the first
++ * page, which is always 1. */
++ int pg_count_zero;
++ pg_count_zero = atomic_read(&page_count_field(start_pg)) - 1;
++ for (pg = start_pg + 1; pg < end_pg; pg++) {
++ int pg_count;
++# if defined(PG_compound)
++ EFRM_ASSERT(PageCompound(pg) == EFRM_MMAP_USE_COMPOUND);
++# endif
++
++ /* Bug 5450: Some kernels initialise the page count
++ * to one for pages other than the first and some
++ * leave it at zero. We allow either behaviour
++ * here, but disallow anything strange. Newer
++ * kernels only define set_page_count in an
++ * internal header file, so we have to make do with
++ * incrementing and decrementing the reference
++ * count. Fortunately, those kernels don't set the
++ * reference count to one on all the pages. */
++ pg_count = atomic_read(&page_count_field(pg));
++# if EFRM_MMAP_RESET_REFCNT
++ if (pg_count != pg_count_zero)
++ EFRM_ASSERT(pg_count == pg_count_zero + 1);
++# else
++ EFRM_ASSERT(pg_count == pg_count_zero);
++# endif
++ }
++ }
++#endif
++
++ /* Split the multi-page allocation if necessary. */
++#if EFRM_MMAP_USE_SPLIT
++ for (pg = start_pg; pg < end_pg; pg++) {
++
++ /* This is no longer a compound page. */
++# if EFRM_MMAP_USE_COMPOUND
++ ClearPageCompound(pg);
++ EFRM_ASSERT(PageCompound(pg) == 0);
++# endif
++
++# ifndef NDEBUG
++ {
++ int pg_count = page_count(pg);
++ /* Bug 5450: The page count can be zero or one here. */
++ if (pg == start_pg) {
++ EFRM_ASSERT(pg_count == 1);
++ } else {
++# if EFRM_MMAP_RESET_REFCNT
++ if (pg_count != 0)
++ EFRM_ASSERT(pg_count == 1);
++# else
++ EFRM_ASSERT(pg_count == 0);
++# endif
++ }
++ }
++# endif
++
++ /* Get a reference which will be released after the pages have
++ * been passed back to pci_free_consistent. */
++# if EFRM_MMAP_RESET_REFCNT
++ /* Bug 5450: Reset the reference count since the count might
++ * already be 1. */
++ ci_set_page_count(pg, (pg == start_pg) ? 2 : 1);
++# else
++ get_page(pg);
++# endif
++ }
++#endif
++
++ /* Fudge the reference count if necessary. */
++#if EFRM_MMAP_USE_INCREMENT
++ for (pg = start_pg; pg < end_pg; pg++)
++ inc_page_count(pg);
++#endif
++}
++
++static inline void pci_mmap_pages_hack_before_free(caddr_t kva, unsigned order)
++{
++#if EFRM_MMAP_USE_INCREMENT || !defined(NDEBUG)
++ /* Drop the references taken in pci_mmap_pages_hack_after_alloc */
++ unsigned pfn = __pa(kva) >> PAGE_SHIFT;
++ struct page *start_pg = pfn_to_page(pfn);
++ struct page *end_pg = start_pg + (1 << order);
++ struct page *pg;
++
++ /* Compound pages don't get created for order 0 pages, so no
++ * fixing up needs to be done. */
++ if (order == 0)
++ return;
++
++ if (PageReserved(start_pg))
++ return;
++
++# if EFRM_MMAP_USE_INCREMENT
++ for (pg = start_pg; pg < end_pg; pg++)
++ dec_page_count(pg);
++# endif
++
++#if !defined(NDEBUG)
++ EFRM_ASSERT(page_count(start_pg) == 1+EFRM_MMAP_USE_SPLIT);
++
++# if EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT
++ for (pg = start_pg; pg < end_pg; pg++)
++ EFRM_ASSERT(PageCompound(pg));
++# else
++ for (pg = start_pg+1; pg < end_pg; pg++) {
++ unsigned exp_pg_count = EFRM_MMAP_USE_SPLIT;
++ /* NB. If this assertion fires, either we've messed up the
++ * page counting or someone is holding on to a reference.
++ */
++ EFRM_ASSERT(page_count(pg) == exp_pg_count);
++ }
++# endif
++#endif
++
++#endif
++}
++
++static inline void pci_mmap_pages_hack_after_free(caddr_t kva, unsigned order)
++{
++#if EFRM_MMAP_USE_SPLIT
++ /* Drop the references taken in pci_mmap_pages_hack_after_alloc */
++ unsigned pfn = __pa(kva) >> PAGE_SHIFT;
++ struct page *start_pg = pfn_to_page(pfn);
++ struct page *end_pg = start_pg + (1 << order);
++ struct page *pg;
++
++ /* Compound pages don't get created for order 0 pages, so no
++ * fixing up needs to be done. */
++ if (order == 0)
++ return;
++
++ if (PageReserved(start_pg))
++ return;
++
++ for (pg = start_pg; pg < end_pg; pg++) {
++ EFRM_ASSERT(page_count(pg) == 1);
++ put_page(pg);
++ }
++#endif
++}
++
++
++#if EFRM_IOPAGE_COUNTS_ENABLED
++
++static int iopage_counts[8];
++
++void dump_iopage_counts(void)
++{
++ EFRM_NOTICE("iopage counts: %d %d %d %d %d %d %d %d", iopage_counts[0],
++ iopage_counts[1], iopage_counts[2], iopage_counts[3],
++ iopage_counts[4], iopage_counts[5], iopage_counts[6],
++ iopage_counts[7]);
++}
++
++#endif
++
++
++
++/*********** pci_alloc_consistent / pci_free_consistent ***********/
++
++void *efrm_dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_addr, int flag)
++{
++ struct pci_dev *pci_dev;
++ void *ptr;
++ unsigned order;
++ EFRM_IOMMU_DECL;
++
++ order = __ffs(size/PAGE_SIZE);
++ EFRM_ASSERT(size == (PAGE_SIZE<<order));
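++ /* e.g. (illustrative) size == 4 * PAGE_SIZE yields order 2; the
++ * assert above guarantees size is an exact power-of-two number of
++ * pages, which the __ffs() trick relies on. */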
++
++ /* NB. The caller may well set __GFP_COMP. However we can't
++ * rely on this working on older kernels. 2.6.9 only acts on
++ * __GFP_COMP if CONFIG_HUGETLB_PAGE is defined. If the flag
++ * did have an effect then PG_compound will be set on the
++ * pages. */
++
++ if (use_pci_alloc) {
++ /* Can't take a spinlock here since the allocation can
++ * block. */
++ ptr = dma_alloc_coherent(dev, size, dma_addr, flag);
++ if (ptr == NULL)
++ return ptr;
++ } else {
++#ifdef CONFIG_SWIOTLB /* BUG1340 */
++ if (swiotlb) {
++ EFRM_ERR("%s: This kernel is using DMA bounce "
++ "buffers. Please upgrade kernel to "
++ "linux2.6 or reduce the amount of RAM "
++ "with mem=XXX.", __FUNCTION__);
++ return NULL;
++ }
++#endif
++ ptr = (void *)__get_free_pages(flag, order);
++
++ if (ptr == NULL)
++ return NULL;
++
++ EFRM_IOMMU_LOCK();
++ pci_dev = container_of(dev, struct pci_dev, dev);
++ *dma_addr = pci_map_single(pci_dev, ptr, size,
++ PCI_DMA_BIDIRECTIONAL);
++ EFRM_IOMMU_UNLOCK();
++ if (pci_dma_mapping_error(*dma_addr)) {
++ free_pages((unsigned long)ptr, order);
++ return NULL;
++ }
++ }
++
++#ifndef CONFIG_IA64
++ pci_mmap_pages_hack_after_alloc(ptr, order);
++#endif
++
++#if EFRM_IOPAGE_COUNTS_ENABLED
++ if (order < 8)
++ iopage_counts[order]++;
++ else
++ EFRM_ERR("Huge iopages alloc (order=%d) ??? (not counted)",
++ order);
++#endif
++
++ return ptr;
++}
++
++void efrm_dma_free_coherent(struct device *dev, size_t size,
++ void *ptr, dma_addr_t dma_addr)
++{
++ struct pci_dev *pci_dev;
++ unsigned order;
++ EFRM_IOMMU_DECL;
++
++ order = __ffs(size/PAGE_SIZE);
++ EFRM_ASSERT(size == (PAGE_SIZE<<order));
++
++#if EFRM_IOPAGE_COUNTS_ENABLED
++ if (order < 8)
++ --iopage_counts[order];
++ else
++ EFRM_ERR("Huge iopages free (order=%d) ??? (not counted)",
++ order);
++#endif
++#ifndef CONFIG_IA64
++ pci_mmap_pages_hack_before_free(ptr, order);
++#endif
++ if (use_pci_alloc) {
++ EFRM_IOMMU_LOCK();
++ dma_free_coherent(dev, size, ptr, dma_addr);
++ EFRM_IOMMU_UNLOCK();
++ } else {
++ pci_dev = container_of(dev, struct pci_dev, dev);
++ EFRM_IOMMU_LOCK();
++ efrm_pci_unmap_single(pci_dev, dma_addr, size,
++ PCI_DMA_BIDIRECTIONAL);
++ EFRM_IOMMU_UNLOCK();
++
++ free_pages((unsigned long)ptr, order);
++ }
++
++#ifndef CONFIG_IA64
++ pci_mmap_pages_hack_after_free(ptr, order);
++#endif
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/kernel_compat.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kernel_compat.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/kernel_compat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kernel_compat.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,239 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides compatibility layer for various Linux kernel versions
++ * (starting from 2.6.9 RHEL kernel).
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H
++#define DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H
++
++#include <linux/version.h>
++
++/********* wait_for_completion_timeout() ********************/
++#include <linux/sched.h>
++
++/* RHEL_RELEASE_CODE from linux/version.h is only defined for 2.6.9-55EL
++ * UTS_RELEASE is unfortunately unusable
++ * Really only need this fix for <2.6.9-34EL
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)) && \
++ !defined(RHEL_RELEASE_CODE)
++
++static inline unsigned long fastcall __sched
++efrm_wait_for_completion_timeout(struct completion *x, unsigned long timeout)
++{
++ might_sleep();
++
++ spin_lock_irq(&x->wait.lock);
++ if (!x->done) {
++ DECLARE_WAITQUEUE(wait, current);
++
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&x->wait, &wait);
++ do {
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ spin_unlock_irq(&x->wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&x->wait.lock);
++ if (!timeout) {
++ __remove_wait_queue(&x->wait, &wait);
++ goto out;
++ }
++ } while (!x->done);
++ __remove_wait_queue(&x->wait, &wait);
++ }
++ x->done--;
++out:
++ spin_unlock_irq(&x->wait.lock);
++ return timeout;
++}
++
++# ifdef wait_for_completion_timeout
++# undef wait_for_completion_timeout
++# endif
++# define wait_for_completion_timeout efrm_wait_for_completion_timeout
++
++#endif
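++
++/* Illustrative use of the wrapper above (hypothetical caller):
++ *
++ *	if (wait_for_completion_timeout(&done, HZ) == 0)
++ *		return -ETIMEDOUT;	(timed out after ~1 second)
++ */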
++
++/********* pci_map_*() ********************/
++
++#include <linux/pci.h>
++
++/* Bug 4560: Some kernels leak IOMMU entries under heavy load. Use a
++ * spinlock to serialise access where possible to alleviate the
++ * problem.
++ *
++ * NB. This is duplicated in the net driver. Please keep in sync. */
++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && \
++ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) && \
++ defined(__x86_64__) && defined(CONFIG_SMP))
++
++#define EFRM_HAVE_IOMMU_LOCK 1
++
++#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)) && \
++ defined(CONFIG_SUSE_KERNEL))
++#define EFRM_NEED_ALTERNATE_MAX_PFN 1
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
++#if defined(CONFIG_GART_IOMMU)
++#define EFRM_NO_IOMMU no_iommu
++#else
++#define EFRM_NO_IOMMU 1
++#endif
++#else
++#define EFRM_NO_IOMMU 0
++#endif
++
++/* Set to 0 if we should never use the lock. Set to 1 if we should
++ * automatically determine if we should use the lock. Set to 2 if we
++ * should always use the lock. */
++extern unsigned int efx_use_iommu_lock;
++/* Defined in the net driver. */
++extern spinlock_t efx_iommu_lock;
++/* Non-zero if there is a card which needs the lock. */
++extern int efrm_need_iommu_lock;
++
++/* The IRQ state is needed if the lock is being used. The flag is
++ * cached to ensure that every lock is followed by an unlock, even
++ * if the global flag changes in the middle of the operation. */
++
++#define EFRM_IOMMU_DECL \
++ unsigned long efx_iommu_irq_state = 0; \
++ int efx_iommu_using_lock;
++#define EFRM_IOMMU_LOCK() \
++ do { \
++ efx_iommu_using_lock = (efx_use_iommu_lock && \
++ (efrm_need_iommu_lock || \
++ efx_use_iommu_lock >= 2)); \
++ if (efx_iommu_using_lock) \
++ spin_lock_irqsave(&efx_iommu_lock, efx_iommu_irq_state);\
++ } while (0)
++#define EFRM_IOMMU_UNLOCK() \
++ do { \
++ if (efx_iommu_using_lock) \
++ spin_unlock_irqrestore(&efx_iommu_lock, \
++ efx_iommu_irq_state); \
++ } while (0)
++
++#else /* defined(__x86_64__) && defined(CONFIG_SMP) */
++
++#define EFRM_HAVE_IOMMU_LOCK 0
++#define EFRM_IOMMU_DECL
++#define EFRM_IOMMU_LOCK() do {} while (0)
++#define EFRM_IOMMU_UNLOCK() do {} while (0)
++
++#endif
++
++static inline dma_addr_t efrm_pci_map_single(struct pci_dev *hwdev, void *ptr,
++ size_t size, int direction)
++{
++ dma_addr_t dma_addr;
++ EFRM_IOMMU_DECL;
++
++ EFRM_IOMMU_LOCK();
++ dma_addr = pci_map_single(hwdev, ptr, size, direction);
++ EFRM_IOMMU_UNLOCK();
++
++ return dma_addr;
++}
++
++static inline void efrm_pci_unmap_single(struct pci_dev *hwdev,
++ dma_addr_t dma_addr, size_t size,
++ int direction)
++{
++ EFRM_IOMMU_DECL;
++
++ EFRM_IOMMU_LOCK();
++ pci_unmap_single(hwdev, dma_addr, size, direction);
++ EFRM_IOMMU_UNLOCK();
++}
++
++static inline dma_addr_t efrm_pci_map_page(struct pci_dev *hwdev,
++ struct page *page,
++ unsigned long offset, size_t size,
++ int direction)
++{
++ dma_addr_t dma_addr;
++ EFRM_IOMMU_DECL;
++
++ EFRM_IOMMU_LOCK();
++ dma_addr = pci_map_page(hwdev, page, offset, size, direction);
++ EFRM_IOMMU_UNLOCK();
++
++ return dma_addr;
++}
++
++static inline void efrm_pci_unmap_page(struct pci_dev *hwdev,
++ dma_addr_t dma_addr, size_t size,
++ int direction)
++{
++ EFRM_IOMMU_DECL;
++
++ EFRM_IOMMU_LOCK();
++ pci_unmap_page(hwdev, dma_addr, size, direction);
++ EFRM_IOMMU_UNLOCK();
++}
++
++#ifndef IN_KERNEL_COMPAT_C
++# ifndef __GFP_COMP
++# define __GFP_COMP 0
++# endif
++# ifndef __GFP_ZERO
++# define __GFP_ZERO 0
++# endif
++#endif
++
++extern void *efrm_dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_addr, int flag);
++
++extern void efrm_dma_free_coherent(struct device *dev, size_t size,
++ void *ptr, dma_addr_t dma_addr);
++
++static inline void *efrm_pci_alloc_consistent(struct pci_dev *hwdev,
++ size_t size,
++ dma_addr_t *dma_addr)
++{
++ return efrm_dma_alloc_coherent(&hwdev->dev, size, dma_addr,
++ GFP_ATOMIC);
++}
++
++static inline void efrm_pci_free_consistent(struct pci_dev *hwdev, size_t size,
++ void *ptr, dma_addr_t dma_addr)
++{
++ efrm_dma_free_coherent(&hwdev->dev, size, ptr, dma_addr);
++}
++
++#endif /* DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/kernel_proc.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kernel_proc.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/kernel_proc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kernel_proc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,111 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains /proc/driver/sfc_resource/ implementation.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/debug.h>
++#include <ci/efrm/nic_table.h>
++#include <ci/efrm/driver_private.h>
++#include <linux/proc_fs.h>
++
++/** Top level directory for sfc specific stats **/
++static struct proc_dir_entry *efrm_proc_root; /* = NULL */
++
++static int
++efrm_resource_read_proc(char *buf, char **start, off_t offset, int count,
++ int *eof, void *data);
++
++int efrm_install_proc_entries(void)
++{
++ /* create the top-level directory for etherfabric specific stuff */
++ efrm_proc_root = proc_mkdir("sfc_resource", proc_root_driver);
++ if (!efrm_proc_root)
++ return -ENOMEM;
++ EFRM_ASSERT(efrm_proc_root);
++
++ if (create_proc_read_entry("resources", 0, efrm_proc_root,
++ efrm_resource_read_proc, 0) == NULL) {
++ EFRM_WARN("%s: Unable to create /proc/drivers/sfc_resource/"
++ "resources", __FUNCTION__);
++ }
++ return 0;
++}
++
++void efrm_uninstall_proc_entries(void)
++{
++ EFRM_ASSERT(efrm_proc_root);
++ remove_proc_entry("resources", efrm_proc_root);
++ remove_proc_entry("sfc_resource", proc_root_driver);
++ efrm_proc_root = NULL;
++}
++
++/****************************************************************************
++ *
++ * /proc/driver/sfc_resource/resources
++ *
++ ****************************************************************************/
++
++#define EFRM_PROC_PRINTF(buf, len, fmt, ...) \
++ do { \
++ if (count - len > 0) \
++ len += snprintf(buf+len, count-len, (fmt), \
++ __VA_ARGS__); \
++ } while (0)
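++
++/* Illustrative output of /proc/driver/sfc_resource/resources
++ * (hypothetical counts; note the macro above deliberately picks up
++ * 'count' from the caller's scope):
++ *
++ *	*** FILTER ***
++ *	current = 2
++ *	 max = 8
++ */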
++
++static int
++efrm_resource_read_proc(char *buf, char **start, off_t offset, int count,
++ int *eof, void *data)
++{
++ irq_flags_t lock_flags;
++ int len = 0;
++ int type;
++ struct efrm_resource_manager *rm;
++
++ for (type = 0; type < EFRM_RESOURCE_NUM; type++) {
++ rm = efrm_rm_table[type];
++ if (rm == NULL)
++ continue;
++
++ EFRM_PROC_PRINTF(buf, len, "*** %s ***\n", rm->rm_name);
++
++ spin_lock_irqsave(&rm->rm_lock, lock_flags);
++ EFRM_PROC_PRINTF(buf, len, "current = %u\n", rm->rm_resources);
++ EFRM_PROC_PRINTF(buf, len, " max = %u\n\n",
++ rm->rm_resources_hiwat);
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ }
++
++ return count ? strlen(buf) : 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/kfifo.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kfifo.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/kfifo.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/kfifo.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,212 @@
++/*
++ * A simple kernel FIFO implementation.
++ *
++ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ */
++
++/*
++ * This file is stolen from the Linux kernel sources
++ * (linux-2.6.22/kernel/kfifo.c) for use in the sfc_resource driver.
++ * It is only used on old kernels without a kfifo implementation.
++ * Most of linux/kfifo.h is incorporated into
++ * ci/efrm/sysdep_linux.h.
++ */
++#include <ci/efrm/sysdep_linux.h>
++#ifdef HAS_NO_KFIFO
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++/*#include <linux/kfifo.h>*/
++
++/**
++ * kfifo_init - allocates a new FIFO using a preallocated buffer
++ * @buffer: the preallocated buffer to be used.
++ * @size: the size of the internal buffer, this has to be a power of 2.
++ * @gfp_mask: get_free_pages mask, passed to kmalloc()
++ * @lock: the lock to be used to protect the fifo buffer
++ *
++ * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
++ * &struct kfifo with kfree().
++ */
++struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
++ gfp_t gfp_mask, spinlock_t * lock)
++{
++ struct kfifo *fifo;
++
++ /* size must be a power of 2 */
++ BUG_ON(size & (size - 1));
++
++ fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
++ if (!fifo)
++ return ERR_PTR(-ENOMEM);
++
++ fifo->buffer = buffer;
++ fifo->size = size;
++ fifo->in = fifo->out = 0;
++ fifo->lock = lock;
++
++ return fifo;
++}
++
++EXPORT_SYMBOL(kfifo_init);
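++
++/* Illustrative use, assuming a caller-owned power-of-2 buffer:
++ *
++ * static DEFINE_SPINLOCK(my_lock);
++ * unsigned char *buf = kmalloc(1024, GFP_KERNEL);
++ * struct kfifo *fifo = kfifo_init(buf, 1024, GFP_KERNEL, &my_lock);
++ *
++ * Check the result with IS_ERR(); free the struct kfifo with kfree()
++ * when done -- the buffer stays with the caller.
++ */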
++
++/**
++ * kfifo_alloc - allocates a new FIFO and its internal buffer
++ * @size: the size of the internal buffer to be allocated.
++ * @gfp_mask: get_free_pages mask, passed to kmalloc()
++ * @lock: the lock to be used to protect the fifo buffer
++ *
++ * The size will be rounded up to a power of 2.
++ */
++struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t * lock)
++{
++ unsigned char *buffer;
++ struct kfifo *ret;
++
++ /*
++ * round up to the next power of 2, since our 'let the indices
++ * wrap' technique works only in this case.
++ */
++ if (size & (size - 1)) {
++ BUG_ON(size > 0x80000000);
++ size = roundup_pow_of_two(size);
++ }
++
++ buffer = kmalloc(size, gfp_mask);
++ if (!buffer)
++ return ERR_PTR(-ENOMEM);
++
++ ret = kfifo_init(buffer, size, gfp_mask, lock);
++
++ if (IS_ERR(ret))
++ kfree(buffer);
++
++ return ret;
++}
++
++EXPORT_SYMBOL(kfifo_alloc);
++
++/**
++ * kfifo_free - frees the FIFO
++ * @fifo: the fifo to be freed.
++ */
++void kfifo_free(struct kfifo *fifo)
++{
++ kfree(fifo->buffer);
++ kfree(fifo);
++}
++
++EXPORT_SYMBOL(kfifo_free);
++
++/**
++ * __kfifo_put - puts some data into the FIFO, no locking version
++ * @fifo: the fifo to be used.
++ * @buffer: the data to be added.
++ * @len: the length of the data to be added.
++ *
++ * This function copies at most @len bytes from the @buffer into
++ * the FIFO depending on the free space, and returns the number of
++ * bytes copied.
++ *
++ * Note that with only one concurrent reader and one concurrent
++ * writer, you don't need extra locking to use these functions.
++ */
++unsigned int
++__kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned int l;
++
++ len = min(len, fifo->size - fifo->in + fifo->out);
++
++ /*
++ * Ensure that we sample the fifo->out index -before- we
++ * start putting bytes into the kfifo.
++ */
++
++ smp_mb();
++
++ /* first put the data starting from fifo->in to buffer end */
++ l = min(len, fifo->size - (fifo->in & (fifo->size - 1)));
++ memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l);
++
++ /* then put the rest (if any) at the beginning of the buffer */
++ memcpy(fifo->buffer, buffer + l, len - l);
++
++ /*
++ * Ensure that we add the bytes to the kfifo -before-
++ * we update the fifo->in index.
++ */
++
++ smp_wmb();
++
++ fifo->in += len;
++
++ return len;
++}
++
++EXPORT_SYMBOL(__kfifo_put);
++
++/**
++ * __kfifo_get - gets some data from the FIFO, no locking version
++ * @fifo: the fifo to be used.
++ * @buffer: where the data must be copied.
++ * @len: the size of the destination buffer.
++ *
++ * This function copies at most @len bytes from the FIFO into the
++ * @buffer and returns the number of copied bytes.
++ *
++ * Note that with only one concurrent reader and one concurrent
++ * writer, you don't need extra locking to use these functions.
++ */
++unsigned int
++__kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned int l;
++
++ len = min(len, fifo->in - fifo->out);
++
++ /*
++ * Ensure that we sample the fifo->in index -before- we
++ * start removing bytes from the kfifo.
++ */
++
++ smp_rmb();
++
++ /* first get the data from fifo->out until the end of the buffer */
++ l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
++ memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l);
++
++ /* then get the rest (if any) from the beginning of the buffer */
++ memcpy(buffer + l, fifo->buffer, len - l);
++
++ /*
++ * Ensure that we remove the bytes from the kfifo -before-
++ * we update the fifo->out index.
++ */
++
++ smp_mb();
++
++ fifo->out += len;
++
++ return len;
++}
++
++EXPORT_SYMBOL(__kfifo_get);
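++
++/* Illustrative lockless pairing: with exactly one producer thread
++ * calling __kfifo_put() and one consumer thread calling __kfifo_get(),
++ * the memory barriers above make extra locking unnecessary; with more
++ * than one of either, wrap the calls with fifo->lock instead. */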
++#endif
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/linux_resource_internal.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/linux_resource_internal.h
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/linux_resource_internal.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/linux_resource_internal.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,75 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains Linux-specific API internal for the resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __LINUX_RESOURCE_INTERNAL__
++#define __LINUX_RESOURCE_INTERNAL__
++
++#include <ci/driver/resource/linux_efhw_nic.h>
++#include <ci/efrm/debug.h>
++#include <ci/efrm/driver_private.h>
++#include <ci/driver/efab/hardware.h>
++
++
++/*! Linux specific EtherFabric initialisation */
++extern int
++linux_efrm_nic_ctor(struct linux_efhw_nic *, struct pci_dev *,
++ spinlock_t *reg_lock,
++ unsigned nic_flags, unsigned nic_options);
++
++/*! Linux specific EtherFabric teardown */
++extern void linux_efrm_nic_dtor(struct linux_efhw_nic *);
++
++/*! Linux specific EtherFabric initialisation -- interrupt registration */
++extern int linux_efrm_irq_ctor(struct linux_efhw_nic *);
++
++/*! Linux specific EtherFabric initialisation -- interrupt deregistration */
++extern void linux_efrm_irq_dtor(struct linux_efhw_nic *);
++
++extern int efrm_driverlink_register(void);
++extern void efrm_driverlink_unregister(void);
++
++extern int
++efrm_nic_add(struct pci_dev *dev, unsigned int opts, const uint8_t *mac_addr,
++ struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock,
++ int bt_min, int bt_max, const struct vi_resource_dimensions *);
++extern void efrm_nic_del(struct linux_efhw_nic *);
++
++
++extern int efrm_install_proc_entries(void);
++extern void efrm_uninstall_proc_entries(void);
++
++#endif /* __LINUX_RESOURCE_INTERNAL__ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/Makefile linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/Makefile
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,15 @@
++obj-$(CONFIG_SFC_RESOURCE) := sfc_resource.o
++
++EXTRA_CFLAGS += -D__CI_HARDWARE_CONFIG_FALCON__
++EXTRA_CFLAGS += -D__ci_driver__
++EXTRA_CFLAGS += -Werror
++EXTRA_CFLAGS += -Idrivers/net/sfc -Idrivers/net/sfc/sfc_resource
++
++sfc_resource-objs := resource_driver.o iopage.o efx_vi_shm.o \
++ driverlink_new.o kernel_proc.o kfifo.o \
++ nic.o eventq.o falcon.o falcon_mac.o falcon_hash.o \
++ assert_valid.o buddy.o buffer_table.o filter_resource.o \
++ iobufset_resource.o resource_manager.o resources.o \
++ vi_resource_alloc.o vi_resource_event.o vi_resource_flush.o \
++ vi_resource_manager.o driver_object.o kernel_compat.o
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/nic.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/nic.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/nic.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/nic.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,190 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains EtherFabric Generic NIC instance (init, interrupts,
++ * etc)
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efhw/debug.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efhw/nic.h>
++#include <ci/efhw/eventq.h>
++
++
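++/* Recognised devices (vendor 0x1924, Solarflare): IDs 0x0703/0x6703
++ * are Falcon variant 'A' (class revision 0 or 1 maps to revision 0
++ * or 1); ID 0x0710 is Falcon variant 'B' (class revision 2 maps to
++ * revision 0). Anything else is rejected and the function returns 0. */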
++int efhw_device_type_init(struct efhw_device_type *dt,
++ int vendor_id, int device_id,
++ int class_revision)
++{
++ if (vendor_id != 0x1924)
++ return 0;
++
++ switch (device_id) {
++ case 0x0703:
++ case 0x6703:
++ dt->arch = EFHW_ARCH_FALCON;
++ dt->variant = 'A';
++ switch (class_revision) {
++ case 0:
++ dt->revision = 0;
++ break;
++ case 1:
++ dt->revision = 1;
++ break;
++ default:
++ return 0;
++ }
++ break;
++ case 0x0710:
++ dt->arch = EFHW_ARCH_FALCON;
++ dt->variant = 'B';
++ switch (class_revision) {
++ case 2:
++ dt->revision = 0;
++ break;
++ default:
++ return 0;
++ }
++ break;
++ default:
++ return 0;
++ }
++
++ return 1;
++}
++
++
++/*--------------------------------------------------------------------
++ *
++ * NIC Initialisation
++ *
++ *--------------------------------------------------------------------*/
++
++/* make this separate from initialising data structure
++** to allow this to be called at a later time once we can access PCI
++** config space to find out what hardware we have
++*/
++void efhw_nic_init(struct efhw_nic *nic, unsigned flags, unsigned options,
++ struct efhw_device_type dev_type)
++{
++ int i;
++
++ nic->devtype = dev_type;
++ nic->flags = flags;
++ nic->options = options;
++ nic->bar_ioaddr = 0;
++ spin_lock_init(&nic->the_reg_lock);
++ nic->reg_lock = &nic->the_reg_lock;
++ nic->mtu = 1500 + ETH_HLEN;
++
++ for (i = 0; i < EFHW_KEVENTQ_MAX; i++)
++ nic->irq_unit[i] = EFHW_IRQ_UNIT_UNUSED;
++
++ switch (nic->devtype.arch) {
++ case EFHW_ARCH_FALCON:
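++ /* These *_sizes fields are bitmasks of the supported ring
++ * sizes, not single values. */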
++ nic->evq_sizes = 512 | 1024 | 2048 | 4096 | 8192 |
++ 16384 | 32768;
++ nic->txq_sizes = 512 | 1024 | 2048 | 4096;
++ nic->rxq_sizes = 512 | 1024 | 2048 | 4096;
++ nic->efhw_func = &falcon_char_functional_units;
++ nic->ctr_ap_bytes = EFHW_64M;
++ switch (nic->devtype.variant) {
++ case 'A':
++ nic->ctr_ap_bar = FALCON_S_CTR_AP_BAR;
++ break;
++ case 'B':
++ nic->flags |= NIC_FLAG_NO_INTERRUPT;
++ nic->ctr_ap_bar = FALCON_P_CTR_AP_BAR;
++ break;
++ default:
++ EFHW_ASSERT(0);
++ break;
++ }
++ break;
++ default:
++ EFHW_ASSERT(0);
++ break;
++ }
++}
++
++
++void efhw_nic_close_interrupts(struct efhw_nic *nic)
++{
++ int i;
++
++ EFHW_ASSERT(nic);
++ if (!efhw_nic_have_hw(nic))
++ return;
++
++ EFHW_ASSERT(efhw_nic_have_hw(nic));
++
++ for (i = 0; i < EFHW_KEVENTQ_MAX; i++) {
++ if (nic->irq_unit[i] != EFHW_IRQ_UNIT_UNUSED)
++ efhw_nic_interrupt_disable(nic, i);
++ }
++}
++
++void efhw_nic_dtor(struct efhw_nic *nic)
++{
++ EFHW_ASSERT(nic);
++
++ /* Check that we have functional units because the software only
++ * driver doesn't initialise anything hardware related any more */
++
++#ifndef __ci_ul_driver__
++ /* close interrupts is called first because the act of deregistering
++ the driver could cause this driver to change from master to slave
++ and hence the implicit interrupt mappings would be wrong */
++
++ EFHW_TRACE("%s: functional units ... ", __FUNCTION__);
++
++ if (efhw_nic_have_functional_units(nic)) {
++ efhw_nic_close_interrupts(nic);
++ efhw_nic_close_hardware(nic);
++ }
++ EFHW_TRACE("%s: functional units ... done", __FUNCTION__);
++#endif
++
++ /* destroy event queues */
++ EFHW_TRACE("%s: event queues ... ", __FUNCTION__);
++
++#ifndef __ci_ul_driver__
++ {
++ int i;
++ for (i = 0; i < EFHW_KEVENTQ_MAX; ++i)
++ if (nic->evq[i].evq_mask)
++ efhw_keventq_dtor(nic, &nic->evq[i]);
++ }
++#endif
++
++ EFHW_TRACE("%s: event queues ... done", __FUNCTION__);
++
++ spin_lock_destroy(&nic->the_reg_lock);
++
++ EFHW_TRACE("%s: DONE", __FUNCTION__);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/resource_driver.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/resource_driver.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/resource_driver.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/resource_driver.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,640 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains main driver entry points.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "linux_resource_internal.h"
++#include "kernel_compat.h"
++#include <ci/efrm/nic_table.h>
++#include <ci/driver/resource/efx_vi.h>
++#include <ci/efhw/eventq.h>
++#include <ci/efhw/nic.h>
++#include <ci/efrm/buffer_table.h>
++#include <ci/efrm/vi_resource_private.h>
++#include <ci/efrm/driver_private.h>
++
++#if EFRM_HAVE_IOMMU_LOCK
++#ifdef EFRM_NEED_ALTERNATE_MAX_PFN
++extern unsigned long blk_max_pfn;
++#define max_pfn blk_max_pfn
++#else
++#include <linux/bootmem.h>
++#endif
++#endif
++
++MODULE_AUTHOR("Solarflare Communications");
++MODULE_LICENSE("GPL");
++
++static struct efhw_ev_handler ev_handler = {
++ .wakeup_fn = efrm_handle_wakeup_event,
++ .timeout_fn = efrm_handle_timeout_event,
++ .dmaq_flushed_fn = efrm_handle_dmaq_flushed,
++};
++
++#if EFRM_HAVE_IOMMU_LOCK
++int efrm_need_iommu_lock;
++EXPORT_SYMBOL(efrm_need_iommu_lock);
++#endif
++
++const int max_hardware_init_repeats = 10;
++
++/*--------------------------------------------------------------------
++ *
++ * Module load time variables
++ *
++ *--------------------------------------------------------------------*/
++/* See docs/notes/pci_alloc_consistent */
++int use_pci_alloc = 1; /* Use pci_alloc_consistent to alloc iopages */
++static int do_irq = 1; /* enable interrupts */
++
++#if defined(CONFIG_X86_XEN)
++static int irq_moderation = 60; /* interrupt moderation (60 usec) */
++#else
++static int irq_moderation = 20; /* interrupt moderation (20 usec) */
++#endif
++static int nic_options = NIC_OPT_DEFAULT;
++int efx_vi_eventq_size = EFX_VI_EVENTQ_SIZE_DEFAULT;
++
++module_param(do_irq, int, S_IRUGO);
++MODULE_PARM_DESC(do_irq, "Enable interrupts. "
++ "Do not turn it off unless you know what are you doing.");
++module_param(irq_moderation, int, S_IRUGO);
++MODULE_PARM_DESC(irq_moderation, "IRQ moderation in usec");
++module_param(nic_options, int, S_IRUGO);
++MODULE_PARM_DESC(nic_options, "Nic options -- see efhw_types.h");
++module_param(use_pci_alloc, int, S_IRUGO);
++MODULE_PARM_DESC(use_pci_alloc, "Use pci_alloc_consistent to alloc iopages "
++ "(autodetected by kernel version)");
++module_param(efx_vi_eventq_size, int, S_IRUGO);
++MODULE_PARM_DESC(efx_vi_eventq_size,
++ "Size of event queue allocated by efx_vi library");
++
++/*--------------------------------------------------------------------
++ *
++ * Linux specific NIC initialisation
++ *
++ *--------------------------------------------------------------------*/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++# define IRQ_PT_REGS_ARG , struct pt_regs *regs __attribute__ ((unused))
++#else
++# define IRQ_PT_REGS_ARG
++#endif
++
++#ifndef IRQF_SHARED
++# define IRQF_SHARED SA_SHIRQ
++#endif
++
++static inline irqreturn_t
++linux_efrm_interrupt(int irq, void *dev_id IRQ_PT_REGS_ARG)
++{
++ return efhw_nic_interrupt((struct efhw_nic *)dev_id);
++}
++
++int linux_efrm_irq_ctor(struct linux_efhw_nic *lnic)
++{
++ struct efhw_nic *nic = &lnic->nic;
++
++ nic->flags &= ~NIC_FLAG_MSI;
++ if (nic->flags & NIC_FLAG_TRY_MSI) {
++ int rc = pci_enable_msi(lnic->pci_dev);
++ if (rc < 0) {
++ EFRM_WARN("%s: Could not enable MSI (%d)",
++ __FUNCTION__, rc);
++ EFRM_WARN("%s: Continuing with legacy interrupt mode",
++ __FUNCTION__);
++ } else {
++ EFRM_NOTICE("%s: MSI enabled", __FUNCTION__);
++ nic->flags |= NIC_FLAG_MSI;
++ }
++ }
++
++ if (request_irq(lnic->pci_dev->irq, linux_efrm_interrupt,
++ IRQF_SHARED, "sfc_resource", nic)) {
++ EFRM_ERR("Request for interrupt #%d failed",
++ lnic->pci_dev->irq);
++ nic->flags &= ~NIC_FLAG_OS_IRQ_EN;
++ return -EBUSY;
++ }
++ nic->flags |= NIC_FLAG_OS_IRQ_EN;
++
++ return 0;
++}
++
++void linux_efrm_irq_dtor(struct linux_efhw_nic *lnic)
++{
++ EFRM_TRACE("linux_efrm_irq_dtor: start");
++
++ if (lnic->nic.flags & NIC_FLAG_OS_IRQ_EN) {
++ free_irq(lnic->pci_dev->irq, &lnic->nic);
++ lnic->nic.flags &= ~NIC_FLAG_OS_IRQ_EN;
++ }
++
++ if (lnic->nic.flags & NIC_FLAG_MSI) {
++ pci_disable_msi(lnic->pci_dev);
++ lnic->nic.flags &= ~NIC_FLAG_MSI;
++ }
++
++ EFRM_TRACE("linux_efrm_irq_dtor: done");
++}
++
++/* Allocate buffer table entries for a particular NIC.
++ */
++static int efrm_nic_buffer_table_alloc(struct efhw_nic *nic)
++{
++ int capacity;
++ int page_order;
++ int i;
++ int rc;
++
++ /* Choose queue size. */
++ for (capacity = 8192; capacity <= nic->evq_sizes; capacity <<= 1) {
++ if (capacity > nic->evq_sizes) {
++ EFRM_ERR
++ ("%s: Unable to choose EVQ size (supported=%x)",
++ __FUNCTION__, nic->evq_sizes);
++ return -E2BIG;
++ } else if (capacity & nic->evq_sizes)
++ break;
++ }
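++ /* The evq_sizes field is a bitmask of supported sizes; e.g. for
++ * Falcon it is 0xfe00 (512..32768), so the loop above settles on
++ * 8192, the starting candidate, as soon as its bit is set. */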
++ for (i = 0; i < EFHW_KEVENTQ_MAX; ++i) {
++ nic->evq[i].hw.capacity = capacity;
++ nic->evq[i].hw.buf_tbl_alloc.base = (unsigned)-1;
++ }
++
++ /* allocate buffer table entries to map onto the iobuffer */
++ page_order = get_order(capacity * sizeof(efhw_event_t));
++ if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
++ rc = efrm_buffer_table_alloc(page_order,
++ &nic->evq[0].hw.buf_tbl_alloc);
++ if (rc < 0) {
++ EFRM_WARN
++ ("%s: failed (%d) to alloc %d buffer table entries",
++ __FUNCTION__, rc, page_order);
++ return rc;
++ }
++ }
++ rc = efrm_buffer_table_alloc(page_order,
++ &nic->evq[FALCON_EVQ_NONIRQ].hw.
++ buf_tbl_alloc);
++ if (rc < 0) {
++ EFRM_WARN
++ ("%s: failed (%d) to alloc %d buffer table entries",
++ __FUNCTION__, rc, page_order);
++ return rc;
++ }
++
++ return 0;
++}
++
++/* Free buffer table entries allocated for a particular NIC.
++ */
++static void efrm_nic_buffer_table_free(struct efhw_nic *nic)
++{
++ int i;
++ for (i = 0; i <= FALCON_EVQ_NONIRQ; i++)
++ if (nic->evq[i].hw.buf_tbl_alloc.base != (unsigned)-1)
++ efrm_buffer_table_free(&nic->evq[i].hw.buf_tbl_alloc);
++
++}
++
++static int iomap_bar(struct linux_efhw_nic *lnic, size_t len)
++{
++ efhw_ioaddr_t ioaddr;
++
++ ioaddr = ioremap_nocache(lnic->ctr_ap_pci_addr, len);
++ if (ioaddr == 0)
++ return -ENOMEM;
++
++ lnic->nic.bar_ioaddr = ioaddr;
++ return 0;
++}
++
++static int linux_efhw_nic_map_ctr_ap(struct linux_efhw_nic *lnic)
++{
++ struct efhw_nic *nic = &lnic->nic;
++ int rc;
++
++ rc = iomap_bar(lnic, nic->ctr_ap_bytes);
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ /* Bug 5195: workaround for now. */
++ if (rc != 0 && nic->ctr_ap_bytes > 16 * 1024 * 1024) {
++ /* Try half the size for now. */
++ nic->ctr_ap_bytes /= 2;
++ EFRM_WARN("Bug 5195 WORKAROUND: retrying iomap of %d bytes",
++ nic->ctr_ap_bytes);
++ rc = iomap_bar(lnic, nic->ctr_ap_bytes);
++ }
++#endif
++
++ if (rc < 0) {
++ EFRM_ERR("Failed (%d) to map bar (%d bytes)",
++ rc, nic->ctr_ap_bytes);
++ return rc;
++ }
++
++ return rc;
++}
++
++int
++linux_efrm_nic_ctor(struct linux_efhw_nic *lnic, struct pci_dev *dev,
++ spinlock_t *reg_lock,
++ unsigned nic_flags, unsigned nic_options)
++{
++ struct efhw_device_type dev_type;
++ struct efhw_nic *nic = &lnic->nic;
++ u8 class_revision;
++ int rc;
++
++ rc = pci_read_config_byte(dev, PCI_CLASS_REVISION, &class_revision);
++ if (rc != 0) {
++ EFRM_ERR("%s: pci_read_config_byte failed (%d)",
++ __FUNCTION__, rc);
++ return rc;
++ }
++
++ if (!efhw_device_type_init(&dev_type, dev->vendor, dev->device,
++ class_revision)) {
++ EFRM_ERR("%s: efhw_device_type_init failed %04x:%04x(%d)",
++ __FUNCTION__, (unsigned) dev->vendor,
++ (unsigned) dev->device, (int) class_revision);
++ return -ENODEV;
++ }
++
++ EFRM_NOTICE("attaching device type %04x:%04x %d:%c%d",
++ (unsigned) dev->vendor, (unsigned) dev->device,
++ dev_type.arch, dev_type.variant, dev_type.revision);
++
++ /* Initialise the adapter-structure. */
++ efhw_nic_init(nic, nic_flags, nic_options, dev_type);
++ lnic->pci_dev = dev;
++
++ rc = pci_enable_device(dev);
++ if (rc < 0) {
++ EFRM_ERR("%s: pci_enable_device failed (%d)",
++ __FUNCTION__, rc);
++ return rc;
++ }
++
++ lnic->ctr_ap_pci_addr = pci_resource_start(dev, nic->ctr_ap_bar);
++
++ if (!pci_dma_supported(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) {
++ EFRM_ERR("%s: pci_dma_supported(%lx) failed", __FUNCTION__,
++ (unsigned long)EFHW_DMA_ADDRMASK);
++ return -ENODEV;
++ }
++
++ if (pci_set_dma_mask(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) {
++ EFRM_ERR("%s: pci_set_dma_mask(%lx) failed", __FUNCTION__,
++ (unsigned long)EFHW_DMA_ADDRMASK);
++ return -ENODEV;
++ }
++
++ if (pci_set_consistent_dma_mask(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) {
++ EFRM_ERR("%s: pci_set_consistent_dma_mask(%lx) failed",
++ __FUNCTION__, (unsigned long)EFHW_DMA_ADDRMASK);
++ return -ENODEV;
++ }
++
++ rc = linux_efhw_nic_map_ctr_ap(lnic);
++ if (rc < 0)
++ return rc;
++
++ /* By default struct efhw_nic contains its own lock for protecting
++ * access to nic registers. We override it with a pointer to the
++ * lock in the net driver. This is needed when resource and net
++ * drivers share a single PCI function (falcon B series).
++ */
++ nic->reg_lock = reg_lock;
++ return 0;
++}
++
++void linux_efrm_nic_dtor(struct linux_efhw_nic *lnic)
++{
++ struct efhw_nic *nic = &lnic->nic;
++ efhw_ioaddr_t bar_ioaddr = nic->bar_ioaddr;
++
++ efhw_nic_dtor(nic);
++
++ efrm_nic_buffer_table_free(nic);
++
++ /* Unmap the bar. */
++ EFRM_ASSERT(bar_ioaddr);
++ iounmap(bar_ioaddr);
++ nic->bar_ioaddr = 0;
++}
++
++/****************************************************************************
++ *
++ * efrm_tasklet - used to poll the eventq which may result in further callbacks
++ *
++ ****************************************************************************/
++
++static void efrm_tasklet(unsigned long pdev)
++{
++ struct efhw_nic *nic = (struct efhw_nic *)pdev;
++
++ EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT));
++
++ efhw_keventq_poll(nic, &nic->evq[0]);
++ EFRM_TRACE("tasklet complete");
++}
++
++/****************************************************************************
++ *
++ * char driver specific interrupt callbacks -- run at hard IRQL
++ *
++ ****************************************************************************/
++static void efrm_handle_eventq_irq(struct efhw_nic *nic, int evq)
++{
++ /* NB. The interrupt must have already been acked (for legacy mode). */
++
++ EFRM_TRACE("%s: starting tasklet", __FUNCTION__);
++ EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT));
++
++ tasklet_schedule(&linux_efhw_nic(nic)->tasklet);
++}
++
++/* A count of how many NICs this driver knows about. */
++static int n_nics_probed;
++
++/****************************************************************************
++ *
++ * efrm_nic_add: add the NIC to the resource driver
++ *
++ * NOTE: the flow of control through this routine is quite subtle
++ * because of the number of operations that can fail. We therefore
++ * take the approach of keeping the return code (rc) variable
++ * accurate, and only do operations while it is non-negative. Tear down
++ * is done at the end if rc is negative, depending on what has been set up
++ * by that point.
++ *
++ * So basically just make sure that any code you add checks rc>=0 before
++ * doing any work and you'll be fine.
++ *
++ ****************************************************************************/
++int
++efrm_nic_add(struct pci_dev *dev, unsigned flags, const uint8_t *mac_addr,
++ struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock,
++ int bt_min, int bt_max,
++ const struct vi_resource_dimensions *res_dim)
++{
++ struct linux_efhw_nic *lnic = NULL;
++ struct efhw_nic *nic = NULL;
++ int count = 0, rc = 0, resources_init = 0;
++ int constructed = 0;
++ int registered_nic = 0;
++ int buffers_allocated = 0;
++ static unsigned nic_index; /* = 0; */
++
++ EFRM_TRACE("%s: device detected (Slot '%s', IRQ %d)", __FUNCTION__,
++ pci_name(dev) ? pci_name(dev) : "?", dev->irq);
++
++ /* Ensure that we have room for the new adapter-structure. */
++ if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) {
++ EFRM_WARN("%s: WARNING: too many devices", __FUNCTION__);
++ rc = -ENOMEM;
++ goto failed;
++ }
++
++ if (n_nics_probed == 0) {
++ rc = efrm_resources_init(res_dim, bt_min, bt_max);
++ if (rc != 0)
++ goto failed;
++ resources_init = 1;
++ }
++
++ /* Allocate memory for the new adapter-structure. */
++ lnic = kmalloc(sizeof(*lnic), GFP_KERNEL);
++ if (lnic == NULL) {
++ EFRM_ERR("%s: ERROR: failed to allocate memory", __FUNCTION__);
++ rc = -ENOMEM;
++ goto failed;
++ }
++ memset(lnic, 0, sizeof(*lnic));
++ nic = &lnic->nic;
++
++ lnic->ev_handlers = &ev_handler;
++
++ /* OS specific hardware mappings */
++ rc = linux_efrm_nic_ctor(lnic, dev, reg_lock, flags, nic_options);
++ if (rc < 0) {
++ EFRM_ERR("%s: ERROR: initialisation failed", __FUNCTION__);
++ goto failed;
++ }
++
++ constructed = 1;
++
++ /* Tell the driver about the NIC - this needs to be done before the
++ resources managers get created below. Note we haven't initialised
++ the hardware yet, and I don't like doing this before the perhaps
++ unreliable hardware initialisation. However, there's quite a lot
++ of code to review if we wanted to hardware init before bringing
++ up the resource managers. */
++ rc = efrm_driver_register_nic(nic, nic_index++);
++ if (rc < 0) {
++ EFRM_ERR("%s: cannot register nic %d with nic error code %d",
++ __FUNCTION__, efrm_nic_table.nic_count, rc);
++ goto failed;
++ }
++ registered_nic = 1;
++
++ rc = efrm_nic_buffer_table_alloc(nic);
++ if (rc < 0)
++ goto failed;
++ buffers_allocated = 1;
++
++ /****************************************************/
++ /* hardware bringup */
++ /****************************************************/
++ /* Detecting hardware can be a slightly unreliable process;
++ we want to make sure that we maximise our chances, so we
++ loop a few times until all is good. */
++ for (count = 0; count < max_hardware_init_repeats; count++) {
++ rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr);
++ if (rc >= 0)
++ break;
++
++ /* pain */
++ EFRM_ERR
++ ("error - hardware initialisation failed code %d, "
++ "attempt %d of %d", rc, count + 1,
++ max_hardware_init_repeats);
++ }
++ if (rc < 0)
++ goto failed;
++
++ tasklet_init(&lnic->tasklet, efrm_tasklet, (ulong)nic);
++
++ /* set up interrupt handlers (hard-irq) */
++ nic->irq_handler = &efrm_handle_eventq_irq;
++
++ /* this device can now take management interrupts */
++ if (do_irq && !(nic->flags & NIC_FLAG_NO_INTERRUPT)) {
++ rc = linux_efrm_irq_ctor(lnic);
++ if (rc < 0) {
++ EFRM_ERR("Interrupt initialisation failed (%d)", rc);
++ goto failed;
++ }
++ efhw_nic_set_interrupt_moderation(nic, 0, irq_moderation);
++ efhw_nic_interrupt_enable(nic, 0);
++ }
++ EFRM_TRACE("interrupts are %sregistered", do_irq ? "" : "not ");
++
++#if EFRM_HAVE_IOMMU_LOCK
++ /* Bug 4560: We need the lock if there is memory which cannot be
++ * accessed by the card and there is an IOMMU to access it. In that
++ * case, the kernel will use the IOMMU to access the high memory. */
++ if ((dev->dma_mask >> PAGE_SHIFT) < max_pfn && !EFRM_NO_IOMMU)
++ efrm_need_iommu_lock = 1;
++#endif
++
++ *lnic_out = lnic;
++ EFRM_ASSERT(rc == 0);
++ ++n_nics_probed;
++ return 0;
++
++failed:
++ if (buffers_allocated)
++ efrm_nic_buffer_table_free(nic);
++ if (registered_nic)
++ efrm_driver_unregister_nic(nic);
++ if (constructed)
++ linux_efrm_nic_dtor(lnic);
++ kfree(lnic); /* safe in any case */
++ if (resources_init)
++ efrm_resources_fini();
++ return rc;
++}
++
++/****************************************************************************
++ *
++ * efrm_nic_del: Remove the nic from the resource driver structures
++ *
++ ****************************************************************************/
++void efrm_nic_del(struct linux_efhw_nic *lnic)
++{
++ struct efhw_nic *nic = &lnic->nic;
++
++ EFRM_TRACE("%s:", __FUNCTION__);
++ EFRM_ASSERT(nic);
++
++ efrm_driver_unregister_nic(nic);
++
++ /*
++ * Synchronise here with any running ISR.
++ * Remove the OS handler. There should be no IRQs being generated
++ * by our NIC at this point.
++ */
++ if (efhw_nic_have_functional_units(nic)) {
++ efhw_nic_close_interrupts(nic);
++ linux_efrm_irq_dtor(lnic);
++ tasklet_kill(&lnic->tasklet);
++ }
++
++ /* Close down hardware and free resources. */
++ linux_efrm_nic_dtor(lnic);
++ kfree(lnic);
++
++ if (--n_nics_probed == 0)
++ efrm_resources_fini();
++
++ EFRM_TRACE("NIC teardown: Done");
++}
++
++/****************************************************************************
++ *
++ * init_module: register as a PCI driver.
++ *
++ ****************************************************************************/
++static int init_sfc_resource(void)
++{
++ int rc = 0;
++
++ EFRM_TRACE("%s: RESOURCE driver starting", __FUNCTION__);
++
++ rc = efrm_driver_ctor();
++ if (rc < 0) {
++ EFRM_ERR("%s: efrm_driver_ctor: error %d", __FUNCTION__, rc);
++ goto fail_driver_ctor;
++ }
++
++ /* Register the driver so that our 'probe' function is called for
++ * each EtherFabric device in the system.
++ */
++ rc = efrm_driverlink_register();
++ if (rc == -ENODEV)
++ EFRM_ERR("%s: no devices found", __FUNCTION__);
++ if (rc < 0)
++ goto failed_driverlink;
++
++ if (efrm_install_proc_entries() != 0) {
++ /* Do not fail, but print a warning */
++ EFRM_WARN("%s: WARNING: failed to install /proc entries",
++ __FUNCTION__);
++ }
++
++ return 0;
++
++failed_driverlink:
++ /* No need to release resource managers here since they register
++ * destructors with the driver. */
++ efrm_driver_dtor();
++fail_driver_ctor:
++ EFRM_ASSERT(rc != 0);
++ return rc;
++}
++
++/****************************************************************************
++ *
++ * cleanup_module: module-removal entry-point
++ *
++ ****************************************************************************/
++static void cleanup_sfc_resource(void)
++{
++ efrm_uninstall_proc_entries();
++
++ efrm_driverlink_unregister();
++
++ /* Clean up char-driver specific initialisation.
++ The driver dtor can still use both work queue and buffer
++ table entries. */
++ efrm_driver_dtor();
++
++ EFRM_TRACE("unloaded");
++}
++
++module_init(init_sfc_resource);
++module_exit(cleanup_sfc_resource);
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/resource_manager.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/resource_manager.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/resource_manager.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/resource_manager.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,263 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains generic code for resources and resource managers.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/debug.h>
++#include <ci/efrm/nic_table.h>
++#include <ci/efhw/iopage.h>
++#include <ci/efrm/driver_private.h>
++
++/**********************************************************************
++ * Internal stuff.
++ */
++
++#define EFRM_RM_TABLE_SIZE_INIT 256
++
++static int grow_table(struct efrm_resource_manager *rm, unsigned min_size)
++{
++ irq_flags_t lock_flags;
++ struct efrm_resource **table, **old_table;
++ unsigned new_size;
++
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm);
++
++ spin_lock_irqsave(&rm->rm_lock, lock_flags);
++
++ /* Check whether the size of the table increased whilst the lock was
++ * dropped. */
++ if (min_size <= rm->rm_table_size) {
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ return 0;
++ }
++
++ new_size = rm->rm_table_size << 1;
++ if (new_size < min_size)
++ new_size = min_size;
++
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ if (in_atomic()) {
++ EFRM_WARN("%s: in_atomic in grow_table()", __FUNCTION__);
++ EFRM_WARN("%s: allocating %u bytes", __FUNCTION__,
++ (unsigned)(new_size *
++ sizeof(struct efrm_resource *)));
++ return -ENOMEM;
++ }
++
++ table =
++ (struct efrm_resource **)vmalloc(new_size *
++ sizeof(struct efrm_resource *));
++ spin_lock_irqsave(&rm->rm_lock, lock_flags);
++
++ if (table == 0) {
++ EFRM_ERR("%s: out of memory in grow_table()", __FUNCTION__);
++ EFRM_ERR("%s: allocating %u bytes", __FUNCTION__,
++ (unsigned)(new_size *
++ sizeof(struct efrm_resource *)));
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ return -ENOMEM;
++ }
++
++ /* Could have got bigger while we dropped the lock... */
++ if (new_size <= rm->rm_table_size) {
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ vfree(table);
++ return 0;
++ }
++
++ memcpy(table, rm->rm_table, rm->rm_table_size * sizeof(*table));
++ memset(table + rm->rm_table_size, 0,
++ sizeof(*table) * (new_size - rm->rm_table_size));
++ /* remember old table so we can free the
++ memory after we drop the lock (bug 1040) */
++ old_table = rm->rm_table;
++ rm->rm_table = table;
++ rm->rm_table_size = new_size;
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ vfree(old_table);
++
++ return 0;
++}
++
++/**********************************************************************
++ * struct efrm_resource_manager
++ */
++
++void efrm_resource_manager_dtor(struct efrm_resource_manager *rm)
++{
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm);
++
++ /* call destructor */
++ EFRM_DO_DEBUG(if (rm->rm_resources)
++ EFRM_ERR("%s: %s leaked %d resources",
++ __FUNCTION__, rm->rm_name, rm->rm_resources));
++ EFRM_ASSERT(rm->rm_resources == 0);
++
++ rm->rm_dtor(rm);
++
++ /* clear out things built by efrm_resource_manager_ctor */
++ spin_lock_destroy(&rm->rm_lock);
++ vfree(rm->rm_table);
++
++ /* and then free the memory */
++ EFRM_DO_DEBUG(memset(rm, 0, sizeof(*rm)));
++ kfree(rm);
++}
++
++/* Construct a resource manager. Resource managers are singletons. */
++int
++efrm_resource_manager_ctor(struct efrm_resource_manager *rm,
++ void (*dtor)(struct efrm_resource_manager *),
++ const char *name, unsigned type,
++ int initial_table_size)
++{
++ EFRM_ASSERT(rm);
++ EFRM_ASSERT(dtor);
++
++ rm->rm_name = name;
++ EFRM_DO_DEBUG(rm->rm_type = type);
++ rm->rm_dtor = dtor;
++ spin_lock_init(&rm->rm_lock);
++ rm->rm_resources = 0;
++ rm->rm_resources_hiwat = 0;
++
++ /* if not set then pick a number */
++ rm->rm_table_size = (initial_table_size) ?
++ initial_table_size : EFRM_RM_TABLE_SIZE_INIT;
++
++ rm->rm_table = vmalloc(rm->rm_table_size *
++ sizeof(struct efrm_resource *));
++
++ if (rm->rm_table == 0) {
++ spin_lock_destroy(&rm->rm_lock);
++ return -ENOMEM;
++ }
++ memset(rm->rm_table, 0, sizeof(*rm->rm_table) * rm->rm_table_size);
++
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm);
++ return 0;
++}
++
++int efrm_resource_manager_insert(struct efrm_resource *rs)
++{
++ irq_flags_t lock_flags;
++ struct efrm_resource_manager *rm;
++ int instance = EFRM_RESOURCE_INSTANCE(rs->rs_handle);
++
++ EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) < EFRM_RESOURCE_NUM);
++ rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)];
++ EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) == rm->rm_type);
++ EFRM_RESOURCE_ASSERT_VALID(rs, 0);
++
++ /* Put an entry in the resource table. */
++ spin_lock_irqsave(&rm->rm_lock, lock_flags);
++ if ((unsigned)instance >= rm->rm_table_size) {
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++ if (grow_table(rm, instance + 1) < 0)
++ return -ENOMEM;
++ spin_lock_irqsave(&rm->rm_lock, lock_flags);
++ }
++ EFRM_ASSERT(rm->rm_table_size > (unsigned)instance);
++ EFRM_ASSERT(rm->rm_table[instance] == NULL);
++ rm->rm_table[instance] = rs;
++ rm->rm_resources++;
++ if (rm->rm_resources > rm->rm_resources_hiwat)
++ rm->rm_resources_hiwat = rm->rm_resources;
++
++ /* Put the resource in the linked list. */
++ /* ?? broken list_add(&rm->rm_resources, &rs->rs_link); */
++ /* DJR wrote that it causes problem on driver unload, and DR tried
++ * it and saw (probably) this cause an assertion failure due to a
++ * bad link structure in
++ * /runbench/results/2005/09/22/0_DupTester_15-16-46 */
++
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++
++ return 0;
++}
++
++bool __efrm_resource_ref_count_zero(unsigned type, unsigned instance)
++{
++ /* This is rather nasty because when a resource's ref count goes to
++ * zero there is still a pointer to it in the [rm_table]. Thus
++ * arriving here does not guarantee that we have exclusive access
++ * to the resource and can free it. In fact the resource may
++ * already have been freed by another thread (after we dropped our
++ * ref, but before arriving here).
++ *
++ * At this point the only pointers to this resource should be [rs]
++ * and the one in [rm_table]. EXCEPT: Someone could have got in
++ * and looked-up the resource in the table before we got the lock.
++ * In this case the ref will have been hiked again.
++ *
++ * Therefore, if ref count is non-zero here, we shouldn't do
++ * anything, as someone else holds a ref to the resource, and will
++ * eventually release it.
++ *
++ * Otherwise, we zero-out the table entry. Therefore we have the
++ * only pointer to the resource, and can kill it safely.
++ */
++ struct efrm_resource_manager *rm = efrm_rm_table[type];
++ irq_flags_t lock_flags;
++ struct efrm_resource *rs;
++ bool do_free = false;
++
++ EFRM_TRACE("efrm_resource_ref_count_zero: type=%d instance=%d",
++ rm->rm_type, instance);
++
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm);
++ EFRM_ASSERT(rm->rm_table_size > instance);
++
++ spin_lock_irqsave(&rm->rm_lock, lock_flags);
++
++ rs = rm->rm_table[instance];
++ if (rs != NULL) {
++ do_free = atomic_read(&rs->rs_ref_count) == 0;
++ if (do_free) {
++ EFRM_ASSERT(rm->rm_resources > 0);
++ --rm->rm_resources;
++ rm->rm_table[instance] = 0;
++ }
++ }
++
++ spin_unlock_irqrestore(&rm->rm_lock, lock_flags);
++
++ return do_free;
++}
++EXPORT_SYMBOL(__efrm_resource_ref_count_zero);
++
++/*
++ * vi: sw=8:ai:aw
++ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/resources.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/resources.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/resources.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/resources.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,94 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains resource managers initialisation functions.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/private.h>
++#include <ci/efrm/buffer_table.h>
++
++int
++efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim,
++ int buffer_table_min, int buffer_table_max)
++{
++ int i, rc;
++
++ rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_max);
++ if (rc != 0)
++ return rc;
++
++ /* Create resources in the correct order */
++ for (i = 0; i < EFRM_RESOURCE_NUM; ++i) {
++ struct efrm_resource_manager **rmp = &efrm_rm_table[i];
++
++ EFRM_ASSERT(*rmp == NULL);
++ switch (i) {
++ case EFRM_RESOURCE_VI:
++ rc = efrm_create_vi_resource_manager(rmp,
++ vi_res_dim);
++ break;
++ case EFRM_RESOURCE_FILTER:
++ rc = efrm_create_filter_resource_manager(rmp);
++ break;
++ case EFRM_RESOURCE_IOBUFSET:
++ rc = efrm_create_iobufset_resource_manager(rmp);
++ break;
++ default:
++ rc = 0;
++ break;
++ }
++
++ if (rc < 0) {
++ EFRM_ERR("%s: failed type=%d (%d)",
++ __FUNCTION__, i, rc);
++ efrm_buffer_table_dtor();
++ return rc;
++ }
++ }
++
++ return 0;
++}
++
++void efrm_resources_fini(void)
++{
++ int i;
++
++ for (i = EFRM_RESOURCE_NUM - 1; i >= 0; --i)
++ if (efrm_rm_table[i]) {
++ efrm_resource_manager_dtor(efrm_rm_table[i]);
++ efrm_rm_table[i] = NULL;
++ }
++
++ efrm_buffer_table_dtor();
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_alloc.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_alloc.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_alloc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_alloc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,876 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains allocation of VI resources.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/efhw/iopage.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/public.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efrm/private.h>
++#include <ci/efrm/buffer_table.h>
++#include <ci/efrm/vi_resource_private.h>
++
++/*** Data definitions ****************************************************/
++
++static const char *dmaq_names[] = { "TX", "RX" };
++
++struct vi_resource_manager *efrm_vi_manager;
++
++/*** Forward references **************************************************/
++
++static int
++efrm_vi_resource_alloc_or_free(int alloc, struct vi_resource *evq_virs,
++ uint16_t vi_flags, int32_t evq_capacity,
++ int32_t txq_capacity, int32_t rxq_capacity,
++ uint8_t tx_q_tag, uint8_t rx_q_tag,
++ struct vi_resource **virs_in_out);
++
++/*** Reference count handling ********************************************/
++
++static inline void efrm_vi_rm_get_ref(struct vi_resource *virs)
++{
++ atomic_inc(&virs->evq_refs);
++}
++
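++/* Dropping the last event-queue reference releases the VI through the
++ * shared alloc-or-free path. */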
++static inline void efrm_vi_rm_drop_ref(struct vi_resource *virs)
++{
++ EFRM_ASSERT(atomic_read(&virs->evq_refs) != 0);
++ if (atomic_dec_and_test(&virs->evq_refs))
++ efrm_vi_resource_alloc_or_free(false, NULL, 0, 0, 0, 0, 0, 0,
++ &virs);
++}
++
++/*** Instance numbers ****************************************************/
++
++static inline int efrm_vi_rm_alloc_id(uint16_t vi_flags, int32_t evq_capacity)
++{
++ irq_flags_t lock_flags;
++ int instance;
++ int rc;
++
++ if (efrm_nic_table.a_nic == NULL) /* ?? FIXME: surely not right */
++ return -ENODEV;
++
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ /* Falcon A1 RX phys addr weirdness. */
++ if (efrm_nic_table.a_nic->devtype.variant == 'A' &&
++ (vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) {
++ if (vi_flags & EFHW_VI_JUMBO_EN) {
++ /* Falcon-A cannot do phys + scatter. */
++ EFRM_WARN
++ ("%s: falcon-A does not support phys+scatter mode",
++ __FUNCTION__);
++ instance = -1;
++ } else if (efrm_vi_manager->iscsi_dmaq_instance_is_free
++ && evq_capacity == 0) {
++ /* Falcon-A has a single RXQ that gives the correct
++ * semantics for physical addressing. However, it
++ * happens to have the same instance number as the
++ * 'char' event queue, so we cannot also hand out
++ * the event queue. */
++ efrm_vi_manager->iscsi_dmaq_instance_is_free = false;
++ instance = FALCON_A1_ISCSI_DMAQ;
++ } else {
++ EFRM_WARN("%s: iSCSI receive queue not free",
++ __FUNCTION__);
++ instance = -1;
++ }
++ goto unlock_out;
++ }
++
++ if (vi_flags & EFHW_VI_RM_WITH_INTERRUPT) {
++ rc = __kfifo_get(efrm_vi_manager->instances_with_interrupt,
++ (unsigned char *)&instance, sizeof(instance));
++ if (rc != sizeof(instance)) {
++ EFRM_ASSERT(rc == 0);
++ instance = -1;
++ }
++ goto unlock_out;
++ }
++
++ /* Otherwise a normal run-of-the-mill VI. */
++ rc = __kfifo_get(efrm_vi_manager->instances_with_timer,
++ (unsigned char *)&instance, sizeof(instance));
++ if (rc != sizeof(instance)) {
++ EFRM_ASSERT(rc == 0);
++ instance = -1;
++ }
++
++unlock_out:
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++ return instance;
++}
++
++static void efrm_vi_rm_free_id(int instance)
++{
++ irq_flags_t lock_flags;
++ struct kfifo *instances;
++
++ if (efrm_nic_table.a_nic == NULL) /* ?? FIXME: surely not right */
++ return;
++
++ if (efrm_nic_table.a_nic->devtype.variant == 'A' &&
++ instance == FALCON_A1_ISCSI_DMAQ) {
++ EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free ==
++ false);
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++ efrm_vi_manager->iscsi_dmaq_instance_is_free = true;
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock,
++ lock_flags);
++ } else {
++ if (instance >= efrm_vi_manager->with_timer_base &&
++ instance < efrm_vi_manager->with_timer_limit) {
++ instances = efrm_vi_manager->instances_with_timer;
++ } else {
++ EFRM_ASSERT(instance >=
++ efrm_vi_manager->with_interrupt_base);
++ EFRM_ASSERT(instance <
++ efrm_vi_manager->with_interrupt_limit);
++ instances = efrm_vi_manager->instances_with_interrupt;
++ }
++
++ EFRM_VERIFY_EQ(kfifo_put(instances, (unsigned char *)&instance,
++ sizeof(instance)), sizeof(instance));
++ }
++}
++
++/*** Queue sizes *********************************************************/
++
++/* NB. This should really take a nic as an argument, but that makes
++ * the buffer table allocation difficult. */
++uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs
++ /*,struct efhw_nic *nic */ )
++{
++ return virs->evq_capacity * sizeof(efhw_event_t);
++}
++EXPORT_SYMBOL(efrm_vi_rm_evq_bytes);
++
++/* NB. This should really take a nic as an argument, but that makes
++ * the buffer table allocation difficult. */
++uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs
++ /*,struct efhw_nic *nic */ )
++{
++ return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] *
++ FALCON_DMA_TX_DESC_BYTES;
++}
++EXPORT_SYMBOL(efrm_vi_rm_txq_bytes);
++
++/* NB. This should really take a nic as an argument, but that makes
++ * the buffer table allocation difficult. */
++uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs
++ /*,struct efhw_nic *nic */ )
++{
++ uint32_t bytes_per_desc = ((virs->flags & EFHW_VI_RX_PHYS_ADDR_EN)
++ ? FALCON_DMA_RX_PHYS_DESC_BYTES
++ : FALCON_DMA_RX_BUF_DESC_BYTES);
++ return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] * bytes_per_desc;
++}
++EXPORT_SYMBOL(efrm_vi_rm_rxq_bytes);
++
++static int choose_size(int size_rq, unsigned sizes)
++{
++ int size;
++
++ /* size_rq < 0 means default, but we interpret this as 'minimum'. */
++
++ for (size = 256;; size <<= 1)
++ if ((size & sizes) && size >= size_rq)
++ return size;
++ else if ((sizes & ~((size - 1) | size)) == 0)
++ return -1;
++}
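++
++/* For example, with sizes = 0xfe00 (512..32768 supported), a request
++ * of 1000 passes over 256 and 512 and returns 1024; a request of
++ * exactly 32768 returns 32768; anything larger exhausts the mask and
++ * yields -1. */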
++
++static int
++efrm_vi_rm_adjust_alloc_request(struct vi_resource *virs, struct efhw_nic *nic)
++{
++ int capacity;
++
++ EFRM_ASSERT(nic->efhw_func);
++
++ if (virs->evq_capacity) {
++ capacity = choose_size(virs->evq_capacity, nic->evq_sizes);
++ if (capacity < 0) {
++ EFRM_ERR("vi_resource: bad evq size %d (supported=%x)",
++ virs->evq_capacity, nic->evq_sizes);
++ return -E2BIG;
++ }
++ virs->evq_capacity = capacity;
++ }
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
++ capacity =
++ choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
++ nic->txq_sizes);
++ if (capacity < 0) {
++ EFRM_ERR("vi_resource: bad txq size %d (supported=%x)",
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
++ nic->txq_sizes);
++ return -E2BIG;
++ }
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = capacity;
++ }
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
++ capacity =
++ choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX],
++ nic->rxq_sizes);
++ if (capacity < 0) {
++ EFRM_ERR("vi_resource: bad rxq size %d (supported=%x)",
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX],
++ nic->rxq_sizes);
++ return -E2BIG;
++ }
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = capacity;
++ }
++
++ return 0;
++}
++
++/* remove the reference to the event queue in this VI resource and decrement
++ the event queue's use count */
++static inline void efrm_vi_rm_detach_evq(struct vi_resource *virs)
++{
++ struct vi_resource *evq_virs;
++
++ EFRM_ASSERT(virs != NULL);
++
++ evq_virs = virs->evq_virs;
++
++ if (evq_virs != NULL) {
++ virs->evq_virs = NULL;
++ if (evq_virs == virs) {
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
++ " had internal event queue ", __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
++ } else {
++ efrm_vi_rm_drop_ref(evq_virs);
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " had event queue "
++ EFRM_RESOURCE_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
++ EFRM_RESOURCE_PRI_ARG(evq_virs->rs.
++ rs_handle));
++ }
++ } else {
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
++ " had no event queue (nothing to do)",
++ __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
++ }
++}
++
++/*** Buffer Table allocations ********************************************/
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++static int
++efrm_vi_rm_alloc_or_free_buffer_table(struct vi_resource *virs, bool is_alloc)
++{
++ uint32_t bytes;
++ int page_order;
++ int rc;
++
++ if (!is_alloc)
++ goto destroy;
++
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
++ bytes = efrm_vi_rm_txq_bytes(virs);
++ page_order = get_order(bytes);
++ rc = efrm_buffer_table_alloc(page_order,
++ (virs->dmaq_buf_tbl_alloc +
++ EFRM_VI_RM_DMA_QUEUE_TX));
++ if (rc != 0) {
++ EFRM_TRACE
++ ("%s: Error %d allocating TX buffer table entry",
++ __FUNCTION__, rc);
++ goto fail_txq_alloc;
++ }
++ }
++
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
++ bytes = efrm_vi_rm_rxq_bytes(virs);
++ page_order = get_order(bytes);
++ rc = efrm_buffer_table_alloc(page_order,
++ (virs->dmaq_buf_tbl_alloc +
++ EFRM_VI_RM_DMA_QUEUE_RX));
++ if (rc != 0) {
++ EFRM_TRACE
++ ("%s: Error %d allocating RX buffer table entry",
++ __FUNCTION__, rc);
++ goto fail_rxq_alloc;
++ }
++ }
++ return 0;
++
++destroy:
++ rc = 0;
++
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) {
++ efrm_buffer_table_free(&virs->dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_RX]);
++ }
++fail_rxq_alloc:
++
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) {
++ efrm_buffer_table_free(&virs->dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_TX]);
++ }
++fail_txq_alloc:
++
++ return rc;
++}
++
++#endif /* defined(__CI_HARDWARE_CONFIG_FALCON__) */
++
++/*** Per-NIC allocations *************************************************/
++
++static inline int
++efrm_vi_rm_init_evq(struct vi_resource *virs, int nic_index)
++{
++ int rc;
++ struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
++ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++ struct eventq_resource_hardware *evq_hw =
++ &virs->nic_info[nic_index].evq_pages;
++ uint32_t buf_bytes = efrm_vi_rm_evq_bytes(virs);
++
++ if (virs->evq_capacity == 0)
++ return 0;
++ evq_hw->capacity = virs->evq_capacity;
++
++ /* Allocate buffer table entries to map onto the iobuffer. This
++ * currently allocates its own buffer table entries on Falcon, which
++ * is a bit wasteful on a multi-NIC system. */
++ evq_hw->buf_tbl_alloc.base = (unsigned)-1;
++ rc = efrm_buffer_table_alloc(get_order(buf_bytes),
++ &evq_hw->buf_tbl_alloc);
++ if (rc < 0) {
++ EFHW_WARN("%s: failed (%d) to alloc buffer table entries (order %d)",
++ __FUNCTION__, rc, get_order(buf_bytes));
++ return rc;
++ }
++
++ /* Allocate the event queue memory. */
++ rc = efhw_nic_event_queue_alloc_iobuffer(nic, evq_hw, instance,
++ buf_bytes);
++ if (rc != 0) {
++ EFRM_ERR("%s: Error allocating iobuffer: %d", __FUNCTION__, rc);
++ efrm_buffer_table_free(&evq_hw->buf_tbl_alloc);
++ return rc;
++ }
++
++ /* Initialise the event queue hardware */
++ efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity,
++ efhw_iopages_dma_addr(&evq_hw->iobuff) +
++ evq_hw->iobuff_off,
++ evq_hw->buf_tbl_alloc.base);
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " capacity=%u", __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
++ virs->evq_capacity);
++
++#if defined(__ia64__)
++ /* Page size may be large, so for now just increase the
++ * size of the requested evq up to a round number of
++ * pages
++ */
++ buf_bytes = CI_ROUND_UP(buf_bytes, PAGE_SIZE);
++#endif
++ EFRM_ASSERT(buf_bytes % PAGE_SIZE == 0);
++
++ virs->mem_mmap_bytes += buf_bytes;
++
++ return 0;
++}
++
++static inline void
++efrm_vi_rm_fini_evq(struct vi_resource *virs, int nic_index)
++{
++ struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
++ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++ struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index];
++
++ if (virs->evq_capacity == 0)
++ return;
++
++ /* Zero the timer value for this queue and tell the NIC to stop
++ * using this event queue. */
++ efhw_nic_event_queue_disable(nic, instance, 0);
++
++ if (nic_info->evq_pages.buf_tbl_alloc.base != (unsigned)-1)
++ efrm_buffer_table_free(&nic_info->evq_pages.buf_tbl_alloc);
++
++ efhw_iopages_free(nic, &nic_info->evq_pages.iobuff);
++}
++
++/*! FIXME: we should make sure this number is never zero (=> unprotected) */
++/*! FIXME: put this definition in a relevant header (e.g. as (evqid)+1) */
++#define EFAB_EVQ_OWNER_ID(evqid) ((evqid))
++
++void
++efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_type,
++ struct efhw_nic *nic)
++{
++ int instance;
++ struct vi_resource *evq_virs;
++ int evq_instance;
++ efhw_buffer_addr_t buf_addr;
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++ evq_virs = virs->evq_virs;
++ evq_instance = EFRM_RESOURCE_INSTANCE(evq_virs->rs.rs_handle);
++
++ buf_addr = virs->dmaq_buf_tbl_alloc[queue_type].base;
++
++ if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) {
++ efhw_nic_dmaq_tx_q_init(nic,
++ instance, /* dmaq */
++ evq_instance, /* evq */
++ EFAB_EVQ_OWNER_ID(evq_instance), /* owner */
++ virs->dmaq_tag[queue_type], /* tag */
++ virs->dmaq_capacity[queue_type], /* size of queue */
++ buf_addr, /* buffer index */
++ virs->flags); /* user specified Q attrs */
++ } else {
++ efhw_nic_dmaq_rx_q_init(nic,
++ instance, /* dmaq */
++ evq_instance, /* evq */
++ EFAB_EVQ_OWNER_ID(evq_instance), /* owner */
++ virs->dmaq_tag[queue_type], /* tag */
++ virs->dmaq_capacity[queue_type], /* size of queue */
++ buf_addr, /* buffer index */
++ virs->flags); /* user specified Q attrs */
++ }
++}
++
++static int
++efrm_vi_rm_init_or_fini_dmaq(struct vi_resource *virs,
++ int queue_type, int init, int nic_index)
++{
++ int rc;
++ struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
++ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++ uint32_t buf_bytes;
++ struct vi_resource *evq_virs;
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index];
++ int page_order;
++ uint32_t num_pages;
++ efhw_iopages_t *iobuff;
++#endif
++
++ if (!init)
++ goto destroy;
++
++ /* Ignore disabled queues. */
++ if (virs->dmaq_capacity[queue_type] == 0) {
++ if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX)
++ efhw_nic_dmaq_tx_q_disable(nic, instance);
++ else
++ efhw_nic_dmaq_rx_q_disable(nic, instance);
++ return 0;
++ }
++
++ buf_bytes = (queue_type == EFRM_VI_RM_DMA_QUEUE_TX
++ ? efrm_vi_rm_txq_bytes(virs)
++ : efrm_vi_rm_rxq_bytes(virs));
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ page_order = get_order(buf_bytes);
++
++ rc = efhw_iopages_alloc(nic, &nic_info->dmaq_pages[queue_type],
++ page_order);
++ if (rc != 0) {
++ EFRM_ERR("%s: Failed to allocate %s DMA buffer.", __FUNCTION__,
++ dmaq_names[queue_type]);
++ goto fail_iopages;
++ }
++
++ num_pages = 1 << page_order;
++ iobuff = &nic_info->dmaq_pages[queue_type];
++ efhw_nic_buffer_table_set_n(nic,
++ virs->dmaq_buf_tbl_alloc[queue_type].base,
++ efhw_iopages_dma_addr(iobuff),
++ EFHW_NIC_PAGE_SIZE, 0, num_pages, 0);
++
++ falcon_nic_buffer_table_confirm(nic);
++
++ virs->mem_mmap_bytes += round_up(buf_bytes, PAGE_SIZE);
++#endif /* __CI_HARDWARE_CONFIG_FALCON__ */
++
++ evq_virs = virs->evq_virs;
++ EFRM_ASSERT(evq_virs);
++
++ /* Make sure there is an event queue. */
++ if (evq_virs->evq_capacity <= 0) {
++ EFRM_ERR("%s: Cannot use empty event queue for %s DMA",
++ __FUNCTION__, dmaq_names[queue_type]);
++ rc = -EINVAL;
++ goto fail_evq;
++ }
++
++ efrm_vi_rm_init_dmaq(virs, queue_type, nic);
++
++ return 0;
++
++destroy:
++ rc = 0;
++
++ /* Ignore disabled queues. */
++ if (virs->dmaq_capacity[queue_type] == 0)
++ return 0;
++
++ /* No need to disable the queue here. Nobody is using it anyway. */
++
++fail_evq:
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ efhw_iopages_free(nic, &nic_info->dmaq_pages[queue_type]);
++fail_iopages:
++#endif
++
++ return rc;
++}
++
++static int
++efrm_vi_rm_init_or_fini_nic(struct vi_resource *virs, int init, int nic_index)
++{
++ struct vi_resource *evq_virs;
++ int rc;
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++#ifndef NDEBUG
++ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++#endif
++#endif
++
++ if (!init)
++ goto destroy;
++
++ evq_virs = virs->evq_virs;
++ if (evq_virs != virs) {
++ if (!efrm_nic_set_read(&evq_virs->nic_set, nic_index)) {
++ /* Ignore this NIC. It's not supported by the event
++ * queue. */
++ return 0;
++ }
++ }
++
++ rc = efrm_vi_rm_init_evq(virs, nic_index);
++ if (rc != 0)
++ goto fail_evq;
++
++ rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
++ init, nic_index);
++ if (rc != 0)
++ goto fail_txq;
++
++ rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
++ init, nic_index);
++ if (rc != 0)
++ goto fail_rxq;
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ /* Allocate space for the control page. */
++ EFRM_ASSERT(falcon_tx_dma_page_offset(instance) < PAGE_SIZE);
++ EFRM_ASSERT(falcon_rx_dma_page_offset(instance) < PAGE_SIZE);
++ EFRM_ASSERT(falcon_timer_page_offset(instance) < PAGE_SIZE);
++ virs->bar_mmap_bytes += PAGE_SIZE;
++#endif
++
++ /* Mark the NIC as having been initialised. */
++ efrm_nic_set_write(&virs->nic_set, nic_index, true);
++
++ return 0;
++
++destroy:
++ rc = 0;
++
++ efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
++ false, nic_index);
++fail_rxq:
++
++ efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
++ false, nic_index);
++fail_txq:
++
++ efrm_vi_rm_fini_evq(virs, nic_index);
++fail_evq:
++
++ /* Mark the NIC as having been finalised. */
++ efrm_nic_set_write(&virs->nic_set, nic_index, false);
++ EFRM_ASSERT(rc != 0 || !init);
++
++ return rc;
++}
++
++static int
++efrm_vi_resource_alloc_or_free(int alloc, struct vi_resource *evq_virs,
++ uint16_t vi_flags, int32_t evq_capacity,
++ int32_t txq_capacity, int32_t rxq_capacity,
++ uint8_t tx_q_tag, uint8_t rx_q_tag,
++ struct vi_resource **virs_in_out)
++{
++ struct vi_resource *virs;
++ int rc;
++ int instance;
++ struct efhw_nic *nic;
++ int nic_i;
++
++ EFRM_ASSERT(virs_in_out);
++ EFRM_ASSERT(efrm_vi_manager);
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm);
++
++ if (!alloc)
++ goto destroy;
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ rx_q_tag &= (1 << RX_DESCQ_LABEL_WIDTH) - 1;
++ tx_q_tag &= (1 << TX_DESCQ_LABEL_WIDTH) - 1;
++#endif
++
++ virs = kmalloc(sizeof(*virs), GFP_KERNEL);
++ if (virs == NULL) {
++ EFRM_ERR("%s: Error allocating VI resource object",
++ __FUNCTION__);
++ rc = -ENOMEM;
++ goto fail_alloc;
++ }
++ memset(virs, 0, sizeof(*virs));
++
++ /* Some macros make the assumption that the struct efrm_resource is
++ * the first member of a struct vi_resource. */
++ EFRM_ASSERT(&virs->rs == (struct efrm_resource *) (virs));
++
++ instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity);
++ if (instance < 0) {
++ /* Clear out the close list... */
++ efrm_vi_rm_salvage_flushed_vis();
++ instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity);
++ if (instance >= 0)
++ EFRM_TRACE("%s: Salvaged a closed VI.", __FUNCTION__);
++ }
++
++ if (instance < 0) {
++ /* Could flush resources and try again here. */
++ EFRM_ERR("%s: Out of appropriate VI resources", __FUNCTION__);
++ rc = -EBUSY;
++ goto fail_alloc_id;
++ }
++
++ EFRM_TRACE("%s: new VI ID %d", __FUNCTION__, instance);
++ efrm_resource_init(&virs->rs, EFRM_RESOURCE_VI, instance);
++
++ /* Start with one reference. Any external VIs using the EVQ of this
++ * resource will increment this reference rather than the resource
++ * reference to avoid DMAQ flushes from waiting for other DMAQ
++ * flushes to complete. When the resource reference goes to zero,
++ * the DMAQ flush happens. When the flush completes, this reference
++ * is decremented. When this reference reaches zero, the instance
++ * is freed. */
++ atomic_set(&virs->evq_refs, 1);
++
++ efrm_nic_set_clear(&virs->nic_set);
++
++ virs->bar_mmap_bytes = 0;
++ virs->mem_mmap_bytes = 0;
++ virs->evq_capacity = evq_capacity;
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = txq_capacity;
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = rxq_capacity;
++ virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_TX] = tx_q_tag;
++ virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_RX] = rx_q_tag;
++ virs->flags = vi_flags;
++
++ INIT_LIST_HEAD(&virs->tx_flush_link);
++ INIT_LIST_HEAD(&virs->rx_flush_link);
++ efrm_nic_set_clear(&virs->tx_flush_nic_set);
++ efrm_nic_set_clear(&virs->rx_flush_nic_set);
++
++ memset(&efrm_vi_manager->evq_infos[instance], 0,
++ sizeof(struct vi_resource_evq_info));
++ efrm_vi_manager->evq_infos[instance].evq_virs = virs;
++
++ /* Adjust the queue sizes. */
++ rc = 0;
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ if (rc == 0)
++ rc = efrm_vi_rm_adjust_alloc_request(virs, nic);
++ if (rc != 0)
++ goto fail_adjust_request;
++
++ /* Attach the EVQ early so that we can ensure that the NIC sets
++ * match. */
++ if (evq_virs == NULL) {
++ evq_virs = virs;
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT
++ " has no external event queue", __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
++ } else {
++ /* Make sure the event queue resource really is a VI, i.e. that
++ * it is owned by the same resource manager. */
++ if (EFRM_RESOURCE_TYPE(evq_virs->rs.rs_handle) !=
++ EFRM_RESOURCE_VI) {
++ EFRM_ERR("%s: Mismatched owner for event queue VI "
++ EFRM_RESOURCE_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
++ return -EINVAL;
++ }
++ EFRM_ASSERT(atomic_read(&evq_virs->evq_refs) != 0);
++ efrm_vi_rm_get_ref(evq_virs);
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " uses event queue "
++ EFRM_RESOURCE_FMT,
++ __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
++ EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle));
++ }
++ virs->evq_virs = evq_virs;
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ rc = efrm_vi_rm_alloc_or_free_buffer_table(virs, true);
++ if (rc != 0)
++ goto fail_buffer_table;
++#endif
++
++ rc = 0;
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ if (rc == 0)
++ /* This updates virs->nic_set for the NICs which need
++ * finalising. */
++ rc = efrm_vi_rm_init_or_fini_nic(virs, true, nic_i);
++ if (rc != 0)
++ goto fail_init_nic;
++
++ /* Put it into the resource manager's table. */
++ rc = efrm_resource_manager_insert(&virs->rs);
++ if (rc != 0) {
++ if (atomic_dec_and_test(&virs->rs.rs_ref_count))
++ efrm_vi_resource_free(virs);
++ return rc;
++ }
++
++ *virs_in_out = virs;
++ EFRM_TRACE("%s: Allocated " EFRM_RESOURCE_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
++ return 0;
++
++destroy:
++ virs = *virs_in_out;
++ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 1);
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ EFRM_TRACE("%s: Freeing %d", __FUNCTION__,
++ EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle));
++
++ /* Destroying the VI. The reference count must be zero. */
++ EFRM_ASSERT(atomic_read(&virs->evq_refs) == 0);
++
++ /* The EVQ should have gone (and DMA disabled) so that this
++ * function can't be re-entered to destroy the EVQ VI. */
++ EFRM_ASSERT(virs->evq_virs == NULL);
++ rc = 0;
++
++fail_init_nic:
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic)
++ efrm_vi_rm_init_or_fini_nic(virs, false, nic_i);
++
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ efrm_vi_rm_alloc_or_free_buffer_table(virs, false);
++fail_buffer_table:
++#endif
++
++ efrm_vi_rm_detach_evq(virs);
++
++fail_adjust_request:
++
++ EFRM_ASSERT(virs->evq_callback_fn == NULL);
++ memset(&efrm_vi_manager->evq_infos[instance], 0,
++ sizeof(struct vi_resource_evq_info));
++ EFRM_TRACE("%s: delete VI ID %d", __FUNCTION__, instance);
++ efrm_vi_rm_free_id(instance);
++fail_alloc_id:
++
++ EFRM_DO_DEBUG(memset(virs, 0, sizeof(*virs)));
++ kfree(virs);
++fail_alloc:
++ *virs_in_out = NULL;
++
++ return rc;
++}
++
++/*** Resource object ****************************************************/
++
++int
++efrm_vi_resource_alloc(struct vi_resource *evq_virs,
++ uint16_t vi_flags, int32_t evq_capacity,
++ int32_t txq_capacity, int32_t rxq_capacity,
++ uint8_t tx_q_tag, uint8_t rx_q_tag,
++ struct vi_resource **virs_out,
++ uint32_t *out_io_mmap_bytes,
++ uint32_t *out_mem_mmap_bytes,
++ uint32_t *out_txq_capacity, uint32_t *out_rxq_capacity)
++{
++ int rc;
++ rc = efrm_vi_resource_alloc_or_free(true, evq_virs, vi_flags,
++ evq_capacity, txq_capacity,
++ rxq_capacity, tx_q_tag, rx_q_tag,
++ virs_out);
++ if (rc == 0) {
++ if (out_io_mmap_bytes != NULL)
++ *out_io_mmap_bytes = (*virs_out)->bar_mmap_bytes;
++ if (out_mem_mmap_bytes != NULL)
++ *out_mem_mmap_bytes = (*virs_out)->mem_mmap_bytes;
++ if (out_txq_capacity != NULL)
++ *out_txq_capacity =
++ (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX];
++ if (out_rxq_capacity != NULL)
++ *out_rxq_capacity =
++ (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX];
++ }
++
++ return rc;
++}
++EXPORT_SYMBOL(efrm_vi_resource_alloc);
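++
++/* Usage sketch (hypothetical caller, not part of this driver): to get
++ * a VI with its own event queue and 512-entry DMA queues:
++ *
++ *	struct vi_resource *virs;
++ *	uint32_t io_bytes, mem_bytes, txq_cap, rxq_cap;
++ *	int rc = efrm_vi_resource_alloc(NULL, 0, 512, 512, 512, 0, 0,
++ *					&virs, &io_bytes, &mem_bytes,
++ *					&txq_cap, &rxq_cap);
++ *
++ * Passing NULL for evq_virs makes the VI use its own event queue; the
++ * out parameters report the mmap sizes and the queue capacities
++ * actually granted, which may have been rounded up by choose_size(). */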
++
++void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs)
++{
++ EFRM_ASSERT(virs != NULL);
++ EFRM_ASSERT(atomic_read(&virs->rs.rs_ref_count) == 0);
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
++ /* release the associated event queue then drop our own reference
++ * count */
++ efrm_vi_rm_detach_evq(virs);
++ efrm_vi_rm_drop_ref(virs);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_event.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_event.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_event.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_event.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,232 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains event handling for VI resource.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/eventq.h>
++#include <ci/efrm/private.h>
++#include <ci/efrm/vi_resource_private.h>
++
++void
++efrm_eventq_request_wakeup(struct vi_resource *virs, unsigned current_ptr,
++ unsigned nic_index)
++{
++ struct efhw_nic *nic;
++ int next_i;
++ EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
++ nic = efrm_nic_table.nic[nic_index];
++ EFRM_ASSERT(nic);
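++ /* current_ptr is a byte offset into the event queue; convert it
++ * to the index of the next event and wrap to the queue size. The
++ * mask is only correct because evq_capacity is a power of two. */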
++ next_i = ((current_ptr / sizeof(efhw_event_t)) &
++ (virs->evq_capacity - 1));
++
++ efhw_nic_wakeup_request(nic, efrm_eventq_dma_addr(virs, nic_index),
++ next_i,
++ EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle));
++}
++EXPORT_SYMBOL(efrm_eventq_request_wakeup);
++
++void efrm_eventq_reset(struct vi_resource *virs, int nic_index)
++{
++ struct efhw_nic *nic = efrm_nic_table.nic[nic_index];
++ int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ EFRM_ASSERT(virs->evq_capacity != 0);
++ EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index));
++
++ /* FIXME: Protect against concurrent resets. */
++
++ efhw_nic_event_queue_disable(nic, instance, 0);
++
++ memset(efrm_eventq_base(virs, nic_index), EFHW_CLEAR_EVENT_VALUE,
++ efrm_eventq_bytes(virs, nic_index));
++ efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity,
++ efrm_eventq_dma_addr(virs, nic_index),
++ virs->nic_info[nic_index].evq_pages.buf_tbl_alloc.base);
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle));
++}
++EXPORT_SYMBOL(efrm_eventq_reset);
++
++int
++efrm_eventq_register_callback(struct vi_resource *virs,
++ void (*handler) (void *, int,
++ struct efhw_nic *nic),
++ void *arg)
++{
++ int instance;
++ int bit;
++
++ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
++ EFRM_ASSERT(virs->evq_capacity != 0);
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ /* The handler can be set only once. */
++ bit = test_and_set_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
++ &efrm_vi_manager->evq_infos[instance].evq_state);
++ if (bit)
++ return -EBUSY;
++
++ /* Store the details. The order is important here: the handler
++ * must be written last so that a concurrent event callback never
++ * sees a registered handler with a stale argument. */
++ virs->evq_callback_arg = arg;
++ virs->evq_callback_fn = handler;
++
++ return 0;
++}
++EXPORT_SYMBOL(efrm_eventq_register_callback);
++
++void efrm_eventq_kill_callback(struct vi_resource *virs)
++{
++ int nic_i, instance;
++ struct efhw_nic *nic;
++ struct vi_resource_evq_info *evq_info;
++ int32_t evq_state;
++ int bit;
++
++ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
++ EFRM_ASSERT(virs->evq_capacity != 0);
++
++ /* Clean out the callback so a new one can be installed. */
++ virs->evq_callback_fn = NULL;
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++ evq_info = &efrm_vi_manager->evq_infos[instance];
++
++ /* Disable the event queue. */
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic)
++ efhw_nic_event_queue_disable(nic, instance, /*timer_only */ 1);
++
++ /* Disable the callback. */
++ bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED,
++ &evq_info->evq_state);
++ EFRM_ASSERT(bit); /* do not call me twice! */
++
++ /* Spin until the callback is complete. */
++ do {
++ rmb();
++
++ udelay(1);
++ evq_state = evq_info->evq_state;
++ } while ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)));
++}
++EXPORT_SYMBOL(efrm_eventq_kill_callback);
++
++static void
++efrm_eventq_do_callback(struct efhw_nic *nic, unsigned instance,
++ bool is_timeout)
++{
++ void (*handler) (void *, int is_timeout, struct efhw_nic *nic);
++ void *arg;
++ struct vi_resource_evq_info *evq_info;
++ int32_t evq_state;
++ int32_t new_evq_state;
++ struct vi_resource *virs;
++ int bit;
++
++ EFRM_TRACE("%s: q=%d %s", __FUNCTION__, instance,
++ is_timeout ? "timeout" : "wakeup");
++ EFRM_ASSERT(efrm_vi_manager);
++
++ evq_info = &efrm_vi_manager->evq_infos[instance];
++
++ /* Set the BUSY bit and clear WAKEUP_PENDING. Do this
++ * before waking up the sleeper to avoid races. */
++ while (1) {
++ evq_state = evq_info->evq_state;
++ new_evq_state = evq_state;
++
++ if ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)) != 0) {
++ EFRM_ERR("%s:%d: evq_state[%d] corrupted!",
++ __FUNCTION__, __LINE__, instance);
++ return;
++ }
++
++ if (!is_timeout)
++ new_evq_state &= ~VI_RESOURCE_EVQ_STATE(WAKEUP_PENDING);
++
++ if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) {
++ new_evq_state |= VI_RESOURCE_EVQ_STATE(BUSY);
++ if (cmpxchg(&evq_info->evq_state, evq_state,
++ new_evq_state) == evq_state) {
++ virs = evq_info->evq_virs;
++ break;
++ }
++
++ } else {
++ /* Just update the state if necessary. */
++ if (new_evq_state == evq_state ||
++ cmpxchg(&evq_info->evq_state, evq_state,
++ new_evq_state) == evq_state)
++ return;
++ }
++
++ udelay(1);
++ }
++
++ /* Call the callback if any. */
++ if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) {
++ /* Retrieve the callback fn. */
++ handler = virs->evq_callback_fn;
++ arg = virs->evq_callback_arg;
++ if (handler != NULL) /* avoid races */
++ handler(arg, is_timeout, nic);
++ }
++
++ /* Clear the BUSY bit. */
++ bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_BUSY,
++ &evq_info->evq_state);
++ if (!bit) {
++ EFRM_ERR("%s:%d: evq_state corrupted!",
++ __FUNCTION__, __LINE__);
++ }
++}
++
++void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev)
++{
++ efrm_eventq_do_callback(nic,
++ (unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev),
++ false);
++}
++
++void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev)
++{
++ efrm_eventq_do_callback(nic,
++ (unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev),
++ true);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_flush.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_flush.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_flush.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_flush.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,506 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains DMA queue flushing of VI resources.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efrm/private.h>
++#include <ci/efrm/sysdep.h>
++#include <ci/efrm/buffer_table.h>
++#include <ci/efrm/vi_resource_private.h>
++
++#if EFRM_VI_USE_WORKQUEUE
++ /* can fail as the work item may already be scheduled -- ignore failure */
++#define EFRM_VI_RM_DELAYED_FREE(manager) \
++ queue_work(manager->workqueue, &manager->work_item)
++#else
++#define EFRM_VI_RM_DELAYED_FREE(manager) \
++ efrm_vi_rm_delayed_free(&manager->work_item)
++#endif
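++
++/* On workqueue builds the free is deferred to "sfc_vi" process
++ * context, presumably because callers may run in atomic context
++ * (e.g. from event handling) while the final teardown may need to
++ * sleep; on other builds the work function simply runs inline. */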
++
++static const int flush_fifo_hwm = 8; /* TODO: should be a HW-specific constant */
++
++static void
++efrm_vi_resource_rx_flush_done(struct vi_resource *virs, int nic_i,
++ bool *completed)
++{
++ /* We should only get a flush event if there is a flush
++ * outstanding. */
++ EFRM_ASSERT(efrm_nic_set_read
++ (&virs->rx_flush_outstanding_nic_set, nic_i));
++
++ efrm_nic_set_write(&virs->rx_flush_outstanding_nic_set, nic_i, false);
++ efrm_nic_set_write(&virs->rx_flush_nic_set, nic_i, false);
++
++ if (efrm_nic_set_is_all_clear(&virs->rx_flush_outstanding_nic_set)) {
++ list_del(&virs->rx_flush_link);
++ efrm_vi_manager->rx_flush_outstanding_count--;
++
++ if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) {
++ list_add_tail(&virs->rx_flush_link,
++ &efrm_vi_manager->close_pending);
++ *completed = 1;
++ }
++ }
++}
++
++static void
++efrm_vi_resource_tx_flush_done(struct vi_resource *virs, int nic_i,
++ bool *completed)
++{
++ /* We should only get a flush event if there is a flush
++ * outstanding. */
++ EFRM_ASSERT(efrm_nic_set_read(&virs->tx_flush_nic_set, nic_i));
++
++ efrm_nic_set_write(&virs->tx_flush_nic_set, nic_i, false);
++
++ if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) {
++ list_del(&virs->tx_flush_link);
++
++ if (efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set)) {
++ list_add_tail(&virs->rx_flush_link,
++ &efrm_vi_manager->close_pending);
++ *completed = 1;
++ }
++ }
++}
++
++static void
++efrm_vi_resource_issue_rx_flush(struct vi_resource *virs, bool *completed)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++ int instance;
++ int rc;
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ list_add_tail(&virs->rx_flush_link,
++ &efrm_vi_manager->rx_flush_outstanding_list);
++ virs->rx_flush_outstanding_nic_set = virs->rx_flush_nic_set;
++ efrm_vi_manager->rx_flush_outstanding_count++;
++
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) {
++ EFRM_TRACE("%s: rx queue %d flush requested for nic %d",
++ __FUNCTION__, instance, nic->index);
++ rc = efhw_nic_flush_rx_dma_channel(nic, instance);
++ if (rc == -EAGAIN)
++ efrm_vi_resource_rx_flush_done(virs, nic_i, completed);
++ }
++}
++
++static void
++efrm_vi_resource_issue_tx_flush(struct vi_resource *virs, bool *completed)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++ int instance;
++ int rc;
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ list_add_tail(&virs->tx_flush_link,
++ &efrm_vi_manager->tx_flush_outstanding_list);
++
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) {
++ EFRM_TRACE("%s: tx queue %d flush requested for nic %d",
++ __FUNCTION__, instance, nic->index);
++ rc = efhw_nic_flush_tx_dma_channel(nic, instance);
++ if (rc == -EAGAIN)
++ efrm_vi_resource_tx_flush_done(virs, nic_i, completed);
++ }
++}
++
++static void efrm_vi_resource_process_waiting_flushes(bool *completed)
++{
++ struct vi_resource *virs;
++
++ while (efrm_vi_manager->rx_flush_outstanding_count < flush_fifo_hwm &&
++ !list_empty(&efrm_vi_manager->rx_flush_waiting_list)) {
++ virs = list_entry(list_pop(&efrm_vi_manager->rx_flush_waiting_list),
++ struct vi_resource, rx_flush_link);
++ efrm_vi_resource_issue_rx_flush(virs, completed);
++ }
++}
++
++#if BUG7916_WORKAROUND || BUG5302_WORKAROUND
++static void
++efrm_vi_resource_flush_retry_vi(struct vi_resource *virs,
++ int64_t time_now, bool *completed)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++ int instance;
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ virs->flush_count++;
++ virs->flush_time = time_now;
++
++#if BUG7916_WORKAROUND
++ if (!efrm_nic_set_is_all_clear(&virs->rx_flush_outstanding_nic_set)) {
++ EFRM_TRACE("%s: Retrying RX flush on instance %d",
++ __FUNCTION__, instance);
++
++ list_del(&virs->rx_flush_link);
++ efrm_vi_manager->rx_flush_outstanding_count--;
++ efrm_vi_resource_issue_rx_flush(virs, completed);
++ efrm_vi_resource_process_waiting_flushes(completed);
++ }
++#endif
++
++#if BUG5302_WORKAROUND
++ if (!efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) {
++ if (virs->flush_count > 5) {
++ EFRM_TRACE("%s: VI resource stuck flush pending "
++ "(instance=%d, count=%d)",
++ __FUNCTION__, instance, virs->flush_count);
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->tx_flush_nic_set,
++ nic_i, nic) {
++ falcon_clobber_tx_dma_ptrs(nic, instance);
++ }
++ } else {
++ EFRM_TRACE("%s: Retrying TX flush on instance %d",
++ __FUNCTION__, instance);
++ }
++
++ list_del(&virs->tx_flush_link);
++ efrm_vi_resource_issue_tx_flush(virs, completed);
++ }
++#endif
++}
++#endif
++
++int efrm_vi_resource_flush_retry(struct vi_resource *virs)
++{
++#if BUG7916_WORKAROUND || BUG5302_WORKAROUND
++ irq_flags_t lock_flags;
++ bool completed = false;
++
++ if (efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set) &&
++ efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set))
++ return -EALREADY;
++
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++ efrm_vi_resource_flush_retry_vi(virs, get_jiffies_64(), &completed);
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ if (completed)
++ EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager);
++#endif
++
++ return 0;
++}
++EXPORT_SYMBOL(efrm_vi_resource_flush_retry);
++
++#if BUG7916_WORKAROUND || BUG5302_WORKAROUND
++/* resource manager lock should be taken before this call */
++static void efrm_vi_handle_flush_loss(bool *completed)
++{
++ struct list_head *pos, *temp;
++ struct vi_resource *virs;
++ int64_t time_now, time_pending;
++
++ /* It's possible that we miss flush events. The list is sorted in
++ * the order the flushes were issued, so check whether any entries
++ * are very old. It's also possible that we decide an endpoint is
++ * flushed even though we've not received all of its flush events.
++ * In that case we *should* mark it as completed, reclaim it and
++ * loop again.
++ * FIXME: THIS NEEDS BACKPORTING FROM THE FALCON BRANCH.
++ */
++ time_now = get_jiffies_64();
++
++#if BUG7916_WORKAROUND
++ list_for_each_safe(pos, temp,
++ &efrm_vi_manager->rx_flush_outstanding_list) {
++ virs = container_of(pos, struct vi_resource, rx_flush_link);
++
++ time_pending = time_now - virs->flush_time;
++
++ /* List entries are held oldest-first, in the order the flushes
++ * were issued. Only process the old ones. */
++ if (time_pending <= 0x100000000LL)
++ break;
++
++ efrm_vi_resource_flush_retry_vi(virs, time_now, completed);
++ }
++#endif
++
++#if BUG5302_WORKAROUND
++ list_for_each_safe(pos, temp,
++ &efrm_vi_manager->tx_flush_outstanding_list) {
++ virs = container_of(pos, struct vi_resource, tx_flush_link);
++
++ time_pending = time_now - virs->flush_time;
++
++ /* List entries are held oldest-first, in the order the flushes
++ * were issued. Only process the old ones. */
++ if (time_pending <= 0x100000000LL)
++ break;
++
++ efrm_vi_resource_flush_retry_vi(virs, time_now, completed);
++ }
++#endif
++}
++#endif
++
++void
++efrm_vi_register_flush_callback(struct vi_resource *virs,
++ void (*handler)(void *), void *arg)
++{
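++ /* Ordering matters here: when installing, publish the argument
++ * before the handler so that a reader which sees a non-NULL fn
++ * also sees a valid arg; when clearing, retract the handler
++ * first for the same reason. The wmb()s enforce this on
++ * weakly-ordered CPUs. */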
++ if (handler == NULL) {
++ virs->flush_callback_fn = handler;
++ wmb();
++ virs->flush_callback_arg = arg;
++ } else {
++ virs->flush_callback_arg = arg;
++ wmb();
++ virs->flush_callback_fn = handler;
++ }
++}
++EXPORT_SYMBOL(efrm_vi_register_flush_callback);
++
++int efrm_pt_flush(struct vi_resource *virs)
++{
++ int instance;
++ irq_flags_t lock_flags;
++ bool completed = false;
++
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ EFRM_ASSERT(efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set));
++ EFRM_ASSERT(efrm_nic_set_is_all_clear
++ (&virs->rx_flush_outstanding_nic_set));
++ EFRM_ASSERT(efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set));
++
++ EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " EVQ=%d TXQ=%d RXQ=%d",
++ __FUNCTION__, EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
++ virs->evq_capacity,
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX],
++ virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]);
++
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] != 0)
++ virs->rx_flush_nic_set = virs->nic_set;
++
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] != 0)
++ virs->tx_flush_nic_set = virs->nic_set;
++
++ /* Clean up immediately if there are no flushes. */
++ if (efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set) &&
++ efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) {
++ list_add_tail(&virs->rx_flush_link,
++ &efrm_vi_manager->close_pending);
++ completed = true;
++ }
++
++ /* Issue the RX flush if possible or queue it for later. */
++ if (!efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set)) {
++#if BUG7916_WORKAROUND || BUG5302_WORKAROUND
++ if (efrm_vi_manager->rx_flush_outstanding_count >=
++ flush_fifo_hwm)
++ efrm_vi_handle_flush_loss(&completed);
++#endif
++ if (efrm_vi_manager->rx_flush_outstanding_count >=
++ flush_fifo_hwm) {
++ list_add_tail(&virs->rx_flush_link,
++ &efrm_vi_manager->rx_flush_waiting_list);
++ } else {
++ efrm_vi_resource_issue_rx_flush(virs, &completed);
++ }
++ }
++
++ /* Issue the TX flush. There's no limit to the number of
++ * outstanding TX flushes. */
++ if (!efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set))
++ efrm_vi_resource_issue_tx_flush(virs, &completed);
++
++ virs->flush_time = get_jiffies_64();
++
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ if (completed)
++ EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager);
++
++ return 0;
++}
++EXPORT_SYMBOL(efrm_pt_flush);
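++
++/* Teardown-flow sketch (hypothetical caller): a client that must know
++ * when its queues are safely flushed registers a callback first:
++ *
++ *	efrm_vi_register_flush_callback(virs, my_flushed_fn, my_arg);
++ *	efrm_pt_flush(virs);
++ *
++ * my_flushed_fn() then runs from efrm_vi_rm_delayed_free() once all
++ * outstanding RX/TX flush events have arrived; freeing with no
++ * callback registered is exactly what efrm_vi_resource_free() does. */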
++
++static void
++efrm_handle_rx_dmaq_flushed(struct efhw_nic *flush_nic, int instance,
++ bool *completed)
++{
++ struct list_head *pos, *temp;
++ struct vi_resource *virs;
++
++ list_for_each_safe(pos, temp,
++ &efrm_vi_manager->rx_flush_outstanding_list) {
++ virs = container_of(pos, struct vi_resource, rx_flush_link);
++
++ if (instance == EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)) {
++ efrm_vi_resource_rx_flush_done(virs,
++ flush_nic->index,
++ completed);
++ efrm_vi_resource_process_waiting_flushes(completed);
++ return;
++ }
++ }
++ EFRM_TRACE("%s: Unhandled rx flush event, nic %d, instance %d",
++ __FUNCTION__, flush_nic->index, instance);
++}
++
++static void
++efrm_handle_tx_dmaq_flushed(struct efhw_nic *flush_nic, int instance,
++ bool *completed)
++{
++ struct list_head *pos, *temp;
++ struct vi_resource *virs;
++
++ list_for_each_safe(pos, temp,
++ &efrm_vi_manager->tx_flush_outstanding_list) {
++ virs = container_of(pos, struct vi_resource, tx_flush_link);
++
++ if (instance == EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)) {
++ efrm_vi_resource_tx_flush_done(virs,
++ flush_nic->index,
++ completed);
++ return;
++ }
++ }
++ EFRM_TRACE("%s: Unhandled tx flush event, nic %d, instance %d",
++ __FUNCTION__, flush_nic->index, instance);
++}
++
++void
++efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, int instance,
++ int rx_flush)
++{
++ irq_flags_t lock_flags;
++ bool completed = false;
++
++ EFRM_TRACE("%s: nic_i=%d instance=%d rx_flush=%d", __FUNCTION__,
++ flush_nic->index, instance, rx_flush);
++
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ if (rx_flush)
++ efrm_handle_rx_dmaq_flushed(flush_nic, instance, &completed);
++ else
++ efrm_handle_tx_dmaq_flushed(flush_nic, instance, &completed);
++
++#if BUG7916_WORKAROUND || BUG5302_WORKAROUND
++ efrm_vi_handle_flush_loss(&completed);
++#endif
++
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ if (completed)
++ EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager);
++}
++
++static void
++efrm_vi_rm_reinit_dmaqs(struct vi_resource *virs)
++{
++ struct efhw_nic *nic;
++ int nic_i;
++
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) {
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] != 0)
++ efrm_vi_rm_init_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX,
++ nic);
++ if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX])
++ efrm_vi_rm_init_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX,
++ nic);
++ }
++}
++
++/* Free any PT endpoints whose flush has now completed. */
++void efrm_vi_rm_delayed_free(struct work_struct *data)
++{
++ irq_flags_t lock_flags;
++ struct list_head close_pending;
++ struct vi_resource *virs;
++
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm);
++
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++ list_replace_init(&efrm_vi_manager->close_pending, &close_pending);
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ EFRM_TRACE("%s: %p", __FUNCTION__, efrm_vi_manager);
++ while (!list_empty(&close_pending)) {
++ virs = list_entry(list_pop(&close_pending),
++ struct vi_resource, rx_flush_link);
++ EFRM_TRACE("%s: flushed VI instance=%d", __FUNCTION__,
++ EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle));
++
++ if (virs->flush_callback_fn != NULL) {
++ efrm_vi_rm_reinit_dmaqs(virs);
++ virs->flush_callback_fn(virs->flush_callback_arg);
++ } else
++ efrm_vi_rm_free_flushed_resource(virs);
++ }
++}
++
++void efrm_vi_rm_salvage_flushed_vis(void)
++{
++#if BUG7916_WORKAROUND || BUG5302_WORKAROUND
++ irq_flags_t lock_flags;
++ bool completed = false;
++
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++ efrm_vi_handle_flush_loss(&completed);
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++#endif
++
++ efrm_vi_rm_delayed_free(&efrm_vi_manager->work_item);
++}
++
++void efrm_vi_resource_free(struct vi_resource *virs)
++{
++ efrm_vi_register_flush_callback(virs, NULL, NULL);
++ efrm_pt_flush(virs);
++}
++EXPORT_SYMBOL(efrm_vi_resource_free);
++
++/*
++ * vi: sw=8:ai:aw
++ */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_manager.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_manager.c
+--- linux-2.6.18.8/drivers/net/sfc/sfc_resource/vi_resource_manager.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfc_resource/vi_resource_manager.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,259 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains the VI resource manager.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <ci/efrm/nic_table.h>
++#include <ci/driver/efab/hardware.h>
++#include <ci/efhw/falcon.h>
++#include <ci/efrm/private.h>
++#include <ci/efrm/vi_resource_private.h>
++
++int efrm_pt_pace(struct vi_resource *virs, unsigned int val)
++{
++#if defined(__CI_HARDWARE_CONFIG_FALCON__)
++ int instance, nic_i;
++ struct efhw_nic *nic;
++
++ EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0);
++ instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle);
++
++ EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic)
++ falcon_nic_pace(nic, instance, val);
++
++ EFRM_TRACE("%s[%d]=%d DONE", __FUNCTION__, instance, val);
++ return 0;
++#else
++ return -EOPNOTSUPP;
++#endif
++}
++EXPORT_SYMBOL(efrm_pt_pace);
++
++/*** Resource manager creation/destruction *******************************/
++
++static void efrm_vi_rm_dtor(struct efrm_resource_manager *rm);
++
++static int
++efrm_create_or_destroy_vi_resource_manager(
++ struct efrm_resource_manager **rm_in_out,
++ const struct vi_resource_dimensions *dims,
++ bool destroy)
++{
++ struct vi_resource *virs;
++ struct list_head *pos, *temp;
++ struct list_head flush_pending;
++ irq_flags_t lock_flags;
++ int rc, i, n_evqs;
++ unsigned dmaq_min, dmaq_max;
++
++ EFRM_ASSERT(rm_in_out);
++
++ if (destroy)
++ goto destroy;
++
++ EFRM_ASSERT(dims);
++ EFRM_NOTICE("vi_resource_manager: evq_int=%u-%u evq_timer=%u-%u",
++ dims->evq_int_min, dims->evq_int_max,
++ dims->evq_timer_min, dims->evq_timer_max);
++ EFRM_NOTICE("vi_resource_manager: rxq=%u-%u txq=%u-%u",
++ dims->rxq_min, dims->rxq_max,
++ dims->txq_min, dims->txq_max);
++
++ efrm_vi_manager = kmalloc(sizeof(*efrm_vi_manager), GFP_KERNEL);
++ if (efrm_vi_manager == NULL) {
++ rc = -ENOMEM;
++ goto fail_alloc;
++ }
++
++ memset(efrm_vi_manager, 0, sizeof(*efrm_vi_manager));
++
++ efrm_vi_manager->iscsi_dmaq_instance_is_free = true;
++
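++ /* VI instances double as DMA queue and event queue instances, so
++ * each allocatable ID range below is the intersection of the
++ * RX/TX queue ranges with the relevant event queue range. */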
++ dmaq_min = max(dims->rxq_min, dims->txq_min);
++ dmaq_max = min(dims->rxq_max, dims->txq_max);
++
++ efrm_vi_manager->with_timer_base =
++ max(dmaq_min, dims->evq_timer_min);
++ efrm_vi_manager->with_timer_limit =
++ min(dmaq_max, dims->evq_timer_max);
++ rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_timer,
++ efrm_vi_manager->with_timer_base,
++ efrm_vi_manager->with_timer_limit,
++ &efrm_vi_manager->rm.rm_lock);
++ if (rc < 0)
++ goto fail_with_timer_id_pool;
++
++ efrm_vi_manager->with_interrupt_base =
++ max(dmaq_min, dims->evq_int_min);
++ efrm_vi_manager->with_interrupt_limit =
++ min(dmaq_max, dims->evq_int_max);
++ efrm_vi_manager->with_interrupt_limit =
++ max(efrm_vi_manager->with_interrupt_limit,
++ efrm_vi_manager->with_interrupt_base);
++ rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_interrupt,
++ efrm_vi_manager->with_interrupt_base,
++ efrm_vi_manager->with_interrupt_limit,
++ &efrm_vi_manager->rm.rm_lock);
++ if (rc < 0)
++ goto fail_with_int_id_pool;
++
++ n_evqs = max(efrm_vi_manager->with_timer_limit,
++ efrm_vi_manager->with_interrupt_limit);
++ rc = -ENOMEM;
++ efrm_vi_manager->evq_infos =
++ vmalloc(n_evqs * sizeof(struct vi_resource_evq_info));
++ if (efrm_vi_manager->evq_infos == NULL)
++ goto fail_alloc_evq_infos;
++
++ for (i = 0; i < n_evqs; ++i) {
++ efrm_vi_manager->evq_infos[i].evq_state = 0;
++ efrm_vi_manager->evq_infos[i].evq_virs = NULL;
++ }
++
++ INIT_LIST_HEAD(&efrm_vi_manager->rx_flush_waiting_list);
++ INIT_LIST_HEAD(&efrm_vi_manager->rx_flush_outstanding_list);
++ INIT_LIST_HEAD(&efrm_vi_manager->tx_flush_outstanding_list);
++ efrm_vi_manager->rx_flush_outstanding_count = 0;
++
++ INIT_LIST_HEAD(&efrm_vi_manager->close_pending);
++#if EFRM_VI_USE_WORKQUEUE
++ efrm_vi_manager->workqueue = create_workqueue("sfc_vi");
++ if (efrm_vi_manager->workqueue == NULL)
++ goto fail_create_workqueue;
++#endif
++ INIT_WORK(&efrm_vi_manager->work_item, efrm_vi_rm_delayed_free);
++
++ /* NB. This must be the last step to avoid things getting tangled.
++ * efrm_resource_manager_dtor calls the vi_rm_dtor which ends up in
++ * this function. */
++ rc = efrm_resource_manager_ctor(&efrm_vi_manager->rm, efrm_vi_rm_dtor,
++ "VI", EFRM_RESOURCE_VI, 0);
++ if (rc < 0)
++ goto fail_rm_ctor;
++
++ *rm_in_out = &efrm_vi_manager->rm;
++ return 0;
++
++destroy:
++ rc = 0;
++ EFRM_RESOURCE_MANAGER_ASSERT_VALID(*rm_in_out);
++
++ /* Abort outstanding flushes. Note that a VI resource can be on
++ * more than one of these lists. We handle this by starting with
++ * the TX list and then appending VIs to it if they aren't on the
++ * TX list already. A VI is on the TX flush list iff
++ * tx_flush_nic_set is not empty. */
++ spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ list_replace_init(&efrm_vi_manager->tx_flush_outstanding_list,
++ &flush_pending);
++
++ list_for_each_safe(pos, temp,
++ &efrm_vi_manager->rx_flush_waiting_list) {
++ virs = container_of(pos, struct vi_resource, rx_flush_link);
++
++ list_del(&virs->rx_flush_link);
++ if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set))
++ list_add_tail(&virs->tx_flush_link, &flush_pending);
++ }
++
++ list_for_each_safe(pos, temp,
++ &efrm_vi_manager->rx_flush_outstanding_list) {
++ virs = container_of(pos, struct vi_resource, rx_flush_link);
++
++ list_del(&virs->rx_flush_link);
++ if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set))
++ list_add_tail(&virs->tx_flush_link, &flush_pending);
++ }
++
++ spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags);
++
++ while (!list_empty(&flush_pending)) {
++ virs = list_entry(list_pop(&flush_pending),
++ struct vi_resource, tx_flush_link);
++ EFRM_TRACE("%s: found PT endpoint " EFRM_RESOURCE_FMT
++ " with flush pending [Tx=0x%x, Rx=0x%x, RxO=0x%x]",
++ __FUNCTION__,
++ EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle),
++ virs->tx_flush_nic_set.nics,
++ virs->rx_flush_nic_set.nics,
++ virs->rx_flush_outstanding_nic_set.nics);
++ efrm_vi_rm_free_flushed_resource(virs);
++ }
++
++fail_rm_ctor:
++
++ /* Complete outstanding closes. */
++#if EFRM_VI_USE_WORKQUEUE
++ destroy_workqueue(efrm_vi_manager->workqueue);
++fail_create_workqueue:
++#endif
++ EFRM_ASSERT(list_empty(&efrm_vi_manager->close_pending));
++
++ n_evqs = max(efrm_vi_manager->with_timer_limit,
++ efrm_vi_manager->with_interrupt_limit);
++ vfree(efrm_vi_manager->evq_infos);
++fail_alloc_evq_infos:
++
++ kfifo_vfree(efrm_vi_manager->instances_with_interrupt);
++fail_with_int_id_pool:
++
++ kfifo_vfree(efrm_vi_manager->instances_with_timer);
++fail_with_timer_id_pool:
++
++ if (destroy)
++ return 0;
++
++ EFRM_DO_DEBUG(memset(efrm_vi_manager, 0, sizeof(*efrm_vi_manager)));
++ kfree(efrm_vi_manager);
++fail_alloc:
++
++ *rm_in_out = NULL;
++ EFRM_ERR("%s: failed rc=%d", __FUNCTION__, rc);
++ return rc;
++}
++
++int
++efrm_create_vi_resource_manager(struct efrm_resource_manager **rm_out,
++ const struct vi_resource_dimensions *dims)
++{
++ return efrm_create_or_destroy_vi_resource_manager(rm_out, dims, false);
++}
++
++static void efrm_vi_rm_dtor(struct efrm_resource_manager *rm)
++{
++ efrm_create_or_destroy_vi_resource_manager(&rm, NULL, true);
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/sfe4001.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfe4001.c
+--- linux-2.6.18.8/drivers/net/sfc/sfe4001.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/sfe4001.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,315 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************/
++
++/*****************************************************************************
++ * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that
++ * controls the PHY power rails, and for the MAX6647 temp. sensor used to check
++ * the PHY
++ */
++#include <linux/delay.h>
++#include "efx.h"
++#include "phy.h"
++#include "boards.h"
++#include "falcon.h"
++#include "falcon_hwdefs.h"
++
++/**************************************************************************
++ *
++ * I2C IO Expander device
++ *
++ **************************************************************************/
++#define PCA9539 0x74
++
++#define P0_IN 0x00
++#define P0_OUT 0x02
++#define P0_INVERT 0x04
++#define P0_CONFIG 0x06
++
++#define P0_EN_1V0X_LBN 0
++#define P0_EN_1V0X_WIDTH 1
++#define P0_EN_1V2_LBN 1
++#define P0_EN_1V2_WIDTH 1
++#define P0_EN_2V5_LBN 2
++#define P0_EN_2V5_WIDTH 1
++#define P0_EN_3V3X_LBN 3
++#define P0_EN_3V3X_WIDTH 1
++#define P0_EN_5V_LBN 4
++#define P0_EN_5V_WIDTH 1
++#define P0_SHORTEN_JTAG_LBN 5
++#define P0_SHORTEN_JTAG_WIDTH 1
++#define P0_X_TRST_LBN 6
++#define P0_X_TRST_WIDTH 1
++#define P0_DSP_RESET_LBN 7
++#define P0_DSP_RESET_WIDTH 1
++
++#define P1_IN 0x01
++#define P1_OUT 0x03
++#define P1_INVERT 0x05
++#define P1_CONFIG 0x07
++
++#define P1_AFE_PWD_LBN 0
++#define P1_AFE_PWD_WIDTH 1
++#define P1_DSP_PWD25_LBN 1
++#define P1_DSP_PWD25_WIDTH 1
++#define P1_RESERVED_LBN 2
++#define P1_RESERVED_WIDTH 2
++#define P1_SPARE_LBN 4
++#define P1_SPARE_WIDTH 4
++
++
++/**************************************************************************
++ *
++ * Temperature Sensor
++ *
++ **************************************************************************/
++#define MAX6647 0x4e
++
++#define RLTS 0x00
++#define RLTE 0x01
++#define RSL 0x02
++#define RCL 0x03
++#define RCRA 0x04
++#define RLHN 0x05
++#define RLLI 0x06
++#define RRHI 0x07
++#define RRLS 0x08
++#define WCRW 0x0a
++#define WLHO 0x0b
++#define WRHA 0x0c
++#define WRLN 0x0e
++#define OSHT 0x0f
++#define REET 0x10
++#define RIET 0x11
++#define RWOE 0x19
++#define RWOI 0x20
++#define HYS 0x21
++#define QUEUE 0x22
++#define MFID 0xfe
++#define REVID 0xff
++
++/* Status bits */
++#define MAX6647_BUSY (1 << 7) /* ADC is converting */
++#define MAX6647_LHIGH (1 << 6) /* Local high temp. alarm */
++#define MAX6647_LLOW (1 << 5) /* Local low temp. alarm */
++#define MAX6647_RHIGH (1 << 4) /* Remote high temp. alarm */
++#define MAX6647_RLOW (1 << 3) /* Remote low temp. alarm */
++#define MAX6647_FAULT (1 << 2) /* DXN/DXP short/open circuit */
++#define MAX6647_EOT (1 << 1) /* Remote junction overtemp. */
++#define MAX6647_IOT (1 << 0) /* Local junction overtemp. */
++
++static const u8 xgphy_max_temperature = 90;
++
++void sfe4001_poweroff(struct efx_nic *efx)
++{
++ struct efx_i2c_interface *i2c = &efx->i2c;
++
++ u8 cfg, out, in;
++
++ EFX_INFO(efx, "%s\n", __func__);
++
++ /* Turn off all power rails */
++ out = 0xff;
++ (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
++
++ /* Disable port 1 outputs on IO expander */
++ cfg = 0xff;
++ (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, EFX_BYTE);
++
++ /* Disable port 0 outputs on IO expander */
++ cfg = 0xff;
++ (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, EFX_BYTE);
++
++ /* Clear any over-temperature alert */
++ (void) efx_i2c_read(i2c, MAX6647, RSL, &in, EFX_BYTE);
++}
++
++static int sfe4001_check_hw(struct efx_nic *efx)
++{
++ struct efx_i2c_interface *i2c = &efx->i2c;
++ int rc;
++ u8 status;
++
++ /* Check the powered status of the PHY. Lack of power implies that
++ * the MAX6647 has shut down power to it, probably due to a temp.
++ * alarm. We read the power status rather than the MAX6647 status
++ * directly, because the latter is read-to-clear and would thus
++ * start to power up the PHY again when polled, causing us to blip
++ * the power undesirably. */
++
++ /* If the XAUI link is down, check the power status. Reading
++ * the power status requires an I2C byte read, which is too slow
++ * to poll (see SFC bug 7884). */
++ if (falcon_xaui_link_ok(efx))
++ return 0;
++
++ rc = efx_i2c_read(i2c, PCA9539, P1_IN, &status, EFX_BYTE);
++ status &= ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN));
++
++ /* We know we can read from the IO expander because we did
++ * it during power-on. Assume failure now is bad news. */
++ if (rc != 0 || status == 0) {
++ sfe4001_poweroff(efx);
++
++ /* Note that the PHY is pining for the cooling fans */
++ tenxpress_set_state(efx, TENXPRESS_STATUS_OTEMP);
++
++ /* Log the info */
++ if (status == 0) {
++ EFX_ERR(efx, "%s: Temperature sensor reports "
++ "alarm! (0x%x) Shutting down PHY.\n",
++ __func__, status);
++ rc = -EIO;
++ } else {
++ EFX_ERR(efx, "%s: Failed to read PHY status!"
++ " Shutting down PHY.\n",
++ __func__);
++ }
++ }
++
++ return rc;
++}
++
++/* This board uses an I2C expander to provide power to the PHY, which needs to
++ * be turned on before the PHY can be used.
++ * Context: Process context, rtnl lock held
++ */
++int sfe4001_poweron(struct efx_nic *efx)
++{
++ struct efx_i2c_interface *i2c = &efx->i2c;
++ unsigned int count;
++ int rc;
++ u8 out, in, cfg;
++ efx_dword_t reg;
++
++ /* 10Xpress has fixed-function LED pins, so there is no board-specific
++ * blink code. */
++ efx->board_info.blink = tenxpress_phy_blink;
++
++ /* Ensure that XGXS and XAUI SerDes are held in reset */
++ EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
++ XX_PWRDNB_EN, 1,
++ XX_RSTPLLAB_EN, 1,
++ XX_RESETA_EN, 1,
++ XX_RESETB_EN, 1,
++ XX_RSTXGXSRX_EN, 1,
++ XX_RSTXGXSTX_EN, 1);
++ efx->mac_op->mac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
++ udelay(10);
++
++ efx->board_info.monitor = sfe4001_check_hw;
++ efx->board_info.fini = sfe4001_poweroff;
++
++ /* Set DSP over-temperature alert threshold */
++ EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
++ rc = efx_i2c_write(i2c, MAX6647, WLHO,
++ &xgphy_max_temperature, EFX_BYTE);
++ if (rc)
++ goto fail1;
++
++ /* Read it back and verify */
++ rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, EFX_BYTE);
++ if (rc)
++ goto fail1;
++ if (in != xgphy_max_temperature) {
++ rc = -EFAULT;
++ goto fail1;
++ }
++
++ /* Clear any previous over-temperature alert */
++ rc = efx_i2c_read(i2c, MAX6647, RSL, &in, EFX_BYTE);
++ if (rc)
++ goto fail1;
++
++ /* Enable port 0 and port 1 outputs on IO expander */
++ cfg = 0x00;
++ rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, EFX_BYTE);
++ if (rc)
++ goto fail1;
++ cfg = 0xff & ~(1 << P1_SPARE_LBN);
++ rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, EFX_BYTE);
++ if (rc)
++ goto fail2;
++
++ /* Turn all power off then wait 1 sec. This ensures PHY is reset */
++ out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
++ (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
++ (0 << P0_EN_1V0X_LBN));
++ rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
++ if (rc)
++ goto fail3;
++
++ schedule_timeout_uninterruptible(HZ);
++ count = 0;
++ do {
++ /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
++ out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
++ (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
++ (1 << P0_X_TRST_LBN));
++
++ rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
++ if (rc)
++ goto fail3;
++ msleep(10);
++
++ /* Turn on 1V power rail */
++ out &= ~(1 << P0_EN_1V0X_LBN);
++ rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
++ if (rc)
++ goto fail3;
++
++ EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
++
++ schedule_timeout_uninterruptible(HZ);
++
++ /* Check DSP is powered */
++ rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, EFX_BYTE);
++ if (rc)
++ goto fail3;
++ if (in & (1 << P1_AFE_PWD_LBN))
++ goto done;
++
++ } while (++count < 20);
++
++ EFX_INFO(efx, "timed out waiting for power\n");
++ rc = -ETIMEDOUT;
++ goto fail3;
++
++done:
++ EFX_INFO(efx, "PHY is powered on\n");
++ return 0;
++
++fail3:
++ /* Turn off all power rails */
++ out = 0xff;
++ (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, EFX_BYTE);
++ /* Disable port 1 outputs on IO expander */
++ out = 0xff;
++ (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, EFX_BYTE);
++fail2:
++ /* Disable port 0 outputs on IO expander */
++ out = 0xff;
++ (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, EFX_BYTE);
++fail1:
++ return rc;
++}
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/spi.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/spi.h
+--- linux-2.6.18.8/drivers/net/sfc/spi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/spi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,186 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005: Fen Systems Ltd.
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_SPI_H
++#define EFX_SPI_H
++
++#include "net_driver.h"
++
++/**************************************************************************
++ *
++ * Basic SPI command set and bit definitions
++ *
++ *************************************************************************/
++
++/*
++ * Commands common to all known devices.
++ *
++ */
++
++/* Write status register */
++#define SPI_WRSR 0x01
++
++/* Write data to memory array */
++#define SPI_WRITE 0x02
++
++/* Read data from memory array */
++#define SPI_READ 0x03
++
++/* Reset write enable latch */
++#define SPI_WRDI 0x04
++
++/* Read status register */
++#define SPI_RDSR 0x05
++
++/* Set write enable latch */
++#define SPI_WREN 0x06
++
++/* SST: Enable write to status register */
++#define SPI_SST_EWSR 0x50
++
++/*
++ * Status register bits. Not all bits are supported on all devices.
++ *
++ */
++
++/* Write-protect pin enabled */
++#define SPI_STATUS_WPEN 0x80
++
++/* Block protection bit 2 */
++#define SPI_STATUS_BP2 0x10
++
++/* Block protection bit 1 */
++#define SPI_STATUS_BP1 0x08
++
++/* Block protection bit 0 */
++#define SPI_STATUS_BP0 0x04
++
++/* State of the write enable latch */
++#define SPI_STATUS_WEN 0x02
++
++/* Device busy flag */
++#define SPI_STATUS_NRDY 0x01
++
++/**************************************************************************
++ *
++ * Efx SPI devices
++ *
++ **************************************************************************
++ */
++
++/**
++ * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
++ * @device_id: Controller's id for the device
++ * @size: Size (in bytes)
++ * @addr_len: Number of address bytes in read/write commands
++ * @munge_address: Flag whether addresses should be munged.
++ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
++ * use bit 3 of the command byte as address bit A8, rather
++ * than having a two-byte address. If this flag is set, then
++ * commands should be munged in this way.
++ * @erase_command: Erase command (or 0 if sector erase not needed).
++ * @erase_size: Erase sector size (in bytes)
++ * Erase commands affect sectors with this size and alignment.
++ * This must be a power of two.
++ * @block_size: Write block size (in bytes).
++ * Write commands are limited to blocks with this size and alignment.
++ * @read: Read function for the device
++ * @write: Write function for the device
++ */
++struct efx_spi_device {
++ int device_id;
++ unsigned int size;
++ unsigned int addr_len;
++ unsigned int munge_address:1;
++ u8 erase_command;
++ unsigned int erase_size;
++ unsigned int block_size;
++ int (*read) (const struct efx_spi_device *spi,
++ struct efx_nic *efx, unsigned int command,
++ int address, void *data, unsigned int len);
++ int (*write) (const struct efx_spi_device *spi,
++ struct efx_nic *efx, unsigned int command,
++ int address, const void *data, unsigned int len);
++};
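++
++/* A hypothetical description of an AT25040A-style EEPROM as a sketch
++ * of how these fields fit together (the values and the handler names
++ * are illustrative assumptions, not a tested device table entry):
++ *
++ *	static const struct efx_spi_device at25040_example = {
++ *		.device_id	= 0,
++ *		.size		= 512,	(4 Kbit)
++ *		.addr_len	= 1,	(plus A8 munged into the command)
++ *		.munge_address	= 1,
++ *		.erase_command	= 0,	(no sector erase needed)
++ *		.block_size	= 8,
++ *		.read		= falcon_spi_read,
++ *		.write		= falcon_spi_write,
++ *	};
++ */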
++
++/* Maximum length for SPI read or write through Falcon */
++#define FALCON_SPI_MAX_LEN 16U
++
++/**
++ * efx_spi_write_limit - calculate maximum permitted length for write
++ * @spi: SPI device description
++ * @start: Starting address
++ *
++ * Return the maximum length for a write starting at the given address
++ * in the device.
++ *
++ * SPI writes must not cross block boundaries. Devices tend
++ * to wrap addresses at block boundaries; e.g. trying to write 5 bytes
++ * starting at offset 14 with a block size of 16 might write
++ * {14,15,0,1,2} rather than {14,15,16,17,18}.
++ */
++static inline unsigned int
++efx_spi_write_limit(const struct efx_spi_device *spi, unsigned int start)
++{
++ return min(FALCON_SPI_MAX_LEN,
++ (spi->block_size - (start & (spi->block_size - 1))));
++}
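++
++/* Worked example for the function above: with block_size = 16 and
++ * start = 14, (start & (block_size - 1)) = 14, so the block has
++ * 16 - 14 = 2 bytes left and the write is capped at min(16, 2) = 2,
++ * matching the 5-byte wrap example in the comment. */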
++
++/**
++ * efx_spi_read_limit - calculate maximum permitted length for read
++ * @spi: SPI device description
++ * @start: Starting address
++ *
++ * Return the maximum length for a read starting at the given address
++ * in the device.
++ */
++static inline unsigned int
++efx_spi_read_limit(const struct efx_spi_device *spi __attribute__ ((unused)),
++ unsigned int start __attribute__ ((unused)))
++{
++ return FALCON_SPI_MAX_LEN;
++}
++
++/**
++ * efx_spi_munge_command - adjust command as necessary for given address
++ * @spi: SPI device description
++ * @command: Normal SPI command
++ * @address: Address for command
++ *
++ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM) use bit 3
++ * of the command byte as address bit A8, rather than having a
++ * two-byte address. This function calculates the appropriate command
++ * byte for the device, taking this munging into account.
++ */
++static inline u8 efx_spi_munge_command(const struct efx_spi_device *spi,
++ const u8 command,
++ const unsigned int address)
++{
++ return (command | (((address >> 8) & spi->munge_address) << 3));
++}
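++
++/* Worked example for the function above, assuming an AT25040A-style
++ * part with munge_address = 1: a SPI_READ (0x03) of address 0x123
++ * has A8 = 1, so the command byte becomes 0x03 | (1 << 3) = 0x0b
++ * and only the low 8 address bits go out as the address byte. */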
++
++#endif /* EFX_SPI_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/tenxpress.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/tenxpress.c
+--- linux-2.6.18.8/drivers/net/sfc/tenxpress.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/tenxpress.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,697 @@
++/****************************************************************************
++ * Driver for Solarflare 802.3an compliant PHY
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************/
++
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include "efx.h"
++#include "debugfs.h"
++#include "gmii.h"
++#include "mdio_10g.h"
++#include "falcon.h"
++#include "phy.h"
++#include "falcon_hwdefs.h"
++#include "boards.h"
++
++/* We expect these MMDs to be in the package */
++/* AN not here as mdio_check_mmds() requires STAT2 support */
++#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \
++ MDIO_MMDREG_DEVS0_PCS | \
++ MDIO_MMDREG_DEVS0_PHYXS)
++
++#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
++ (1 << LOOPBACK_PCS) | \
++ (1 << LOOPBACK_PMAPMD) | \
++ (1 << LOOPBACK_NETWORK))
++
++/* We complain if we fail to see the link partner as 10G capable this many
++ * times in a row (must be > 1 as sampling the autoneg. registers is racy)
++ */
++#define MAX_BAD_LP_TRIES (5)
++
++/* SNR operating margin register */
++#define PMA_PMD_SNR_MARGIN_0 (133)
++#define PMA_PMD_SNR_MARGIN_1 (134)
++#define PMA_PMD_SNR_MARGIN_2 (135)
++#define PMA_PMD_SNR_MARGIN_3 (136)
++
++/* Extended control register */
++#define PMA_PMD_XCONTROL_REG 0xc000
++#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
++#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
++#define PMA_PMD_AFE_POWERDOWN_LBN 9
++#define PMA_PMD_AFE_POWERDOWN_WIDTH 1
++#define PMA_PMD_DSP_POWERDOWN_LBN 10
++#define PMA_PMD_DSP_POWERDOWN_WIDTH 1
++#define PMA_PMD_PHY_POWERDOWN_LBN 11
++#define PMA_PMD_PHY_POWERDOWN_WIDTH 1
++
++/* Extended status register */
++#define PMA_PMD_XSTATUS_REG 0xc001
++#define PMA_PMD_XSTAT_FLP_LBN (12)
++
++
++/* LED control register */
++#define PMA_PMD_LED_CTRL_REG (0xc007)
++#define PMA_PMA_LED_ACTIVITY_LBN (3)
++
++/* LED function override register */
++#define PMA_PMD_LED_OVERR_REG (0xc009)
++/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
++#define PMA_PMD_LED_LINK_LBN (0)
++#define PMA_PMD_LED_SPEED_LBN (2)
++#define PMA_PMD_LED_TX_LBN (4)
++#define PMA_PMD_LED_RX_LBN (6)
++/* Override settings */
++#define PMA_PMD_LED_AUTO (0) /* H/W control */
++#define PMA_PMD_LED_ON (1)
++#define PMA_PMD_LED_OFF (2)
++#define PMA_PMD_LED_FLASH (3)
++/* All LEDs under hardware control */
++#define PMA_PMD_LED_FULL_AUTO (0)
++/* Green and Amber under hardware control, Red off */
++#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
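++/* i.e. PMA_PMD_LED_DEFAULT = 2 << 6 = 0x80: the RX (Red) LED is
++ * forced off and the zero fields leave every other LED in AUTO
++ * (hardware-controlled) mode */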
++
++
++/* Self test (BIST) control register */
++#define PMA_PMD_BIST_CTRL_REG (0xc014)
++#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
++#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
++#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
++/* Self test status register */
++#define PMA_PMD_BIST_STAT_REG (0xc015)
++#define PMA_PMD_BIST_ENX_LBN (3)
++#define PMA_PMD_BIST_PMA_LBN (2)
++#define PMA_PMD_BIST_RXD_LBN (1)
++#define PMA_PMD_BIST_AFE_LBN (0)
++
++/* Special Software reset register */
++#define PMA_PMD_EXT_CTRL_REG 49152
++#define PMA_PMD_EXT_SSR_LBN 15
++
++#define BIST_MAX_DELAY (1000)
++#define BIST_POLL_DELAY (10)
++
++static const char *bist_names[] = {
++ [PMA_PMD_BIST_AFE_LBN] = "AFE communication",
++ [PMA_PMD_BIST_RXD_LBN] = "RX data path",
++ [PMA_PMD_BIST_PMA_LBN] = "PMA loopback",
++ [PMA_PMD_BIST_ENX_LBN] = "ENX"
++};
++
++/* Identifier registers: each identifier has 4 part number and 2 revision
++ * registers starting at one of these addresses */
++#define PMA_PMD_AFE_ID_REG 49174
++#define PMA_PMD_DSP_ID_REG 49180
++#define PMA_PMD_FIRMWARE_ID_REG 49186
++
++/* Misc register defines */
++#define PCS_CLOCK_CTRL_REG 0xd801
++#define PLL312_RST_N_LBN 2
++
++#define PCS_SOFT_RST2_REG 0xd806
++#define SERDES_RST_N_LBN 13
++#define XGXS_RST_N_LBN 12
++
++#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
++#define CLK312_EN_LBN 3
++
++/* PHYXS registers */
++#define PHYXS_TEST1 (49162)
++#define LOOPBACK_NEAR_LBN (8)
++#define LOOPBACK_NEAR_WIDTH (1)
++
++/* Boot status register */
++#define PCS_BOOT_STATUS_REG (0xd000)
++#define PCS_BOOT_FATAL_ERR_LBN (0)
++#define PCS_BOOT_PROGRESS_LBN (1)
++#define PCS_BOOT_PROGRESS_WIDTH (2)
++#define PCS_BOOT_COMPLETE_LBN (3)
++
++#define PCS_BOOT_MAX_DELAY (100)
++#define PCS_BOOT_POLL_DELAY (10)
++
++#define TENXPRESS_ID_PN_LEN (8)
++#define TENXPRESS_ID_REV_LEN (4)
++#define TENXPRESS_ID_LEN (TENXPRESS_ID_PN_LEN+1+TENXPRESS_ID_REV_LEN)
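++/* i.e. 13 characters: an 8-character part number, a space and a
++ * 4-character revision. The +1 in the id buffer declarations below
++ * leaves room for a terminating NUL (phy_data is zeroed on
++ * allocation, so the terminator is already in place). */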
++
++static const int bist_max = ARRAY_SIZE(bist_names);
++
++/* Time to wait between powering down the LNPGA and turning off the power
++ * rails */
++#define LNPGA_PDOWN_WAIT (HZ / 5)
++
++
++static int crc_error_reset_threshold = 100;
++module_param(crc_error_reset_threshold, int, 0644);
++MODULE_PARM_DESC(crc_error_reset_threshold,
++ "Max number of CRC errors before XAUI reset");
++
++struct tenxpress_phy_data {
++#ifdef CONFIG_SFC_DEBUGFS
++ char phy_snr[4];
++ char phy_afe_id[TENXPRESS_ID_LEN + 1];
++ char phy_dsp_id[TENXPRESS_ID_LEN + 1];
++ char phy_firmware_id[TENXPRESS_ID_LEN + 1];
++ struct efx_nic *efx;
++#endif
++ enum tenxpress_state state;
++ enum efx_loopback_mode loopback_mode;
++ atomic_t bad_crc_count;
++ int phy_powered;
++ int tx_disabled;
++ int bad_lp_tries;
++};
++
++static int tenxpress_state_is(struct efx_nic *efx, int state)
++{
++ struct tenxpress_phy_data *phy_data = efx->phy_data;
++ return (phy_data != NULL) && (state == phy_data->state);
++}
++
++void tenxpress_set_state(struct efx_nic *efx,
++ enum tenxpress_state state)
++{
++ struct tenxpress_phy_data *phy_data = efx->phy_data;
++ if (phy_data != NULL)
++ phy_data->state = state;
++}
++
++void tenxpress_crc_err(struct efx_nic *efx)
++{
++ struct tenxpress_phy_data *phy_data = efx->phy_data;
++ if (phy_data != NULL)
++ atomic_inc(&phy_data->bad_crc_count);
++}
++
++#ifdef CONFIG_SFC_DEBUGFS
++
++/* debugfs entries for this PHY */
++static int tenxpress_ber_read(struct seq_file *file, void *data)
++{
++ struct efx_nic *efx = *(struct efx_nic **)data;
++ int reg, ber;
++
++ reg = mdio_clause45_read(efx, efx->mii.phy_id, MDIO_MMD_PCS,
++ MDIO_PCS_10GBT_STATUS2);
++
++ /* Extract the BER */
++ ber = (reg >> MDIO_PCS_10GBT_STATUS2_BER_LBN) &
++ ((1 << MDIO_PCS_10GBT_STATUS2_BER_WIDTH) - 1);
++
++ return seq_printf(file, "%d", ber);
++}
++
++
++static int tenxpress_snr_read(struct seq_file *file, void *data)
++{
++ struct tenxpress_phy_data *phy_data = NULL;
++ struct efx_nic *efx;
++ int lane = *(char *) data;
++ int reg, snr;
++
++ EFX_BUG_ON_PARANOID(lane < 0 || lane >= 4);
++ phy_data = container_of(data, struct tenxpress_phy_data, phy_snr[lane]);
++ efx = phy_data->efx;
++
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, PMA_PMD_SNR_MARGIN_0 + lane);
++
++ /* Convert from SNR margin to SNR to match phychk output */
++ snr = (reg - 0x8000 + 238);
++
++ return seq_printf(file, "%d.%d", snr / 10, snr % 10);
++}
++
++
++static struct efx_debugfs_parameter debug_entries[] = {
++ EFX_PER_LANE_PARAMETER("phy_lane", "_snr",
++ struct tenxpress_phy_data, phy_snr, char,
++ tenxpress_snr_read),
++ EFX_NAMED_PARAMETER(phy_ber, struct tenxpress_phy_data, efx,
++ struct efx_nic *, tenxpress_ber_read),
++ EFX_STRING_PARAMETER(struct tenxpress_phy_data, phy_afe_id),
++ EFX_STRING_PARAMETER(struct tenxpress_phy_data, phy_dsp_id),
++ EFX_STRING_PARAMETER(struct tenxpress_phy_data, phy_firmware_id),
++ {NULL}
++};
++
++static void tenxpress_phy_get_id(struct efx_nic *efx,
++ char *id_buf, int id_addr)
++{
++ int i, reg;
++ char ch;
++
++ for (i = TENXPRESS_ID_PN_LEN / 2 - 1; i >= 0; --i) {
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, id_addr + i);
++ ch = reg & 0xFF;
++ *id_buf++ = ch ? ch : ' ';
++ ch = (reg & 0xFF00) >> 8;
++ *id_buf++ = ch ? ch : ' ';
++ }
++ *id_buf++ = ' ';
++ for (i = TENXPRESS_ID_REV_LEN / 2 - 1; i >= 0; --i) {
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD,
++ id_addr + TENXPRESS_ID_PN_LEN / 2 + i);
++ ch = reg & 0xFF;
++ *id_buf++ = ch ? ch : ' ';
++ ch = (reg & 0xFF00) >> 8;
++ *id_buf++ = ch ? ch : ' ';
++ }
++}
++
++static int tenxpress_debugfs_init(struct efx_nic *efx)
++{
++ struct tenxpress_phy_data *phy_data = efx->phy_data;
++ int lane, rc;
++
++ for (lane = 0; lane < 4; lane++)
++ phy_data->phy_snr[lane] = lane;
++
++ phy_data->efx = efx;
++ rc = efx_extend_debugfs_port(efx, efx->phy_data,
++ debug_entries);
++ if (rc < 0)
++ return rc;
++
++ tenxpress_phy_get_id(efx, phy_data->phy_afe_id,
++ PMA_PMD_AFE_ID_REG);
++ tenxpress_phy_get_id(efx, phy_data->phy_dsp_id,
++ PMA_PMD_DSP_ID_REG);
++ tenxpress_phy_get_id(efx, phy_data->phy_firmware_id,
++ PMA_PMD_FIRMWARE_ID_REG);
++
++ return 0;
++}
++
++#endif /* CONFIG_SFC_DEBUGFS */
++
++/* Check that the C166 has booted successfully */
++static int tenxpress_phy_check(struct efx_nic *efx)
++{
++ int phy_id = efx->mii.phy_id;
++ int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY;
++ int boot_stat;
++
++ /* Wait for the boot to complete (or not) */
++ while (count) {
++ boot_stat = mdio_clause45_read(efx, phy_id,
++ MDIO_MMD_PCS,
++ PCS_BOOT_STATUS_REG);
++ if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN))
++ break;
++ count--;
++ udelay(PCS_BOOT_POLL_DELAY);
++ }
++
++ if (!count) {
++ EFX_ERR(efx, "%s: PHY boot timed out. Last status "
++ "%x\n", __func__,
++ (boot_stat >> PCS_BOOT_PROGRESS_LBN) &
++ ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1));
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
++static void tenxpress_reset_xaui(struct efx_nic *efx);
++
++/* Initialise the part post power on reset or software special reset */
++static int tenxpress_init(struct efx_nic *efx)
++{
++ int rc, reg;
++
++ /* Turn on the clock */
++ reg = (1 << CLK312_EN_LBN);
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg);
++
++ rc = tenxpress_phy_check(efx);
++ if (rc < 0)
++ return rc;
++
++ /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG);
++ reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ PMA_PMD_LED_CTRL_REG, reg);
++
++ reg = PMA_PMD_LED_DEFAULT;
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ PMA_PMD_LED_OVERR_REG, reg);
++
++ return rc;
++}
++
++static int tenxpress_phy_init(struct efx_nic *efx)
++{
++ struct tenxpress_phy_data *phy_data;
++ int rc = 0;
++
++ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
++ efx->phy_data = phy_data;
++ phy_data->phy_powered = efx->phy_powered;
++
++ tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
++
++ rc = mdio_clause45_wait_reset_mmds(efx,
++ TENXPRESS_REQUIRED_DEVS);
++ if (rc < 0)
++ goto fail;
++
++ rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
++ if (rc < 0)
++ goto fail;
++
++ rc = tenxpress_init(efx);
++ if (rc < 0)
++ goto fail;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ rc = tenxpress_debugfs_init(efx);
++ if (rc < 0)
++ goto fail;
++#endif
++
++ schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
++
++ /* Bring XGXS and SerDes out of reset; this also resets the 10Xpress */
++ falcon_reset_xaui(efx);
++
++ return 0;
++
++ fail:
++ kfree(efx->phy_data);
++ efx->phy_data = NULL;
++ return rc;
++}
++
++static int tenxpress_special_reset(struct efx_nic *efx)
++{
++ int rc, reg;
++
++ EFX_TRACE(efx, "%s\n", __func__);
++
++ /* Initiate reset */
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG);
++ reg |= (1 << PMA_PMD_EXT_SSR_LBN);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ PMA_PMD_EXT_CTRL_REG, reg);
++
++ msleep(200);
++
++ /* Wait for the blocks to come out of reset */
++ rc = mdio_clause45_wait_reset_mmds(efx,
++ TENXPRESS_REQUIRED_DEVS);
++ if (rc < 0)
++ return rc;
++
++ /* Try and reconfigure the device */
++ rc = tenxpress_init(efx);
++ if (rc < 0)
++ return rc;
++
++ return 0;
++}
++
++static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
++{
++ struct tenxpress_phy_data *pd = efx->phy_data;
++ int reg;
++
++ /* Nothing to do if all is well and was previously so. */
++ if (!(bad_lp || pd->bad_lp_tries))
++ return;
++
++ reg = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG);
++
++ if (bad_lp)
++ pd->bad_lp_tries++;
++ else
++ pd->bad_lp_tries = 0;
++
++ if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
++ pd->bad_lp_tries = 0; /* Restart count */
++ reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
++ reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
++ EFX_ERR(efx, "This NIC appears to be plugged into"
++ " a port that is not 10GBASE-T capable.\n"
++ " This PHY is 10GBASE-T ONLY, so no link can"
++ " be established.\n");
++ } else {
++ reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN);
++ }
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ PMA_PMD_LED_OVERR_REG, reg);
++}
++
++/* Check link status and return a boolean OK value. If the link is NOT
++ * OK we have a quick rummage round to see if we appear to be plugged
++ * into a non-10GBT port and, if so, warn the user that they won't get
++ * a link any time soon as we are 10GBT only, unless the caller asked
++ * us not to do this check (it isn't useful in loopback) */
++static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
++{
++ int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
++
++ if (ok) {
++ tenxpress_set_bad_lp(efx, 0);
++ } else if (check_lp) {
++ /* Are we plugged into the wrong sort of link? */
++ int bad_lp = 0;
++ int phy_id = efx->mii.phy_id;
++ int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
++ MDIO_AN_STATUS);
++ int xphy_stat = mdio_clause45_read(efx, phy_id,
++ MDIO_MMD_PMAPMD,
++ PMA_PMD_XSTATUS_REG);
++ /* Are we plugged into anything that sends FLPs? If
++ * not we can't distinguish between not being plugged
++ * in and being plugged into a non-AN antique. The FLP
++ * bit has the advantage of not clearing when autoneg
++ * restarts. */
++ if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
++ tenxpress_set_bad_lp(efx, 0);
++ return ok;
++ }
++
++ /* If it can do 10GBT it must be XNP capable */
++ bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN));
++ if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) {
++ bad_lp = !(mdio_clause45_read(efx, phy_id,
++ MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) &
++ (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN));
++ }
++ tenxpress_set_bad_lp(efx, bad_lp);
++ }
++ return ok;
++}
++
++static void tenxpress_phyxs_loopback(struct efx_nic *efx)
++{
++ int phy_id = efx->mii.phy_id;
++ int ctrl1, ctrl2;
++
++ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
++ PHYXS_TEST1);
++ if (efx->loopback_mode == LOOPBACK_PHYXS)
++ ctrl2 |= (1 << LOOPBACK_NEAR_LBN);
++ else
++ ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN);
++ if (ctrl1 != ctrl2)
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
++ PHYXS_TEST1, ctrl2);
++}
++
++static void tenxpress_phy_reconfigure(struct efx_nic *efx)
++{
++ struct tenxpress_phy_data *phy_data = efx->phy_data;
++ int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
++ TENXPRESS_LOOPBACKS);
++
++ if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
++ return;
++
++ /* When coming out of transmit disable, coming out of low power
++ * mode, or moving out of any PHY internal loopback mode,
++ * perform a special software reset */
++ if (((efx->phy_powered && !efx->tx_disabled) &&
++ (!phy_data->phy_powered || phy_data->tx_disabled)) ||
++ loop_change) {
++ (void) tenxpress_special_reset(efx);
++ falcon_reset_xaui(efx);
++ }
++
++ mdio_clause45_transmit_disable(efx, efx->tx_disabled);
++ mdio_clause45_phy_reconfigure(efx);
++ tenxpress_phyxs_loopback(efx);
++
++ phy_data->tx_disabled = efx->tx_disabled;
++ phy_data->loopback_mode = efx->loopback_mode;
++ phy_data->phy_powered = efx->phy_powered;
++ efx->link_up = tenxpress_link_ok(efx, 0);
++ efx->link_options = GM_LPA_10000FULL;
++}
++
++static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
++{
++ /* Nothing done here - LASI interrupts aren't reliable so poll */
++}
++
++
++/* Poll PHY for interrupt */
++static int tenxpress_phy_check_hw(struct efx_nic *efx)
++{
++ struct tenxpress_phy_data *phy_data = efx->phy_data;
++ int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
++ int link_ok, rc = 0;
++
++ link_ok = phy_up && tenxpress_link_ok(efx, 1);
++
++ if (link_ok != efx->link_up) {
++ efx->link_up = link_ok;
++ efx->mac_op->fake_phy_event(efx);
++ }
++
++ /* Nothing to check if we've already shut down the PHY */
++ if (!phy_up)
++ return 0;
++
++ if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
++ EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
++ falcon_reset_xaui(efx);
++ atomic_set(&phy_data->bad_crc_count, 0);
++ }
++
++ rc = efx->board_info.monitor(efx);
++ if (rc)
++ efx->link_up = 0;
++
++ return rc;
++}
++
++static void tenxpress_phy_fini(struct efx_nic *efx)
++{
++ int reg;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ efx_trim_debugfs_port(efx, debug_entries);
++#endif
++ /* Power down the LNPGA */
++ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ PMA_PMD_XCONTROL_REG, reg);
++
++ /* Waiting here ensures that the board fini, which can turn off the
++ * power to the PHY, won't get run until the LNPGA powerdown has been
++ * given long enough to complete. */
++ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
++
++ kfree(efx->phy_data);
++ efx->phy_data = NULL;
++}
++
++
++/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
++ * (which probably aren't wired anyway) are left in AUTO mode */
++void tenxpress_phy_blink(struct efx_nic *efx, int blink)
++{
++ int reg;
++
++ if (blink)
++ reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
++ (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
++ (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);
++ else
++ reg = PMA_PMD_LED_DEFAULT;
++
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ PMA_PMD_LED_OVERR_REG, reg);
++}
++
++static void tenxpress_reset_xaui(struct efx_nic *efx)
++{
++ int phy = efx->mii.phy_id;
++ int clk_ctrl, test_select, soft_rst2;
++
++ /* The real work is done via clock_ctrl; the other resets are thought
++ * to be optional, but they make the reset more reliable
++ */
++
++ /* Read */
++ clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
++ PCS_CLOCK_CTRL_REG);
++ test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
++ PCS_TEST_SELECT_REG);
++ soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
++ PCS_SOFT_RST2_REG);
++
++ /* Modify => put in reset */
++ test_select &= ~(1 << CLK312_EN_LBN);
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ PCS_TEST_SELECT_REG, test_select);
++
++ soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ PCS_SOFT_RST2_REG, soft_rst2);
++
++ clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ PCS_CLOCK_CTRL_REG, clk_ctrl);
++ udelay(10);
++
++ /* Modify => remove reset */
++ clk_ctrl |= (1 << PLL312_RST_N_LBN);
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ PCS_CLOCK_CTRL_REG, clk_ctrl);
++ udelay(10);
++
++ soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ PCS_SOFT_RST2_REG, soft_rst2);
++ udelay(10);
++
++ test_select |= (1 << CLK312_EN_LBN);
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ PCS_TEST_SELECT_REG, test_select);
++ udelay(10);
++}
++
++
++struct efx_phy_operations falcon_tenxpress_phy_ops = {
++ .init = tenxpress_phy_init,
++ .reconfigure = tenxpress_phy_reconfigure,
++ .check_hw = tenxpress_phy_check_hw,
++ .fini = tenxpress_phy_fini,
++ .clear_interrupt = tenxpress_phy_clear_interrupt,
++ .reset_xaui = tenxpress_reset_xaui,
++ .mmds = TENXPRESS_REQUIRED_DEVS,
++ .loopbacks = TENXPRESS_LOOPBACKS,
++ .startup_loopback = LOOPBACK_PCS,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/tx.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/tx.c
+--- linux-2.6.18.8/drivers/net/sfc/tx.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/tx.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,522 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2005-2006: Fen Systems Ltd.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/pci.h>
++#include <linux/tcp.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++#include <linux/version.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++#include <linux/highmem.h>
++#endif
++#include "net_driver.h"
++#include "tx.h"
++#include "efx.h"
++#include "falcon.h"
++#include "workarounds.h"
++
++
++/*
++ * TX descriptor ring full threshold
++ *
++ * The tx_queue descriptor ring fill-level must fall below this value
++ * before we restart the netif queue
++ */
++#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \
++ (_tx_queue->efx->type->txd_ring_mask / 2u)
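++/* For example, with a hypothetical txd_ring_mask of 0xfff (a
++ * 4096-entry ring) the threshold is 2047, so the netif queue is
++ * restarted once fewer than 2047 descriptors remain in flight. */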
++
++
++
++/* We want to be able to nest calls to netif_stop_queue(), since each
++ * channel can have an individual stop on the queue.
++ */
++void efx_stop_queue(struct efx_nic *efx)
++{
++ spin_lock_bh(&efx->netif_stop_lock);
++ EFX_TRACE(efx, "stop TX queue\n");
++
++ atomic_inc(&efx->netif_stop_count);
++ if (likely(efx->net_dev_registered))
++ netif_stop_queue(efx->net_dev);
++
++ spin_unlock_bh(&efx->netif_stop_lock);
++}
++
++/* Wake netif's TX queue
++ * We want to be able to nest calls to netif_stop_queue(), since each
++ * channel can have an individual stop on the queue.
++ */
++inline void efx_wake_queue(struct efx_nic *efx)
++{
++ local_bh_disable();
++ if (atomic_dec_and_lock(&efx->netif_stop_count,
++ &efx->netif_stop_lock)) {
++ EFX_TRACE(efx, "waking TX queue\n");
++ if (likely(efx->net_dev_registered))
++ netif_wake_queue(efx->net_dev);
++ spin_unlock(&efx->netif_stop_lock);
++ }
++ local_bh_enable();
++}
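++
++/* Illustrative nesting of the two functions above (hypothetical
++ * call sequence):
++ *	efx_stop_queue(efx);	 stop_count 0 -> 1, queue stopped
++ *	efx_stop_queue(efx);	 stop_count 1 -> 2
++ *	efx_wake_queue(efx);	 stop_count 2 -> 1, queue stays stopped
++ *	efx_wake_queue(efx);	 stop_count 1 -> 0, queue woken
++ */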
++
++/*
++ * Add a socket buffer to a TX queue
++ *
++ * This maps all fragments of a socket buffer for DMA and adds them to
++ * the TX queue. The queue's insert pointer will be incremented by
++ * the number of fragments in the socket buffer.
++ *
++ * If any DMA mapping fails, any mapped fragments will be unmapped and
++ * the queue's insert pointer will be restored to its original value.
++ *
++ * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
++ * You must hold netif_tx_lock() to call this function.
++ */
++static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
++ const struct sk_buff *skb)
++{
++ struct efx_nic *efx = tx_queue->efx;
++ struct pci_dev *pci_dev = efx->pci_dev;
++ struct efx_tx_buffer *buffer;
++ skb_frag_t *fragment;
++ struct page *page;
++ int page_offset;
++ unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
++ dma_addr_t dma_addr, unmap_addr = 0;
++ unsigned int dma_len;
++ unsigned unmap_single;
++ int q_space, i = 0;
++ int rc = NETDEV_TX_OK;
++
++ EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
++
++ /* Get size of the initial fragment */
++ len = skb_headlen(skb);
++
++ fill_level = tx_queue->insert_count - tx_queue->old_read_count;
++ q_space = efx->type->txd_ring_mask - 1 - fill_level;
++
++ /* Map for DMA. Use pci_map_single rather than pci_map_page
++ * since this is more efficient on machines with sparse
++ * memory.
++ */
++ unmap_single = 1;
++ dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
++
++ /* Process all fragments */
++ while (1) {
++ if (unlikely(pci_dma_mapping_error(dma_addr)))
++ goto pci_err;
++
++ /* Store fields for marking in the per-fragment final
++ * descriptor */
++ unmap_len = len;
++ unmap_addr = dma_addr;
++
++ /* Add to TX queue, splitting across DMA boundaries */
++ do {
++ if (unlikely(q_space-- <= 0)) {
++ /* It might be that completions have
++ * happened since the xmit path last
++ * checked. Update the xmit path's
++ * copy of read_count.
++ */
++ ++tx_queue->stopped;
++ /* This memory barrier protects the
++ * change of stopped from the access
++ * of read_count. */
++ smp_mb();
++ tx_queue->old_read_count =
++ *(volatile unsigned *)
++ &tx_queue->read_count;
++ fill_level = (tx_queue->insert_count
++ - tx_queue->old_read_count);
++ q_space = (efx->type->txd_ring_mask - 1 -
++ fill_level);
++ if (unlikely(q_space-- <= 0))
++ goto stop;
++ smp_mb();
++ --tx_queue->stopped;
++ }
++
++ insert_ptr = (tx_queue->insert_count &
++ efx->type->txd_ring_mask);
++ buffer = &tx_queue->buffer[insert_ptr];
++ EFX_BUG_ON_PARANOID(buffer->skb);
++ EFX_BUG_ON_PARANOID(buffer->len);
++ EFX_BUG_ON_PARANOID(buffer->continuation != 1);
++ EFX_BUG_ON_PARANOID(buffer->unmap_len);
++
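++ /* Example of the boundary arithmetic below, assuming a
++ * hypothetical tx_dma_mask of 0xfff: a dma_addr ending in
++ * 0xff0 gives ((~dma_addr) & 0xfff) + 1 = 0x10, so at most
++ * 16 bytes go in this descriptor before the 4KB boundary
++ * forces a split. */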
++ dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
++ if (likely(dma_len > len))
++ dma_len = len;
++
++ misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
++ if (misalign && dma_len + misalign > 512)
++ dma_len = 512 - misalign;
++
++ /* Fill out per descriptor fields */
++ buffer->len = dma_len;
++ buffer->dma_addr = dma_addr;
++ len -= dma_len;
++ dma_addr += dma_len;
++ ++tx_queue->insert_count;
++ } while (len);
++
++ /* Transfer ownership of the unmapping to the final buffer */
++ buffer->unmap_addr = unmap_addr;
++ buffer->unmap_single = unmap_single;
++ buffer->unmap_len = unmap_len;
++ unmap_len = 0;
++
++ /* Get address and size of next fragment */
++ if (i >= skb_shinfo(skb)->nr_frags)
++ break;
++ fragment = &skb_shinfo(skb)->frags[i];
++ len = fragment->size;
++ page = fragment->page;
++ page_offset = fragment->page_offset;
++ i++;
++ /* Map for DMA */
++ unmap_single = 0;
++ dma_addr = pci_map_page(pci_dev, page, page_offset, len,
++ PCI_DMA_TODEVICE);
++ }
++
++ /* Transfer ownership of the skb to the final buffer */
++ buffer->skb = skb;
++ buffer->continuation = 0;
++
++ /* Pass off to hardware */
++ falcon_push_buffers(tx_queue);
++
++ return NETDEV_TX_OK;
++
++ pci_err:
++ EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
++ "fragments for DMA\n", tx_queue->queue, skb->len,
++ skb_shinfo(skb)->nr_frags + 1);
++
++ /* Mark the packet as transmitted, and free the SKB ourselves */
++ dev_kfree_skb_any((struct sk_buff *)skb);
++ goto unwind;
++
++ stop:
++ rc = NETDEV_TX_BUSY;
++
++ /* Stop the queue if it wasn't stopped before. */
++ if (tx_queue->stopped == 1)
++ efx_stop_queue(efx);
++
++ unwind:
++ /* Work backwards until we hit the original insert pointer value */
++ while (tx_queue->insert_count != tx_queue->write_count) {
++ --tx_queue->insert_count;
++ insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
++ buffer = &tx_queue->buffer[insert_ptr];
++ if (buffer->unmap_len) {
++ if (buffer->unmap_single)
++ pci_unmap_single(pci_dev, buffer->unmap_addr,
++ buffer->unmap_len,
++ PCI_DMA_TODEVICE);
++ else
++ pci_unmap_page(pci_dev, buffer->unmap_addr,
++ buffer->unmap_len,
++ PCI_DMA_TODEVICE);
++ }
++ buffer->unmap_len = 0;
++ buffer->len = 0;
++ }
++
++ /* Free the fragment we were mid-way through pushing */
++ if (unmap_len)
++ pci_unmap_page(pci_dev, unmap_addr, unmap_len,
++ PCI_DMA_TODEVICE);
++
++ return rc;
++}
++
++/* Remove packets from the TX queue
++ *
++ * This removes packets from the TX queue, up to and including the
++ * specified index.
++ */
++static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
++ unsigned int index)
++{
++ struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
++ struct efx_tx_buffer *buffer;
++ unsigned int stop_index, read_ptr;
++
++ /* Calculate the stopping point. Doing the check this way
++ * avoids wrongly completing every buffer in the ring if we
++ * get called twice with the same index. (Hardware should
++ * never do this, since it can't complete that many buffers in
++ * one go.)
++ */
++ stop_index = (index + 1) & tx_queue->efx->type->txd_ring_mask;
++ read_ptr = tx_queue->read_count & tx_queue->efx->type->txd_ring_mask;
++
++ while (read_ptr != stop_index) {
++ buffer = &tx_queue->buffer[read_ptr];
++ if (unlikely(buffer->len == 0)) {
++ EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
++ "completion id %x\n", tx_queue->queue,
++ read_ptr);
++ atomic_inc(&tx_queue->efx->errors.spurious_tx);
++ /* Don't reset */
++ } else {
++ if (buffer->unmap_len) {
++ if (buffer->unmap_single)
++ pci_unmap_single(pci_dev,
++ buffer->unmap_addr,
++ buffer->unmap_len,
++ PCI_DMA_TODEVICE);
++ else
++ pci_unmap_page(pci_dev,
++ buffer->unmap_addr,
++ buffer->unmap_len,
++ PCI_DMA_TODEVICE);
++ buffer->unmap_single = 0;
++ buffer->unmap_len = 0;
++ }
++ if (buffer->skb) {
++ dev_kfree_skb_any((struct sk_buff *)
++ buffer->skb);
++ buffer->skb = NULL;
++ EFX_TRACE(tx_queue->efx, "TX queue %d "
++ "transmission id %x complete\n",
++ tx_queue->queue, read_ptr);
++ }
++ buffer->continuation = 1;
++ buffer->len = 0;
++ }
++ ++tx_queue->read_count;
++ read_ptr = (tx_queue->read_count &
++ tx_queue->efx->type->txd_ring_mask);
++ }
++}
++
++/* Initiate a packet transmission on the specified TX queue.
++ * Note that returning anything other than NETDEV_TX_OK will cause the
++ * OS to free the skb.
++ *
++ * This function is split out from efx_hard_start_xmit to allow the
++ * loopback test to direct packets via specific TX queues. It is
++ * therefore a non-static inline, so as not to penalise performance
++ * for non-loopback transmissions.
++ *
++ * Context: netif_tx_lock held
++ */
++inline int efx_xmit(struct efx_nic *efx,
++ struct efx_tx_queue *tx_queue, struct sk_buff *skb)
++{
++ int rc;
++
++ /* Map fragments for DMA and add to TX queue */
++ rc = efx_enqueue_skb(tx_queue, skb);
++ if (unlikely(rc != NETDEV_TX_OK))
++ goto out;
++
++ /* Update last TX timer */
++ efx->net_dev->trans_start = jiffies;
++
++ out:
++ return rc;
++}
++
++/* Initiate a packet transmission. We use one channel per CPU
++ * (sharing when we have more CPUs than channels). On Falcon, the TX
++ * completion events will be directed back to the CPU that transmitted
++ * the packet, which should be cache-efficient.
++ *
++ * Context: non-blocking.
++ * Note that returning anything other than NETDEV_TX_OK will cause the
++ * OS to free the skb.
++ */
++int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
++{
++ struct efx_nic *efx = net_dev->priv;
++ struct efx_tx_queue *tx_queue;
++ enum efx_veto veto;
++ int rc = NETDEV_TX_OK;
++
++ /* We have one TX queue. */
++ tx_queue = &efx->tx_queue[0];
++
++ /* See if driverlink wants to veto the packet. */
++ veto = EFX_DL_CALLBACK(efx, tx_packet, skb);
++ if (unlikely(veto)) {
++ EFX_LOG(efx, "TX queue %d packet vetoed by "
++ "driverlink %s driver\n", tx_queue->queue,
++ efx->dl_cb_dev.tx_packet->driver->name);
++ /* Free the skb; nothing else will do it */
++ dev_kfree_skb_any((struct sk_buff *)skb);
++ goto out;
++ }
++
++ rc = efx_xmit(efx, tx_queue, skb);
++out:
++ return rc;
++}
++
++#if defined(EFX_USE_FASTCALL)
++void fastcall efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
++#else
++void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
++#endif
++{
++ unsigned long flags __attribute__ ((unused));
++ unsigned fill_level;
++ struct efx_nic *efx = tx_queue->efx;
++
++ EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
++
++ /* Remove buffers from TX queue */
++ efx_dequeue_buffers(tx_queue, index);
++
++ /* See if we need to restart the netif queue. This barrier
++ * separates the update of read_count from the test of
++ * stopped. */
++ smp_mb();
++ if (unlikely(tx_queue->stopped)) {
++ fill_level = tx_queue->insert_count - tx_queue->read_count;
++ if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
++ /* If the port is stopped and the net_dev isn't
++ * registered, then the caller must be performing
++ * flow control manually */
++ if (unlikely(!efx->net_dev_registered))
++ return;
++
++ /* Do this under netif_tx_lock(), to avoid racing
++ * with efx_xmit(). */
++ netif_tx_lock(efx->net_dev);
++ if (tx_queue->stopped) {
++ tx_queue->stopped = 0;
++ efx_wake_queue(efx);
++ }
++ netif_tx_unlock(efx->net_dev);
++ }
++ }
++}
++
++int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ struct efx_nic *efx = tx_queue->efx;
++ unsigned int txq_size;
++ int i, rc;
++
++ EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
++
++ /* Allocate software ring */
++ txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
++ tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
++ if (!tx_queue->buffer) {
++ rc = -ENOMEM;
++ goto fail1;
++ }
++ for (i = 0; i <= efx->type->txd_ring_mask; ++i)
++ tx_queue->buffer[i].continuation = 1;
++
++ /* Allocate hardware ring */
++ rc = falcon_probe_tx(tx_queue);
++ if (rc)
++ goto fail2;
++
++ return 0;
++
++ fail2:
++ kfree(tx_queue->buffer);
++ tx_queue->buffer = NULL;
++ fail1:
++ /* Mark queue as unused */
++ tx_queue->used = 0;
++
++ return rc;
++}
++
++int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
++
++ ASSERT_RTNL();
++
++ /* Initialise fields */
++ tx_queue->insert_count = 0;
++ tx_queue->write_count = 0;
++ tx_queue->read_count = 0;
++ tx_queue->old_read_count = 0;
++ BUG_ON(tx_queue->stopped);
++
++ /* Set up TX descriptor ring */
++ return falcon_init_tx(tx_queue);
++}
++
++void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
++{
++ unsigned int last_index, mask;
++ if (tx_queue->buffer) {
++ /* Free any buffers left in the ring */
++ mask = tx_queue->efx->type->txd_ring_mask;
++ last_index = (tx_queue->insert_count - 1) & mask;
++ EFX_LOG(tx_queue->efx, "Will dequeue up to 0x%x from 0x%x\n",
++ last_index, tx_queue->read_count & mask);
++ efx_dequeue_buffers(tx_queue, last_index);
++ }
++}
++
++void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
++
++ ASSERT_RTNL();
++
++ /* Flush TX queue, remove descriptor ring */
++ falcon_fini_tx(tx_queue);
++
++ /* Release TX buffers */
++ efx_release_tx_buffers(tx_queue);
++
++ /* Release queue's stop on port, if any */
++ if (tx_queue->stopped) {
++ tx_queue->stopped = 0;
++ efx_wake_queue(tx_queue->efx);
++ }
++}
++
++void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
++{
++ EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
++ falcon_remove_tx(tx_queue);
++
++ kfree(tx_queue->buffer);
++ tx_queue->buffer = NULL;
++ tx_queue->used = 0;
++}
++
++
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/txc43128_phy.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/txc43128_phy.c
+--- linux-2.6.18.8/drivers/net/sfc/txc43128_phy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/txc43128_phy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,725 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++/*
++ * Driver for Transwitch/Mysticom CX4 retimer
++ * see www.transwitch.com, part is TXC-43128
++ */
++
++#include <linux/delay.h>
++#include <linux/seq_file.h>
++#include "efx.h"
++#include "debugfs.h"
++#include "gmii.h"
++#include "mdio_10g.h"
++#include "xenpack.h"
++#include "phy.h"
++#include "lm87_support.h"
++#include "falcon.h"
++#include "workarounds.h"
++
++/* We expect these MMDs to be in the package */
++#define TXC_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \
++ MDIO_MMDREG_DEVS0_PMAPMD | \
++ MDIO_MMDREG_DEVS0_PHYXS)
++
++#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
++ (1 << LOOPBACK_PMAPMD) | \
++ (1 << LOOPBACK_NETWORK))
++
++/**************************************************************************
++ *
++ * Compile-time config
++ *
++ **************************************************************************
++ */
++#define TXCNAME "TXC43128"
++/* Total length of time we'll wait for the PHY to come out of reset */
++#define TXC_MAX_RESET_TIME 500
++/* Interval between checks */
++#define TXC_RESET_WAIT 10
++/* How long to run BIST: At 10Gbps 50 microseconds should be plenty to get
++ * some stats */
++#define TXC_BIST_DURATION (50)
++
++#define BER_INTERVAL (10 * efx_monitor_interval)
++
++/**************************************************************************
++ *
++ * Register definitions
++ *
++ **************************************************************************
++ */
++#define XAUI_NUM_LANES (4)
++
++/*** Global register bank */
++/* Silicon ID register */
++#define TXC_GLRGS_SLID (0xc000)
++#define TXC_GLRGS_SLID_MASK (0x1f)
++
++/* Command register */
++#define TXC_GLRGS_GLCMD (0xc004)
++/* Useful bits in command register */
++/* Lane power-down */
++#define TXC_GLCMD_L01PD_LBN (5)
++#define TXC_GLCMD_L23PD_LBN (6)
++/* Limited SW reset: preserves configuration but
++ * initiates a logic reset. Self-clearing */
++#define TXC_GLCMD_LMTSWRST_LBN (14)
++
++/* Signal Quality Control */
++#define TXC_GLRGS_GSGQLCTL (0xc01a)
++/* Enable bit */
++#define TXC_GSGQLCT_SGQLEN_LBN (15)
++/* Lane selection */
++#define TXC_GSGQLCT_LNSL_LBN (13)
++#define TXC_GSGQLCT_LNSL_WIDTH (2)
++
++/* Signal Quality Input */
++#define TXC_GLRGS_GSGQLIN (0xc01b)
++/* Signal Quality Grade */
++#define TXC_GLRGS_GSGQLGRD (0xc01c)
++/* Drift sign */
++#define TXC_GSGQLGRD_DRFTSGN_LBN (15)
++/* Grade valid flag */
++#define TXC_GSGQLGRD_GRDVAL_LBN (14)
++/* Remaining bits are the actual grade */
++#define TXC_GSGQLGRD_GRADE_LBN (0)
++#define TXC_GSGQLGRD_GRADE_WIDTH (14)
++
++/* Signal Quality Drift: 16-bit drift value */
++#define TXC_GLRGS_GSGQLDRFT (0xc01d)
++
++/**** Analog register bank */
++#define TXC_ALRGS_ATXCTL (0xc040)
++/* Lane power-down */
++#define TXC_ATXCTL_TXPD3_LBN (15)
++#define TXC_ATXCTL_TXPD2_LBN (14)
++#define TXC_ATXCTL_TXPD1_LBN (13)
++#define TXC_ATXCTL_TXPD0_LBN (12)
++
++/* Amplitude on lanes 0, 1 */
++#define TXC_ALRGS_ATXAMP0 (0xc041)
++/* Amplitude on lanes 2, 3 */
++#define TXC_ALRGS_ATXAMP1 (0xc042)
++/* Bit position of value for lane 0 (or 2) */
++#define TXC_ATXAMP_LANE02_LBN (3)
++/* Bit position of value for lane 1 (or 3) */
++#define TXC_ATXAMP_LANE13_LBN (11)
++
++#define TXC_ATXAMP_1280_mV (0)
++#define TXC_ATXAMP_1200_mV (8)
++#define TXC_ATXAMP_1120_mV (12)
++#define TXC_ATXAMP_1060_mV (14)
++#define TXC_ATXAMP_0820_mV (25)
++#define TXC_ATXAMP_0720_mV (26)
++#define TXC_ATXAMP_0580_mV (27)
++#define TXC_ATXAMP_0440_mV (28)
++
++#define TXC_ATXAMP_0820_BOTH \
++ ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
++ | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
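++/* = (25 << 3) | (25 << 11) = 0xc8c8, i.e. 820mV on both lanes of
++ * the register (it is written to both ATXAMP registers to cover
++ * all four lanes) */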
++
++#define TXC_ATXAMP_DEFAULT (0x6060) /* From databook */
++
++/* Preemphasis on lanes 0, 1 */
++#define TXC_ALRGS_ATXPRE0 (0xc043)
++/* Preemphasis on lanes 2, 3 */
++#define TXC_ALRGS_ATXPRE1 (0xc044)
++
++#define TXC_ATXPRE_NONE (0)
++#define TXC_ATXPRE_DEFAULT (0x1010) /* From databook */
++
++#define TXC_ALRGS_ARXCTL (0xc045)
++/* Lane power-down */
++#define TXC_ARXCTL_RXPD3_LBN (15)
++#define TXC_ARXCTL_RXPD2_LBN (14)
++#define TXC_ARXCTL_RXPD1_LBN (13)
++#define TXC_ARXCTL_RXPD0_LBN (12)
++
++/*** receiver control registers: Bit Error Rate measurement */
++/* Per lane BER timers */
++#define TXC_RXCTL_BERTMR0 (0xc0d4)
++#define TXC_RXCTL_BERTMR1 (0xc154)
++#define TXC_RXCTL_BERTMR2 (0xc1d4)
++#define TXC_RXCTL_BERTMR3 (0xc254)
++/* Per lane BER counters */
++#define TXC_RXCTL_BERCNT0 (0xc0d5)
++#define TXC_RXCTL_BERCNT1 (0xc155)
++#define TXC_RXCTL_BERCNT2 (0xc1d5)
++#define TXC_RXCTL_BERCNT3 (0xc255)
++
++#define BER_REG_SPACING (TXC_RXCTL_BERTMR1 - TXC_RXCTL_BERTMR0)
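++/* The per-lane registers are evenly spaced, so lane N's timer is
++ * TXC_RXCTL_BERTMR0 + N * BER_REG_SPACING; e.g. lane 2 gives
++ * 0xc0d4 + 2 * 0x80 = 0xc1d4 = TXC_RXCTL_BERTMR2. The BERCNT
++ * registers follow the same pattern. */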
++
++/*** Main user-defined register set */
++/* Main control */
++#define TXC_MRGS_CTL (0xc340)
++/* Bits in main control */
++#define TXC_MCTL_RESET_LBN (15) /* Self clear */
++#define TXC_MCTL_TXLED_LBN (14) /* 1 to show align status */
++#define TXC_MCTL_RXLED_LBN (13) /* 1 to show align status */
++
++/* GPIO output */
++#define TXC_GPIO_OUTPUT (0xc346)
++#define TXC_GPIO_DIR (0xc348)
++
++/*** Vendor-specific BIST registers */
++#define TXC_BIST_CTL (0xc280)
++#define TXC_BIST_TXFRMCNT (0xc281)
++#define TXC_BIST_RX0FRMCNT (0xc282)
++#define TXC_BIST_RX1FRMCNT (0xc283)
++#define TXC_BIST_RX2FRMCNT (0xc284)
++#define TXC_BIST_RX3FRMCNT (0xc285)
++#define TXC_BIST_RX0ERRCNT (0xc286)
++#define TXC_BIST_RX1ERRCNT (0xc287)
++#define TXC_BIST_RX2ERRCNT (0xc288)
++#define TXC_BIST_RX3ERRCNT (0xc289)
++
++/*** BIST control bits */
++/* BIST type (controls the bit pattern used in the test) */
++#define TXC_BIST_CTRL_TYPE_LBN (10)
++#define TXC_BIST_CTRL_TYPE_TSD (0) /* TranSwitch Deterministic */
++#define TXC_BIST_CTRL_TYPE_CRP (1) /* CRPAT standard */
++#define TXC_BIST_CTRL_TYPE_CJP (2) /* CJPAT standard */
++#define TXC_BIST_CTRL_TYPE_TSR (3) /* TranSwitch pseudo-random */
++/* Set this to 1 for 10 bit and 0 for 8 bit */
++#define TXC_BIST_CTRL_B10EN_LBN (12)
++/* Enable BIST (write 0 to disable) */
++#define TXC_BIST_CTRL_ENAB_LBN (13)
++/* Stop BIST (self-clears when stop complete) */
++#define TXC_BIST_CTRL_STOP_LBN (14)
++/* Start BIST (cleared by writing 1 to STOP) */
++#define TXC_BIST_CTRL_STRT_LBN (15)
++
++/* Mt. Diablo test configuration */
++#define TXC_MTDIABLO_CTRL (0xc34f)
++#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN (10)
++
++struct txc43128_data {
++#ifdef CONFIG_SFC_DEBUGFS
++ /* BER stats, updated from check_hw. Note that these are in
++ * errors/second; converting to errors/bit is left as an exercise
++ * for user-space.
++ */
++ unsigned phy_ber_pcs[4];
++ unsigned phy_ber_phyxs[4];
++#endif
++ unsigned bug10934_timer;
++ int phy_powered;
++ int tx_disabled;
++ enum efx_loopback_mode loopback_mode;
++};
++
++/* Perform the bug 10934 workaround every 5s */
++#define BUG10934_RESET_INTERVAL (5 * HZ)
++
++
++/* Perform a reset that doesn't clear configuration changes */
++static void txc_reset_logic(struct efx_nic *efx);
++
++/* Set the output value of a gpio */
++void txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
++{
++ int outputs;
++
++ outputs = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT);
++
++ outputs = (outputs & ~(1 << pin)) | (on << pin);
++
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT,
++ outputs);
++}
++
++/* Set up the GPIO direction register */
++void txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
++{
++ int dirs;
++
++ if (efx->board_info.minor < 3 &&
++ efx->board_info.major == 0)
++ return;
++
++ dirs = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_GPIO_DIR);
++ dirs = (dirs & ~(1 << pin)) | (dir << pin);
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_GPIO_DIR, dirs);
++
++}
++
++/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
++ * global reset (it's less clear what reset of other MMDs does).*/
++static int txc_reset_phy(struct efx_nic *efx)
++{
++ int rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PMAPMD,
++ TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
++ TXC_RESET_WAIT);
++ if (rc < 0)
++ goto fail;
++
++ /* Check that all the MMDs we expect are present and responding. We
++ * expect faults on some if the link is down, but not on the PHY XS */
++ rc = mdio_clause45_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
++ if (rc < 0)
++ goto fail;
++
++ return 0;
++
++ fail:
++ EFX_ERR(efx, TXCNAME ": reset timed out!\n");
++ return rc;
++}
++
++/* Run a single BIST on one MMD*/
++static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
++{
++ int phy = efx->mii.phy_id;
++ int ctrl, bctl;
++ int lane;
++ int rc = 0;
++
++ EFX_INFO(efx, "" TXCNAME ": running BIST on %s MMD\n",
++ mdio_clause45_mmd_name(mmd));
++
++ /* Set PMA to test into loopback using Mt Diablo reg as per app note */
++ ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
++ TXC_MTDIABLO_CTRL);
++ ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ TXC_MTDIABLO_CTRL, ctrl);
++
++
++ /* The BIST app. note lists these as 3 distinct steps. */
++ /* Set the BIST type */
++ bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
++ mdio_clause45_write(efx, phy, mmd, TXC_BIST_CTL, bctl);
++
++ /* Set the BSTEN bit in the BIST Control register to enable */
++ bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
++ mdio_clause45_write(efx, phy, mmd, TXC_BIST_CTL, bctl);
++
++ /* Set the BSTRT bit in the BIST Control register */
++ mdio_clause45_write(efx, phy, mmd, TXC_BIST_CTL, bctl |
++ (1 << TXC_BIST_CTRL_STRT_LBN));
++
++ /* Wait. */
++ udelay(TXC_BIST_DURATION);
++
++ /* Set the BSTOP bit in the BIST Control register */
++ bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
++ mdio_clause45_write(efx, phy, mmd, TXC_BIST_CTL, bctl);
++
++ /* The STOP bit should go off when things have stopped */
++ while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
++ bctl = mdio_clause45_read(efx, phy, mmd, TXC_BIST_CTL);
++
++ /* Check all the error counts are 0 and all the frame counts are
++ * non-zero */
++ for (lane = 0; lane < 4; lane++) {
++ int count = mdio_clause45_read(efx, phy, mmd,
++ TXC_BIST_RX0ERRCNT + lane);
++ if (count != 0) {
++ EFX_ERR(efx, ""TXCNAME": BIST error. "
++ "Lane %d had %d errs\n", lane, count);
++ rc = -EIO;
++ }
++ count = mdio_clause45_read(efx, phy, mmd,
++ TXC_BIST_RX0FRMCNT + lane);
++ if (count == 0) {
++ EFX_ERR(efx, ""TXCNAME": BIST error. "
++ "Lane %d got 0 frames\n", lane);
++ rc = -EIO;
++ }
++ }
++
++ if (rc == 0)
++ EFX_INFO(efx, ""TXCNAME": BIST pass\n");
++
++ /* Disable BIST */
++ mdio_clause45_write(efx, phy, mmd, TXC_BIST_CTL, 0);
++
++ /* Turn off loopback */
++ ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
++ mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
++ TXC_MTDIABLO_CTRL, ctrl);
++
++ return rc;
++}
++
++/* Run all the desired BIST tests for the PHY */
++static int txc_bist(struct efx_nic *efx)
++{
++ int rc;
++ /*!\todo: experiment with running more of the BIST patterns to
++ * see if it actually shows up more problems. */
++ rc = txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
++ return rc;
++}
++
++#ifdef CONFIG_SFC_DEBUGFS
++
++/* debugfs entries for this PHY */
++static struct efx_debugfs_parameter debug_entries[] = {
++ EFX_PER_LANE_PARAMETER("phy_ber_lane", "_pcs",
++ struct txc43128_data, phy_ber_pcs,
++ unsigned, efx_debugfs_read_uint),
++ EFX_PER_LANE_PARAMETER("phy_ber_lane", "_phyxs",
++ struct txc43128_data, phy_ber_phyxs,
++ unsigned, efx_debugfs_read_uint),
++ EFX_INT_PARAMETER(struct txc43128_data, phy_powered),
++ {NULL}
++};
++
++#endif /* CONFIG_SFC_DEBUGFS */
++
++/* Push the non-configurable defaults into the PHY. This must be
++ * done after every full reset */
++static void txc_apply_defaults(struct efx_nic *efx)
++{
++ int mctrl;
++
++ /* Turn amplitude down and preemphasis off on the host side
++ * (PHY<->MAC) as this is believed less likely to upset Falcon
++ * and no adverse effects have been noted. It probably also
++ * saves a picowatt or two */
++
++ /* Turn off preemphasis */
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PHYXS,
++ TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PHYXS,
++ TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
++
++ /* Turn down the amplitude */
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PHYXS,
++ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PHYXS,
++ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
++
++ /* Set the line side amplitude and preemphasis to the databook
++ * defaults as an erratum causes them to be 0 on at least some
++	 * PHY revisions */
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
++
++ /* Set up the LEDs */
++ mctrl = mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_MRGS_CTL);
++
++ /* Set the Green and Red LEDs to their default modes */
++ mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
++
++ /* Databook recommends doing this after configuration changes */
++ txc_reset_logic(efx);
++
++ efx->board_info.init_leds(efx);
++}
++
++/* Initialisation entry point for this PHY driver */
++static int txc43128_phy_init(struct efx_nic *efx)
++{
++ u32 devid;
++ int rc = 0;
++ struct txc43128_data *phy_data;
++
++ devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
++
++	phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
++	if (!phy_data)
++		return -ENOMEM;
++	efx->phy_data = phy_data;
++
++ /* This is the default after reset */
++ phy_data->phy_powered = efx->phy_powered;
++ phy_data->tx_disabled = efx->tx_disabled;
++
++#ifdef CONFIG_SFC_DEBUGFS
++ rc = efx_extend_debugfs_port(efx, phy_data, debug_entries);
++ if (rc < 0)
++ goto fail1;
++#endif
++ EFX_INFO(efx, ""TXCNAME ": PHY ID reg %x (OUI %x model %x "
++ "revision %x)\n", devid, MDIO_ID_OUI(devid),
++ MDIO_ID_MODEL(devid), MDIO_ID_REV(devid));
++
++ EFX_INFO(efx, ""TXCNAME ": Silicon ID %x\n",
++ mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PHYXS, TXC_GLRGS_SLID) &
++ TXC_GLRGS_SLID_MASK);
++
++ rc = txc_reset_phy(efx);
++ if (rc < 0)
++ goto fail2;
++
++ rc = txc_bist(efx);
++ if (rc < 0)
++ goto fail2;
++
++ txc_apply_defaults(efx);
++
++ return 0;
++
++ fail2:
++#ifdef CONFIG_SFC_DEBUGFS
++ efx_trim_debugfs_port(efx, debug_entries);
++ /* fall-thru */
++ fail1:
++#endif
++ kfree(efx->phy_data);
++ efx->phy_data = NULL;
++ return rc;
++}
++
++/* Set the lane power down state in the global registers */
++static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
++{
++ int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
++ int ctl = mdio_clause45_read(efx, efx->mii.phy_id,
++ mmd, TXC_GLRGS_GLCMD);
++
++ if (efx->phy_powered)
++ ctl &= ~pd;
++ else
++ ctl |= pd;
++
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ mmd, TXC_GLRGS_GLCMD, ctl);
++}
++
++/* Set the lane power down state in the analog control registers */
++static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
++{
++ int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
++ | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
++
++	int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
++		| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
++
++ int txctl = mdio_clause45_read(efx, efx->mii.phy_id,
++ mmd, TXC_ALRGS_ATXCTL);
++ int rxctl = mdio_clause45_read(efx, efx->mii.phy_id,
++ mmd, TXC_ALRGS_ARXCTL);
++
++ if (efx->phy_powered) {
++ txctl &= ~txpd;
++ rxctl &= ~rxpd;
++ } else {
++ txctl |= txpd;
++ rxctl |= rxpd;
++ }
++
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ mmd, TXC_ALRGS_ATXCTL, txctl);
++ mdio_clause45_write(efx, efx->mii.phy_id,
++ mmd, TXC_ALRGS_ARXCTL, rxctl);
++}
++
++static void txc_set_power(struct efx_nic *efx)
++{
++ /* According to the data book, all the MMDs can do low power */
++ mdio_clause45_set_mmds_lpower(efx, !efx->phy_powered,
++ TXC_REQUIRED_DEVS);
++
++ /* Global register bank is in PCS, PHY XS. These control the host
++ * side and line side settings respectively. */
++ txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
++ txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
++
++ /* Analog register bank in PMA/PMD, PHY XS */
++ txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
++ txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
++}
++
++
++static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
++{
++ int portid = efx->mii.phy_id;
++ int val = mdio_clause45_read(efx, portid, mmd, TXC_GLRGS_GLCMD);
++ int tries = 50;
++ val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
++ mdio_clause45_write(efx, portid, mmd, TXC_GLRGS_GLCMD, val);
++ while (tries--) {
++ val = mdio_clause45_read(efx, portid, mmd,
++ TXC_GLRGS_GLCMD);
++ if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
++ break;
++ udelay(1);
++ }
++	if (!tries)
++		EFX_INFO(efx, TXCNAME ": logic reset timed out!\n");
++}
++
++
++/* Perform a logic reset. This preserves the configuration registers
++ * and is needed for some configuration changes to take effect */
++static void txc_reset_logic(struct efx_nic *efx)
++{
++ /* The data sheet claims we can do the logic reset on either the
++ * PCS or the PHYXS and the result is a reset of both host- and
++ * line-side logic. */
++ txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
++}
++
++static int txc43128_phy_read_link(struct efx_nic *efx)
++{
++ return mdio_clause45_links_ok(efx, TXC_REQUIRED_DEVS);
++}
++
++static void txc43128_phy_reconfigure(struct efx_nic *efx)
++{
++ struct txc43128_data *phy_data = efx->phy_data;
++ int power_change = (efx->phy_powered != phy_data->phy_powered);
++ int loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
++ int disable_change = (efx->tx_disabled != phy_data->tx_disabled);
++
++ if (!phy_data->tx_disabled && efx->tx_disabled) {
++ txc_reset_phy(efx);
++ txc_apply_defaults(efx);
++ falcon_reset_xaui(efx);
++ disable_change = 0;
++ }
++
++ mdio_clause45_transmit_disable(efx, efx->tx_disabled);
++ mdio_clause45_phy_reconfigure(efx);
++ if (power_change)
++ txc_set_power(efx);
++
++ /* The data sheet claims this is required after every reconfiguration
++ * (note at end of 7.1), but we mustn't do it when nothing changes as
++ * it glitches the link, and reconfigure gets called on link change,
++ * so we get an IRQ storm on link up. */
++ if (loop_change || power_change || disable_change)
++ txc_reset_logic(efx);
++
++ phy_data->phy_powered = efx->phy_powered;
++ phy_data->loopback_mode = efx->loopback_mode;
++ phy_data->tx_disabled = efx->tx_disabled;
++ efx->link_up = txc43128_phy_read_link(efx);
++ efx->link_options = GM_LPA_10000FULL;
++}
++
++static void txc43128_phy_fini(struct efx_nic *efx)
++{
++ efx->board_info.blink(efx, 0);
++
++ /* Disable link events */
++ xenpack_disable_lasi_irqs(efx);
++
++#ifdef CONFIG_SFC_DEBUGFS
++ /* Remove the extra debug entries and free data */
++ efx_trim_debugfs_port(efx, debug_entries);
++#endif
++ kfree(efx->phy_data);
++ efx->phy_data = NULL;
++}
++
++/* Periodic callback: this exists mainly to poll link status as we currently
++ * don't use LASI interrupts. Also update the BER counters and poll the lm87 */
++static int txc43128_phy_check_hw(struct efx_nic *efx)
++{
++ struct txc43128_data *data = efx->phy_data;
++#ifdef CONFIG_SFC_DEBUGFS
++ int phy = efx->mii.phy_id;
++ int timer, count, i, mmd;
++#endif
++ int rc = 0;
++ int link_up = txc43128_phy_read_link(efx);
++
++ /* Simulate a PHY event if link state has changed */
++ if (link_up != efx->link_up) {
++ efx->link_up = link_up;
++ efx->mac_op->fake_phy_event(efx);
++ } else if (EFX_WORKAROUND_10934(efx)) {
++ if (link_up || (efx->loopback_mode != LOOPBACK_NONE))
++ data->bug10934_timer = jiffies;
++ else {
++ int delta = jiffies - data->bug10934_timer;
++ if (delta >= BUG10934_RESET_INTERVAL) {
++ data->bug10934_timer = jiffies;
++ txc_reset_logic(efx);
++ }
++ }
++ }
++
++ rc = efx->board_info.monitor(efx);
++ if (rc) {
++ EFX_ERR(efx, "" TXCNAME
++ ": sensor alert! Putting PHY into low power.\n");
++ efx->phy_powered = 0;
++ txc_set_power(efx);
++ }
++
++#ifdef CONFIG_SFC_DEBUGFS
++ /* There are 2 MMDs with RX BER counters: PCS and PHY XS,
++ * which happen to be consecutively numbered */
++ for (mmd = MDIO_MMD_PCS; mmd <= MDIO_MMD_PHYXS; mmd++) {
++ for (i = 0; i < XAUI_NUM_LANES; i++) {
++ timer = mdio_clause45_read(efx, phy, mmd,
++ TXC_RXCTL_BERTMR0 +
++ i * BER_REG_SPACING);
++ count = mdio_clause45_read(efx, phy, mmd,
++ TXC_RXCTL_BERCNT0 +
++ i * BER_REG_SPACING);
++ /* The BER timer counts down in seconds. If it would
++ * expire before the next check_hw, update the stats &
++ * restart the timer (clears the count) */
++ if (timer * HZ < efx_monitor_interval) {
++ /* Record count, allowing for the fact that the
++ * timer may not have reached zero */
++ unsigned ber = (count * BER_INTERVAL) /
++ (BER_INTERVAL - timer * HZ);
++ if (mmd == MDIO_MMD_PCS)
++ data->phy_ber_pcs[i] = ber;
++ else
++ data->phy_ber_phyxs[i] = ber;
++ /* Reprogram the timer */
++ mdio_clause45_write(efx, phy, mmd,
++ TXC_RXCTL_BERTMR0 +
++ i * BER_REG_SPACING,
++ BER_INTERVAL / HZ);
++ }
++ }
++ }
++#endif /* CONFIG_SFC_DEBUGFS */
++ return rc;
++}
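++
++/* Worked example of the BER extrapolation above (numbers invented):
++ * BER_INTERVAL is in jiffies and the BER timer counts down in seconds,
++ * so with BER_INTERVAL = 10 * HZ, timer = 4 and count = 30, the counter
++ * has only accumulated over 6 of the 10 seconds and
++ *	ber = (count * BER_INTERVAL) / (BER_INTERVAL - timer * HZ)
++ *	    = (30 * 10 * HZ) / (10 * HZ - 4 * HZ) = 50
++ * estimates the error count over a full interval. */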
++
++struct efx_phy_operations falcon_txc_phy_ops = {
++ .init = txc43128_phy_init,
++ .reconfigure = txc43128_phy_reconfigure,
++ .check_hw = txc43128_phy_check_hw,
++ .fini = txc43128_phy_fini,
++ .clear_interrupt = efx_port_dummy_op_void,
++ .reset_xaui = efx_port_dummy_op_void,
++ .mmds = TXC_REQUIRED_DEVS,
++ .loopbacks = TXC_LOOPBACKS,
++ .startup_loopback = LOOPBACK_PMAPMD,
++};
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/tx.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/tx.h
+--- linux-2.6.18.8/drivers/net/sfc/tx.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/tx.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,41 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006: Fen Systems Ltd.
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Initially developed by Michael Brown <mbrown@fensystems.co.uk>
++ * Maintained by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_TX_H
++#define EFX_TX_H
++
++#include "net_driver.h"
++
++int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
++void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
++int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
++void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
++
++int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
++void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
++
++#endif /* EFX_TX_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/workarounds.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/workarounds.h
+--- linux-2.6.18.8/drivers/net/sfc/workarounds.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/workarounds.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,97 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_WORKAROUNDS_H
++#define EFX_WORKAROUNDS_H
++
++/*
++ * Hardware workarounds.
++ * Bug numbers are from Solarflare's Bugzilla.
++ */
++
++#define EFX_WORKAROUND_ALWAYS(efx) 1
++#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
++#define EFX_WORKAROUND_FALCON_B0FPGA(efx) \
++ (FALCON_REV(efx) == FALCON_REV_B0 && !(efx)->is_asic)
++
++/* XAUI resets if link not detected */
++#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
++/* SNAP frames have TOBE_DISC set */
++#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
++/* PHY interrupts can go to the wrong port */
++#define EFX_WORKAROUND_6263 EFX_WORKAROUND_ALWAYS
++/* Reprog PCIe ACK timer to workaround issue in PCIe IP block */
++#define EFX_WORKAROUND_6943 EFX_WORKAROUND_ALWAYS
++/* RX PCIe double split performance issue */
++#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
++/* Bit-bashed I2C reads cause performance drop */
++#define EFX_WORKAROUND_7884 EFX_WORKAROUND_ALWAYS
++/* Selftests need to be retried */
++#define EFX_WORKAROUND_8909 EFX_WORKAROUND_ALWAYS
++/* Queued ACKs aren't flushed before L1 entry */
++#define EFX_WORKAROUND_9096 EFX_WORKAROUND_ALWAYS
++/* TX pkt parser problem with <= 16 byte TXes */
++#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
++/* XGXS and XAUI reset sequencing in SW */
++#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
++/* Low rate CRC errors require XAUI reset */
++#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
++/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
++ * or a PCIe error (bug 11028) */
++#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
++/* CX4 retimer fails to bring link up after reset */
++#define EFX_WORKAROUND_10934 EFX_WORKAROUND_ALWAYS
++/* Transmit flow control may get disabled */
++#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
++/* Flush events can take a very long time to appear */
++#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
++
++/* Spurious parity errors in TSORT buffers */
++#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
++/* No unaligned TX over 512 byte boundaries */
++#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
++/* iSCSI parsing errors */
++#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
++/* RX events go missing */
++#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
++/* RX_RESET on A1 */
++#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
++/* Spurious duplicate RX events */
++#define EFX_WORKAROUND_7062 EFX_WORKAROUND_FALCON_A
++/* Increase filter depth to avoid RX_RESET */
++#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
++/* Flushes may never complete */
++#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
++/* Leak overlength packets rather than free */
++#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
++
++/* Memory needs clearing at start-of-day */
++#define EFX_WORKAROUND_8202 EFX_WORKAROUND_FALCON_B0FPGA
++/* MAC statistics are transient */
++#define EFX_WORKAROUND_8419 EFX_WORKAROUND_FALCON_B0FPGA
++/* Prefetch watchdog timer may trigger erroneously on busy systems */
++#define EFX_WORKAROUND_9008 EFX_WORKAROUND_FALCON_B0FPGA
++
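++/* Usage sketch: each workaround is tested at run time as a predicate,
++ * for example txc43128_phy.c does
++ *	if (EFX_WORKAROUND_10934(efx))
++ *		... reset the PHY logic if the link has stayed down ...
++ * so an ALWAYS workaround evaluates to a constant 1 while a FALCON_A
++ * one expands to a silicon revision check. */
++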
++#endif /* EFX_WORKAROUNDS_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/xenpack.h linux-2.6.18-xen-3.3.0/drivers/net/sfc/xenpack.h
+--- linux-2.6.18.8/drivers/net/sfc/xenpack.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/xenpack.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,80 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef EFX_XENPACK_H
++#define EFX_XENPACK_H
++
++/* Exported functions from Xenpack standard PHY control */
++
++#include "mdio_10g.h"
++
++/****************************************************************************/
++/* XENPACK MDIO register extensions */
++#define MDIO_XP_LASI_RX_CTRL (0x9000)
++#define MDIO_XP_LASI_TX_CTRL (0x9001)
++#define MDIO_XP_LASI_CTRL (0x9002)
++#define MDIO_XP_LASI_RX_STAT (0x9003)
++#define MDIO_XP_LASI_TX_STAT (0x9004)
++#define MDIO_XP_LASI_STAT (0x9005)
++
++/* Control/Status bits */
++#define XP_LASI_LS_ALARM (1 << 0)
++#define XP_LASI_TX_ALARM (1 << 1)
++#define XP_LASI_RX_ALARM (1 << 2)
++/* These two are Quake vendor extensions to the standard XENPACK defines */
++#define XP_LASI_LS_INTB (1 << 3)
++#define XP_LASI_TEST (1 << 7)
++
++/* Enable LASI interrupts for PHY */
++static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx)
++{
++ int reg;
++ int phy_id = efx->mii.phy_id;
++ /* Read to clear LASI status register */
++ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
++ MDIO_XP_LASI_STAT);
++
++ /* Enable LASI interrupts from PMA/PMD */
++ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
++ MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM);
++}
++
++/* Read the LASI interrupt status to clear the interrupt. */
++static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx)
++{
++ /* Read to clear link status alarm */
++ return mdio_clause45_read(efx, efx->mii.phy_id,
++ MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT);
++}
++
++/* Turn off LASI interrupts */
++static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx)
++{
++ /* Turn LASI interrupts off */
++ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
++ MDIO_XP_LASI_CTRL, 0);
++}
++
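++/* Typical use, as in the PHY drivers elsewhere in this patch
++ * (illustrative): enable LASI once the PHY is configured, read-to-clear
++ * from the interrupt path, and disable on teardown, e.g.
++ *	xenpack_enable_lasi_irqs(efx);
++ *	...
++ *	status = xenpack_clear_lasi_irqs(efx);	(in .clear_interrupt)
++ *	...
++ *	xenpack_disable_lasi_irqs(efx);		(in .fini)
++ */
++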
++#endif /* EFX_XENPACK_H */
+diff -rpuN linux-2.6.18.8/drivers/net/sfc/xfp_phy.c linux-2.6.18-xen-3.3.0/drivers/net/sfc/xfp_phy.c
+--- linux-2.6.18.8/drivers/net/sfc/xfp_phy.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/net/sfc/xfp_phy.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,206 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed by Solarflare Communications <linux-net-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++/*
++ * Driver for XFP optical PHYs (plus some support specific to the Quake 2032)
++ * See www.amcc.com for details (search for qt2032)
++ */
++
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include "efx.h"
++#include "gmii.h"
++#include "mdio_10g.h"
++#include "xenpack.h"
++#include "phy.h"
++
++#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \
++ MDIO_MMDREG_DEVS0_PMAPMD | \
++ MDIO_MMDREG_DEVS0_PHYXS)
++
++#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \
++ (1 << LOOPBACK_PMAPMD) | \
++ (1 << LOOPBACK_NETWORK))
++
++/****************************************************************************/
++/* Quake-specific MDIO registers */
++#define MDIO_QUAKE_LED0_REG (0xD006)
++
++
++void xfp_set_led(struct efx_nic *p, int led, int mode)
++{
++ int addr = MDIO_QUAKE_LED0_REG + led;
++ mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr,
++ mode);
++}
++
++struct xfp_phy_data {
++ int phy_powered;
++ int tx_disabled;
++};
++
++
++#define XFP_MAX_RESET_TIME 500
++#define XFP_RESET_WAIT 10
++
++/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
++ * a complete soft reset.
++ */
++static int xfp_reset_phy(struct efx_nic *efx)
++{
++ int rc;
++
++ rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
++ XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
++ XFP_RESET_WAIT);
++ if (rc < 0)
++ goto fail;
++
++ /* Wait 250ms for the PHY to complete bootup */
++ msleep(250);
++
++ /* Check that all the MMDs we expect are present and responding. We
++ * expect faults on some if the link is down, but not on the PHY XS */
++ rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
++ MDIO_MMDREG_DEVS0_PHYXS);
++ if (rc < 0)
++ goto fail;
++
++ efx->board_info.init_leds(efx);
++
++ return rc;
++
++ fail:
++ EFX_ERR(efx, "XFP: reset timed out!\n");
++ return rc;
++}
++
++
++static int xfp_phy_init(struct efx_nic *efx)
++{
++ struct xfp_phy_data *phy_data;
++ u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
++ int rc;
++
++	phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
++	if (!phy_data)
++		return -ENOMEM;
++	efx->phy_data = (void *) phy_data;
++
++ EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
++ " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
++ MDIO_ID_REV(devid));
++
++ phy_data->phy_powered = efx->phy_powered;
++ phy_data->tx_disabled = efx->tx_disabled;
++
++ rc = xfp_reset_phy(efx);
++ if (rc < 0)
++ goto fail;
++
++ EFX_INFO(efx, "XFP: PHY init %s.\n",
++ rc ? "failed" : "successful");
++ return 0;
++
++ fail:
++ kfree(efx->phy_data);
++ efx->phy_data = NULL;
++ return rc;
++}
++
++static void xfp_phy_clear_interrupt(struct efx_nic *efx)
++{
++ xenpack_clear_lasi_irqs(efx);
++}
++
++static int xfp_link_ok(struct efx_nic *efx)
++{
++ return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
++}
++
++static int xfp_phy_check_hw(struct efx_nic *efx)
++{
++ int rc = 0;
++ int link_up = xfp_link_ok(efx);
++ /* Simulate a PHY event if link state has changed */
++ if (link_up != efx->link_up) {
++ efx->link_up = link_up;
++ efx->mac_op->fake_phy_event(efx);
++ }
++
++ rc = efx->board_info.monitor(efx);
++ if (rc) {
++		EFX_ERR(efx, "XFP: sensor alert! Putting PHY into "
++			"low power.\n");
++ efx->phy_powered = 0;
++
++ mdio_clause45_set_mmds_lpower(efx, 1, XFP_REQUIRED_DEVS);
++ }
++
++ return rc;
++}
++
++static void xfp_phy_reconfigure(struct efx_nic *efx)
++{
++ struct xfp_phy_data *phy_data = efx->phy_data;
++
++ /* Reset the PHY when moving from transmitter off or powered off,
++ * to transmitter on and powered on */
++ if ((efx->phy_powered && !efx->tx_disabled) &&
++ (!phy_data->phy_powered || phy_data->tx_disabled))
++ xfp_reset_phy(efx);
++
++ mdio_clause45_transmit_disable(efx, efx->tx_disabled);
++ mdio_clause45_set_mmds_lpower(efx, !efx->phy_powered,
++ XFP_REQUIRED_DEVS);
++ mdio_clause45_phy_reconfigure(efx);
++
++ phy_data->tx_disabled = efx->tx_disabled;
++ phy_data->phy_powered = efx->phy_powered;
++ efx->link_up = xfp_link_ok(efx);
++ efx->link_options = GM_LPA_10000FULL;
++}
++
++
++static void xfp_phy_fini(struct efx_nic *efx)
++{
++ /* Clobber the LED if it was blinking */
++ efx->board_info.blink(efx, 0);
++
++ /* Free the context block */
++ kfree(efx->phy_data);
++ efx->phy_data = NULL;
++}
++
++struct efx_phy_operations falcon_xfp_phy_ops = {
++ .init = xfp_phy_init,
++ .reconfigure = xfp_phy_reconfigure,
++ .check_hw = xfp_phy_check_hw,
++ .fini = xfp_phy_fini,
++ .clear_interrupt = xfp_phy_clear_interrupt,
++ .reset_xaui = efx_port_dummy_op_void,
++ .mmds = XFP_REQUIRED_DEVS,
++ .loopbacks = XFP_LOOPBACKS,
++ /* No loopback appears to be reliable enough for self-test
++ * operation. So don't do it. */
++ .startup_loopback = LOOPBACK_PCS,
++};
+diff -rpuN linux-2.6.18.8/drivers/oprofile/buffer_sync.c linux-2.6.18-xen-3.3.0/drivers/oprofile/buffer_sync.c
+--- linux-2.6.18.8/drivers/oprofile/buffer_sync.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/buffer_sync.c 2008-08-21 11:36:07.000000000 +0200
+@@ -6,6 +6,10 @@
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
+ * This is the core of the buffer management. Each
+ * CPU buffer is processed and entered into the
+ * global event buffer. Such processing is necessary
+@@ -38,6 +42,7 @@ static cpumask_t marked_cpus = CPU_MASK_
+ static DEFINE_SPINLOCK(task_mortuary);
+ static void process_task_mortuary(void);
+
++static int cpu_current_domain[NR_CPUS];
+
+ /* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+@@ -146,6 +151,11 @@ static void end_sync(void)
+ int sync_start(void)
+ {
+ int err;
++ int i;
++
++ for (i = 0; i < NR_CPUS; i++) {
++ cpu_current_domain[i] = COORDINATOR_DOMAIN;
++ }
+
+ start_cpu_work();
+
+@@ -275,15 +285,31 @@ static void add_cpu_switch(int i)
+ last_cookie = INVALID_COOKIE;
+ }
+
+-static void add_kernel_ctx_switch(unsigned int in_kernel)
++static void add_cpu_mode_switch(unsigned int cpu_mode)
+ {
+ add_event_entry(ESCAPE_CODE);
+- if (in_kernel)
+- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
+- else
+- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
++ switch (cpu_mode) {
++ case CPU_MODE_USER:
++ add_event_entry(USER_ENTER_SWITCH_CODE);
++ break;
++ case CPU_MODE_KERNEL:
++ add_event_entry(KERNEL_ENTER_SWITCH_CODE);
++ break;
++ case CPU_MODE_XEN:
++ add_event_entry(XEN_ENTER_SWITCH_CODE);
++ break;
++ default:
++ break;
++ }
+ }
+-
++
++static void add_domain_switch(unsigned long domain_id)
++{
++ add_event_entry(ESCAPE_CODE);
++ add_event_entry(DOMAIN_SWITCH_CODE);
++ add_event_entry(domain_id);
++}
++
+ static void
+ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+ {
+@@ -348,9 +374,9 @@ static int add_us_sample(struct mm_struc
+ * for later lookup from userspace.
+ */
+ static int
+-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
++add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
+ {
+- if (in_kernel) {
++ if (cpu_mode >= CPU_MODE_KERNEL) {
+ add_sample_entry(s->eip, s->event);
+ return 1;
+ } else if (mm) {
+@@ -496,15 +522,21 @@ void sync_buffer(int cpu)
+ struct mm_struct *mm = NULL;
+ struct task_struct * new;
+ unsigned long cookie = 0;
+- int in_kernel = 1;
++ int cpu_mode = 1;
+ unsigned int i;
+ sync_buffer_state state = sb_buffer_start;
+ unsigned long available;
++ int domain_switch = 0;
+
+ mutex_lock(&buffer_mutex);
+
+ add_cpu_switch(cpu);
+
++ /* We need to assign the first samples in this CPU buffer to the
++ same domain that we were processing at the last sync_buffer */
++ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++ add_domain_switch(cpu_current_domain[cpu]);
++ }
+ /* Remember, only we can modify tail_pos */
+
+ available = get_slots(cpu_buf);
+@@ -512,16 +544,18 @@ void sync_buffer(int cpu)
+ for (i = 0; i < available; ++i) {
+ struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
+
+- if (is_code(s->eip)) {
+- if (s->event <= CPU_IS_KERNEL) {
+- /* kernel/userspace switch */
+- in_kernel = s->event;
++ if (is_code(s->eip) && !domain_switch) {
++ if (s->event <= CPU_MODE_XEN) {
++ /* xen/kernel/userspace switch */
++ cpu_mode = s->event;
+ if (state == sb_buffer_start)
+ state = sb_sample_start;
+- add_kernel_ctx_switch(s->event);
++ add_cpu_mode_switch(s->event);
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
++ } else if (s->event == CPU_DOMAIN_SWITCH) {
++ domain_switch = 1;
+ } else {
+ struct mm_struct * oldmm = mm;
+
+@@ -535,11 +569,21 @@ void sync_buffer(int cpu)
+ add_user_ctx_switch(new, cookie);
+ }
+ } else {
+- if (state >= sb_bt_start &&
+- !add_sample(mm, s, in_kernel)) {
+- if (state == sb_bt_start) {
+- state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ if (domain_switch) {
++ cpu_current_domain[cpu] = s->eip;
++ add_domain_switch(s->eip);
++ domain_switch = 0;
++ } else {
++ if (cpu_current_domain[cpu] !=
++ COORDINATOR_DOMAIN) {
++ add_sample_entry(s->eip, s->event);
++ }
++ else if (state >= sb_bt_start &&
++ !add_sample(mm, s, cpu_mode)) {
++ if (state == sb_bt_start) {
++ state = sb_bt_ignore;
++ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ }
+ }
+ }
+ }
+@@ -548,6 +592,11 @@ void sync_buffer(int cpu)
+ }
+ release_mm(mm);
+
++ /* We reset domain to COORDINATOR at each CPU switch */
++ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++ add_domain_switch(COORDINATOR_DOMAIN);
++ }
++
+ mark_done(cpu);
+
+ mutex_unlock(&buffer_mutex);
+diff -rpuN linux-2.6.18.8/drivers/oprofile/cpu_buffer.c linux-2.6.18-xen-3.3.0/drivers/oprofile/cpu_buffer.c
+--- linux-2.6.18.8/drivers/oprofile/cpu_buffer.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/cpu_buffer.c 2008-08-21 11:36:07.000000000 +0200
+@@ -6,6 +6,10 @@
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
+ * Each CPU has a local buffer that stores PC value/event
+ * pairs. We also log context switches when we notice them.
+ * Eventually each CPU's buffer is processed into the global
+@@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
+ #define DEFAULT_TIMER_EXPIRE (HZ / 10)
+ static int work_enabled;
+
++static int32_t current_domain = COORDINATOR_DOMAIN;
++
+ void free_cpu_buffers(void)
+ {
+ int i;
+@@ -57,7 +63,7 @@ int alloc_cpu_buffers(void)
+ goto fail;
+
+ b->last_task = NULL;
+- b->last_is_kernel = -1;
++ b->last_cpu_mode = -1;
+ b->tracing = 0;
+ b->buffer_size = buffer_size;
+ b->tail_pos = 0;
+@@ -113,7 +119,7 @@ void cpu_buffer_reset(struct oprofile_cp
+ * collected will populate the buffer with proper
+ * values to initialize the buffer
+ */
+- cpu_buf->last_is_kernel = -1;
++ cpu_buf->last_cpu_mode = -1;
+ cpu_buf->last_task = NULL;
+ }
+
+@@ -163,13 +169,13 @@ add_code(struct oprofile_cpu_buffer * bu
+ * because of the head/tail separation of the writer and reader
+ * of the CPU buffer.
+ *
+- * is_kernel is needed because on some architectures you cannot
++ * cpu_mode is needed because on some architectures you cannot
+ * tell if you are in kernel or user space simply by looking at
+- * pc. We tag this in the buffer by generating kernel enter/exit
+- * events whenever is_kernel changes
++ * pc. We tag this in the buffer by generating kernel/user (and xen)
++ * enter events whenever cpu_mode changes
+ */
+ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+- int is_kernel, unsigned long event)
++ int cpu_mode, unsigned long event)
+ {
+ struct task_struct * task;
+
+@@ -180,18 +186,18 @@ static int log_sample(struct oprofile_cp
+ return 0;
+ }
+
+- is_kernel = !!is_kernel;
+-
+ task = current;
+
+ /* notice a switch from user->kernel or vice versa */
+- if (cpu_buf->last_is_kernel != is_kernel) {
+- cpu_buf->last_is_kernel = is_kernel;
+- add_code(cpu_buf, is_kernel);
++ if (cpu_buf->last_cpu_mode != cpu_mode) {
++ cpu_buf->last_cpu_mode = cpu_mode;
++ add_code(cpu_buf, cpu_mode);
+ }
+-
++
+ /* notice a task switch */
+- if (cpu_buf->last_task != task) {
++ /* if not processing other domain samples */
++ if ((cpu_buf->last_task != task) &&
++ (current_domain == COORDINATOR_DOMAIN)) {
+ cpu_buf->last_task = task;
+ add_code(cpu_buf, (unsigned long)task);
+ }
+@@ -275,6 +281,25 @@ void oprofile_add_trace(unsigned long pc
+ add_sample(cpu_buf, pc, 0);
+ }
+
++int oprofile_add_domain_switch(int32_t domain_id)
++{
++ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
++
++ /* should have space for switching into and out of domain
++ (2 slots each) plus one sample and one cpu mode switch */
++ if (((nr_available_slots(cpu_buf) < 6) &&
++ (domain_id != COORDINATOR_DOMAIN)) ||
++ (nr_available_slots(cpu_buf) < 2))
++ return 0;
++
++ add_code(cpu_buf, CPU_DOMAIN_SWITCH);
++ add_sample(cpu_buf, domain_id, 0);
++
++ current_domain = domain_id;
++
++ return 1;
++}
++
+ /*
+ * This serves to avoid cpu buffer overflow, and makes sure
+ * the task mortuary progresses
+diff -rpuN linux-2.6.18.8/drivers/oprofile/cpu_buffer.h linux-2.6.18-xen-3.3.0/drivers/oprofile/cpu_buffer.h
+--- linux-2.6.18.8/drivers/oprofile/cpu_buffer.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/cpu_buffer.h 2008-08-21 11:36:07.000000000 +0200
+@@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
+ volatile unsigned long tail_pos;
+ unsigned long buffer_size;
+ struct task_struct * last_task;
+- int last_is_kernel;
++ int last_cpu_mode;
+ int tracing;
+ struct op_sample * buffer;
+ unsigned long sample_received;
+@@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
+ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+
+ /* transient events for the CPU buffer -> event buffer */
+-#define CPU_IS_KERNEL 1
+-#define CPU_TRACE_BEGIN 2
++#define CPU_MODE_USER 0
++#define CPU_MODE_KERNEL 1
++#define CPU_MODE_XEN 2
++#define CPU_TRACE_BEGIN 3
++#define CPU_DOMAIN_SWITCH 4
+
+ #endif /* OPROFILE_CPU_BUFFER_H */
+diff -rpuN linux-2.6.18.8/drivers/oprofile/event_buffer.h linux-2.6.18-xen-3.3.0/drivers/oprofile/event_buffer.h
+--- linux-2.6.18.8/drivers/oprofile/event_buffer.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/event_buffer.h 2008-08-21 11:36:07.000000000 +0200
+@@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
+ #define CPU_SWITCH_CODE 2
+ #define COOKIE_SWITCH_CODE 3
+ #define KERNEL_ENTER_SWITCH_CODE 4
+-#define KERNEL_EXIT_SWITCH_CODE 5
++#define USER_ENTER_SWITCH_CODE 5
+ #define MODULE_LOADED_CODE 6
+ #define CTX_TGID_CODE 7
+ #define TRACE_BEGIN_CODE 8
+ #define TRACE_END_CODE 9
++#define XEN_ENTER_SWITCH_CODE 10
++#define DOMAIN_SWITCH_CODE 11
+
+ #define INVALID_COOKIE ~0UL
+ #define NO_COOKIE 0UL
+
++/* Constant used to refer to coordinator domain (Xen) */
++#define COORDINATOR_DOMAIN -1
++
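++/* Illustrative framing: special records in the event buffer are escape
++ * sequences of unsigned longs, so add_domain_switch() in buffer_sync.c
++ * emits a switch to domain 3 as the three words
++ *	ESCAPE_CODE, DOMAIN_SWITCH_CODE, 3
++ * and the samples that follow belong to domain 3 until the next
++ * domain switch record. */
++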
+ /* add data to the event buffer */
+ void add_event_entry(unsigned long data);
+
+diff -rpuN linux-2.6.18.8/drivers/oprofile/oprof.c linux-2.6.18-xen-3.3.0/drivers/oprofile/oprof.c
+--- linux-2.6.18.8/drivers/oprofile/oprof.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/oprof.c 2008-08-21 11:36:07.000000000 +0200
+@@ -5,6 +5,10 @@
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+ #include <linux/kernel.h>
+@@ -19,7 +23,7 @@
+ #include "cpu_buffer.h"
+ #include "buffer_sync.h"
+ #include "oprofile_stats.h"
+-
++
+ struct oprofile_operations oprofile_ops;
+
+ unsigned long oprofile_started;
+@@ -33,6 +37,32 @@ static DEFINE_MUTEX(start_mutex);
+ */
+ static int timer = 0;
+
++int oprofile_set_active(int active_domains[], unsigned int adomains)
++{
++ int err;
++
++ if (!oprofile_ops.set_active)
++ return -EINVAL;
++
++ mutex_lock(&start_mutex);
++ err = oprofile_ops.set_active(active_domains, adomains);
++ mutex_unlock(&start_mutex);
++ return err;
++}
++
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
++{
++ int err;
++
++ if (!oprofile_ops.set_passive)
++ return -EINVAL;
++
++ mutex_lock(&start_mutex);
++ err = oprofile_ops.set_passive(passive_domains, pdomains);
++ mutex_unlock(&start_mutex);
++ return err;
++}
++
+ int oprofile_setup(void)
+ {
+ int err;
+diff -rpuN linux-2.6.18.8/drivers/oprofile/oprof.h linux-2.6.18-xen-3.3.0/drivers/oprofile/oprof.h
+--- linux-2.6.18.8/drivers/oprofile/oprof.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/oprof.h 2008-08-21 11:36:07.000000000 +0200
+@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
+ void oprofile_timer_init(struct oprofile_operations * ops);
+
+ int oprofile_set_backtrace(unsigned long depth);
++
++int oprofile_set_active(int active_domains[], unsigned int adomains);
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
+
+ #endif /* OPROF_H */
+diff -rpuN linux-2.6.18.8/drivers/oprofile/oprofile_files.c linux-2.6.18-xen-3.3.0/drivers/oprofile/oprofile_files.c
+--- linux-2.6.18.8/drivers/oprofile/oprofile_files.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/oprofile/oprofile_files.c 2008-08-21 11:36:07.000000000 +0200
+@@ -5,15 +5,21 @@
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+ #include <linux/fs.h>
+ #include <linux/oprofile.h>
++#include <asm/uaccess.h>
++#include <linux/ctype.h>
+
+ #include "event_buffer.h"
+ #include "oprofile_stats.h"
+ #include "oprof.h"
+-
++
+ unsigned long fs_buffer_size = 131072;
+ unsigned long fs_cpu_buffer_size = 8192;
+ unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file *
+ static struct file_operations dump_fops = {
+ .write = dump_write,
+ };
+-
++
++#define TMPBUFSIZE 512
++
++static unsigned int adomains = 0;
++static int active_domains[MAX_OPROF_DOMAINS + 1];
++static DEFINE_MUTEX(adom_mutex);
++
++static ssize_t adomain_write(struct file * file, char const __user * buf,
++ size_t count, loff_t * offset)
++{
++ char *tmpbuf;
++ char *startp, *endp;
++ int i;
++ unsigned long val;
++ ssize_t retval = count;
++
++ if (*offset)
++ return -EINVAL;
++ if (count > TMPBUFSIZE - 1)
++ return -EINVAL;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ if (copy_from_user(tmpbuf, buf, count)) {
++ kfree(tmpbuf);
++ return -EFAULT;
++ }
++ tmpbuf[count] = 0;
++
++ mutex_lock(&adom_mutex);
++
++ startp = tmpbuf;
++ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++ val = simple_strtoul(startp, &endp, 0);
++ if (endp == startp)
++ break;
++ while (ispunct(*endp) || isspace(*endp))
++ endp++;
++ active_domains[i] = val;
++ if (active_domains[i] != val)
++ /* Overflow, force error below */
++ i = MAX_OPROF_DOMAINS + 1;
++ startp = endp;
++ }
++ /* Force error on trailing junk */
++ adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++
++ kfree(tmpbuf);
++
++ if (adomains > MAX_OPROF_DOMAINS
++ || oprofile_set_active(active_domains, adomains)) {
++ adomains = 0;
++ retval = -EINVAL;
++ }
++
++ mutex_unlock(&adom_mutex);
++ return retval;
++}
++
++static ssize_t adomain_read(struct file * file, char __user * buf,
++ size_t count, loff_t * offset)
++{
++ char * tmpbuf;
++ size_t len;
++ int i;
++ ssize_t retval;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ mutex_lock(&adom_mutex);
++
++ len = 0;
++ for (i = 0; i < adomains; i++)
++ len += snprintf(tmpbuf + len,
++ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++ "%u ", active_domains[i]);
++ WARN_ON(len > TMPBUFSIZE);
++ if (len != 0 && len <= TMPBUFSIZE)
++ tmpbuf[len-1] = '\n';
++
++ mutex_unlock(&adom_mutex);
++
++ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++
++ kfree(tmpbuf);
++ return retval;
++}
++
++
++static struct file_operations active_domain_ops = {
++ .read = adomain_read,
++ .write = adomain_write,
++};
++
++static unsigned int pdomains = 0;
++static int passive_domains[MAX_OPROF_DOMAINS];
++static DEFINE_MUTEX(pdom_mutex);
++
++static ssize_t pdomain_write(struct file * file, char const __user * buf,
++ size_t count, loff_t * offset)
++{
++ char *tmpbuf;
++ char *startp, *endp;
++ int i;
++ unsigned long val;
++ ssize_t retval = count;
++
++ if (*offset)
++ return -EINVAL;
++ if (count > TMPBUFSIZE - 1)
++ return -EINVAL;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ if (copy_from_user(tmpbuf, buf, count)) {
++ kfree(tmpbuf);
++ return -EFAULT;
++ }
++ tmpbuf[count] = 0;
++
++ mutex_lock(&pdom_mutex);
++
++ startp = tmpbuf;
++ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++ val = simple_strtoul(startp, &endp, 0);
++ if (endp == startp)
++ break;
++ while (ispunct(*endp) || isspace(*endp))
++ endp++;
++ passive_domains[i] = val;
++ if (passive_domains[i] != val)
++ /* Overflow, force error below */
++ i = MAX_OPROF_DOMAINS + 1;
++ startp = endp;
++ }
++ /* Force error on trailing junk */
++ pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++
++ kfree(tmpbuf);
++
++ if (pdomains > MAX_OPROF_DOMAINS
++ || oprofile_set_passive(passive_domains, pdomains)) {
++ pdomains = 0;
++ retval = -EINVAL;
++ }
++
++ mutex_unlock(&pdom_mutex);
++ return retval;
++}
++
++static ssize_t pdomain_read(struct file * file, char __user * buf,
++ size_t count, loff_t * offset)
++{
++ char * tmpbuf;
++ size_t len;
++ int i;
++ ssize_t retval;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ mutex_lock(&pdom_mutex);
++
++ len = 0;
++ for (i = 0; i < pdomains; i++)
++ len += snprintf(tmpbuf + len,
++ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++ "%u ", passive_domains[i]);
++ WARN_ON(len > TMPBUFSIZE);
++ if (len != 0 && len <= TMPBUFSIZE)
++ tmpbuf[len-1] = '\n';
++
++ mutex_unlock(&pdom_mutex);
++
++ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++
++ kfree(tmpbuf);
++ return retval;
++}
++
++static struct file_operations passive_domain_ops = {
++ .read = pdomain_read,
++ .write = pdomain_write,
++};
++
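++/* Usage sketch (illustrative; assumes oprofilefs is mounted at the
++ * conventional /dev/oprofile): the two files created below accept a
++ * whitespace- or punctuation-separated list of domain IDs, so
++ *	echo "0 3 5" > /dev/oprofile/active_domains
++ * selects domains 0, 3 and 5 for active profiling, and reading the
++ * file back returns the currently configured list. */
++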
+ void oprofile_create_files(struct super_block * sb, struct dentry * root)
+ {
+ oprofilefs_create_file(sb, root, "enable", &enable_fops);
+ oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
++ oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
++ oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
+ oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+ oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+ oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
+diff -rpuN linux-2.6.18.8/drivers/pci/bus.c linux-2.6.18-xen-3.3.0/drivers/pci/bus.c
+--- linux-2.6.18.8/drivers/pci/bus.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/pci/bus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -17,6 +17,8 @@
+
+ #include "pci.h"
+
++extern int pci_mem_align;
++
+ /**
+ * pci_bus_alloc_resource - allocate a resource from a parent bus
+ * @bus: PCI bus
+@@ -44,6 +46,11 @@ pci_bus_alloc_resource(struct pci_bus *b
+
+ type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+
++	/* If the boot parameter 'pci-mem-align' was specified then we
++	   need to align memory allocations to at least page size. */
++ if (pci_mem_align && (align < (PAGE_SIZE-1)))
++ align = PAGE_SIZE - 1;
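++	/* (Illustrative rationale, not stated in this patch: page-aligned
++	 * allocations keep each device's MMIO in distinct pages, which
++	 * matters when device memory is granted to guests at page
++	 * granularity.) */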
++
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ struct resource *r = bus->resource[i];
+ if (!r)
+diff -rpuN linux-2.6.18.8/drivers/pci/msi-xen.c linux-2.6.18-xen-3.3.0/drivers/pci/msi-xen.c
+--- linux-2.6.18.8/drivers/pci/msi-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/pci/msi-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,746 @@
++/*
++ * File: msi.c
++ * Purpose: PCI Message Signaled Interrupt (MSI)
++ *
++ * Copyright (C) 2003-2004 Intel
++ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
++ */
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/ioport.h>
++#include <linux/smp_lock.h>
++#include <linux/pci.h>
++#include <linux/proc_fs.h>
++
++#include <asm/errno.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++
++#include "pci.h"
++#include "msi.h"
++
++static int pci_msi_enable = 1;
++
++static struct msi_ops *msi_ops;
++
++int msi_register(struct msi_ops *ops)
++{
++ msi_ops = ops;
++ return 0;
++}
++
++static LIST_HEAD(msi_dev_head);
++DEFINE_SPINLOCK(msi_dev_lock);
++
++struct msi_dev_list {
++ struct pci_dev *dev;
++ struct list_head list;
++ spinlock_t pirq_list_lock;
++ struct list_head pirq_list_head;
++};
++
++struct msi_pirq_entry {
++ struct list_head list;
++ int pirq;
++ int entry_nr;
++};
++
++static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
++{
++ struct msi_dev_list *msi_dev_list, *ret = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&msi_dev_lock, flags);
++
++ list_for_each_entry(msi_dev_list, &msi_dev_head, list)
++ if ( msi_dev_list->dev == dev )
++ ret = msi_dev_list;
++
++ if ( ret ) {
++ spin_unlock_irqrestore(&msi_dev_lock, flags);
++ return ret;
++ }
++
++	/* No msi_dev entry has been allocated for this device yet. */
++ ret = kmalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);
++
++ /* Failed to allocate msi_dev structure */
++ if ( !ret ) {
++ spin_unlock_irqrestore(&msi_dev_lock, flags);
++ return NULL;
++ }
++
++ spin_lock_init(&ret->pirq_list_lock);
++ INIT_LIST_HEAD(&ret->pirq_list_head);
++ list_add_tail(&ret->list, &msi_dev_head);
++ spin_unlock_irqrestore(&msi_dev_lock, flags);
++ return ret;
++}
++
++static int attach_pirq_entry(int pirq, int entry_nr,
++ struct msi_dev_list *msi_dev_entry)
++{
++ struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++ unsigned long flags;
++
++ if (!entry)
++ return -ENOMEM;
++ entry->pirq = pirq;
++ entry->entry_nr = entry_nr;
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++ return 0;
++}
++
++static void detach_pirq_entry(int entry_nr,
++ struct msi_dev_list *msi_dev_entry)
++{
++ unsigned long flags;
++ struct msi_pirq_entry *pirq_entry;
++
++ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++ if (pirq_entry->entry_nr == entry_nr) {
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_del(&pirq_entry->list);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++ kfree(pirq_entry);
++ return;
++ }
++ }
++}
++
++/*
++ * pciback will provide the device's owner
++ */
++static int (*get_owner)(struct pci_dev *dev);
++
++int register_msi_get_owner(int (*func)(struct pci_dev *dev))
++{
++ if (get_owner) {
++ printk(KERN_WARNING "register msi_get_owner again\n");
++ return -EEXIST;
++ }
++ get_owner = func;
++ return 0;
++}
++
++int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
++{
++ if (get_owner != func)
++ return -EINVAL;
++ get_owner = NULL;
++ return 0;
++}
++
++static int msi_get_dev_owner(struct pci_dev *dev)
++{
++ int owner;
++
++ BUG_ON(!is_initial_xendomain());
++ if (get_owner && (owner = get_owner(dev)) >= 0) {
++		printk(KERN_INFO "MSI: dev %x is owned by domain %x\n",
++		       dev->devfn, owner);
++ return owner;
++ }
++
++ return DOMID_SELF;
++}
++
++static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
++{
++ struct physdev_unmap_pirq unmap;
++ int rc;
++
++ unmap.domid = msi_get_dev_owner(dev);
++ unmap.pirq = pirq;
++
++ if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
++ printk(KERN_WARNING "unmap irq %x failed\n", pirq);
++
++ if (rc < 0)
++ return rc;
++ return 0;
++}
++
++static u64 find_table_base(struct pci_dev *dev, int pos)
++{
++ u8 bar;
++ u32 reg;
++ unsigned long flags;
++
++ pci_read_config_dword(dev, msix_table_offset_reg(pos), &reg);
++ bar = reg & PCI_MSIX_FLAGS_BIRMASK;
++
++ flags = pci_resource_flags(dev, bar);
++ if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
++ return 0;
++
++ return pci_resource_start(dev, bar);
++}
++
++/*
++ * Protected by msi_lock
++ */
++static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
++ int entry_nr, u64 table_base)
++{
++ struct physdev_map_pirq map_irq;
++ int rc;
++ domid_t domid = DOMID_SELF;
++
++ domid = msi_get_dev_owner(dev);
++
++ map_irq.domid = domid;
++ map_irq.type = MAP_PIRQ_TYPE_MSI;
++ map_irq.index = -1;
++ map_irq.pirq = pirq;
++ map_irq.bus = dev->bus->number;
++ map_irq.devfn = dev->devfn;
++ map_irq.entry_nr = entry_nr;
++ map_irq.table_base = table_base;
++
++ if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
++ printk(KERN_WARNING "map irq failed\n");
++
++ if (rc < 0)
++ return rc;
++
++ return map_irq.pirq;
++}
++
++static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
++{
++ return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
++}
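++
++/* Note (as understood from the PHYSDEVOP_map_pirq interface): passing
++ * pirq == -1 asks the hypervisor to allocate a free pirq for the new
++ * MSI vector; the chosen value comes back in map_irq.pirq and is what
++ * msi_map_pirq_to_vector() returns on success. */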
++
++static int msi_init(void)
++{
++ static int status = 0;
++
++ if (pci_msi_quirk) {
++ pci_msi_enable = 0;
++ printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
++ status = -EINVAL;
++ }
++
++ return status;
++}
++
++void pci_scan_msi_device(struct pci_dev *dev) { }
++
++void disable_msi_mode(struct pci_dev *dev, int pos, int type)
++{
++ u16 control;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ if (type == PCI_CAP_ID_MSI) {
++ /* Set enabled bits to single MSI & enable MSI_enable bit */
++ msi_disable(control);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msi_enabled = 0;
++ } else {
++ msix_disable(control);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msix_enabled = 0;
++ }
++ if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
++ /* PCI Express Endpoint device detected */
++ pci_intx(dev, 1); /* enable intx */
++ }
++}
++
++static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
++{
++ u16 control;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ if (type == PCI_CAP_ID_MSI) {
++ /* Set enabled bits to single MSI & enable MSI_enable bit */
++ msi_enable(control, 1);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msi_enabled = 1;
++ } else {
++ msix_enable(control);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msix_enabled = 1;
++ }
++ if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
++ /* PCI Express Endpoint device detected */
++ pci_intx(dev, 0); /* disable intx */
++ }
++}
++
++#ifdef CONFIG_PM
++int pci_save_msi_state(struct pci_dev *dev)
++{
++ int pos;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (pos <= 0 || dev->no_msi)
++ return 0;
++
++ if (!dev->msi_enabled)
++ return 0;
++
++ /* Restore dev->irq to its default pin-assertion vector */
++ msi_unmap_pirq(dev, dev->irq);
++ /* Disable MSI mode */
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++ /* Set the flags for use of restore */
++ dev->msi_enabled = 1;
++ return 0;
++}
++
++void pci_restore_msi_state(struct pci_dev *dev)
++{
++ int pos, pirq;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (pos <= 0)
++ return;
++
++ if (!dev->msi_enabled)
++ return;
++
++ pirq = msi_map_pirq_to_vector(dev, dev->irq, 0, 0);
++ if (pirq < 0)
++ return;
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++}
++
++int pci_save_msix_state(struct pci_dev *dev)
++{
++ int pos;
++ unsigned long flags;
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (pos <= 0 || dev->no_msi)
++ return 0;
++
++ /* save the capability */
++ if (!dev->msix_enabled)
++ return 0;
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list)
++ msi_unmap_pirq(dev, pirq_entry->pirq);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++ /* Set the flags for use of restore */
++ dev->msix_enabled = 1;
++
++ return 0;
++}
++
++void pci_restore_msix_state(struct pci_dev *dev)
++{
++ int pos;
++ unsigned long flags;
++ u64 table_base;
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (pos <= 0)
++ return;
++
++ if (!dev->msix_enabled)
++ return;
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++ table_base = find_table_base(dev, pos);
++ if (!table_base)
++ return;
++
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list)
++ msi_map_pirq_to_vector(dev, pirq_entry->pirq,
++ pirq_entry->entry_nr, table_base);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++}
++#endif
++
++/**
++ * msi_capability_init - configure device's MSI capability structure
++ * @dev: pointer to the pci_dev data structure of MSI device function
++ *
++ * Set up the MSI capability structure of the device function with a
++ * single MSI vector, regardless of whether the device function is
++ * capable of handling multiple messages. A return of zero indicates
++ * successful setup of entry zero with the new MSI vector; non-zero
++ * otherwise.
++ **/
++static int msi_capability_init(struct pci_dev *dev)
++{
++ int pos, pirq;
++ u16 control;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++
++ pirq = msi_map_vector(dev, 0, 0);
++ if (pirq < 0)
++ return -EBUSY;
++
++ dev->irq = pirq;
++ /* Set MSI enabled bits */
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++ dev->msi_enabled = 1;
++
++ return 0;
++}
++
++/**
++ * msix_capability_init - configure device's MSI-X capability
++ * @dev: pointer to the pci_dev data structure of MSI-X device function
++ * @entries: pointer to an array of struct msix_entry entries
++ * @nvec: number of @entries
++ *
++ * Set up the MSI-X capability structure of the device function with the
++ * requested number of MSI-X vectors. A return of zero indicates
++ * successful setup of the requested MSI-X entries with allocated
++ * vectors; non-zero otherwise.
++ **/
++static int msix_capability_init(struct pci_dev *dev,
++ struct msix_entry *entries, int nvec)
++{
++ u64 table_base;
++ int pirq, i, j, mapped, pos;
++ struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
++ struct msi_pirq_entry *pirq_entry;
++
++ if (!msi_dev_entry)
++ return -ENOMEM;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ table_base = find_table_base(dev, pos);
++ if (!table_base)
++ return -ENODEV;
++
++ /* MSI-X Table Initialization */
++ for (i = 0; i < nvec; i++) {
++ mapped = 0;
++ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++ if (pirq_entry->entry_nr == entries[i].entry) {
++				printk(KERN_WARNING "msix entry %d for dev %02x:%02x:%01x was "
++				       "not freed before being acquired again\n", entries[i].entry,
++ dev->bus->number, PCI_SLOT(dev->devfn),
++ PCI_FUNC(dev->devfn));
++ (entries + i)->vector = pirq_entry->pirq;
++ mapped = 1;
++ break;
++ }
++ }
++ if (mapped)
++ continue;
++ pirq = msi_map_vector(dev, entries[i].entry, table_base);
++ if (pirq < 0)
++ break;
++ attach_pirq_entry(pirq, entries[i].entry, msi_dev_entry);
++ (entries + i)->vector = pirq;
++ }
++
++ if (i != nvec) {
++ for (j = --i; j >= 0; j--) {
++ msi_unmap_pirq(dev, entries[j].vector);
++ detach_pirq_entry(entries[j].entry, msi_dev_entry);
++ entries[j].vector = 0;
++ }
++ return -EBUSY;
++ }
++
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++ dev->msix_enabled = 1;
++
++ return 0;
++}
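++
++/*
++ * Note: if any entry fails to map, msix_capability_init() unwinds all
++ * mappings made so far (unmap + detach) and returns -EBUSY, so the
++ * operation is all-or-nothing from the caller's point of view.
++ */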
++
++/**
++ * pci_enable_msi - configure device's MSI capability structure
++ * @dev: pointer to the pci_dev data structure of MSI device function
++ *
++ * Set up the MSI capability structure of the device function with a
++ * single MSI vector when its software driver requests that MSI mode be
++ * enabled on the hardware device function. A return of zero indicates
++ * successful setup of entry zero with the new MSI vector; a non-zero
++ * return indicates failure.
++ **/
++extern int pci_frontend_enable_msi(struct pci_dev *dev);
++int pci_enable_msi(struct pci_dev* dev)
++{
++ struct pci_bus *bus;
++ int pos, temp, status = -EINVAL;
++
++ if (!pci_msi_enable || !dev)
++ return status;
++
++ if (dev->no_msi)
++ return status;
++
++ for (bus = dev->bus; bus; bus = bus->parent)
++ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
++ return -EINVAL;
++
++ status = msi_init();
++ if (status < 0)
++ return status;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++	if (!is_initial_xendomain()) {
++ int ret;
++
++ temp = dev->irq;
++ ret = pci_frontend_enable_msi(dev);
++ if (ret)
++ return ret;
++
++ dev->irq_old = temp;
++
++ return ret;
++ }
++#endif
++
++ temp = dev->irq;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (!pos)
++ return -EINVAL;
++
++	/* Check whether the driver already requested MSI-X vectors */
++ if (dev->msix_enabled) {
++ printk(KERN_INFO "PCI: %s: Can't enable MSI. "
++ "Device already has MSI-X vectors assigned\n",
++ pci_name(dev));
++ dev->irq = temp;
++ return -EINVAL;
++ }
++
++ status = msi_capability_init(dev);
++	if (!status)
++ dev->irq_old = temp;
++ else
++ dev->irq = temp;
++
++ return status;
++}
++
++extern void pci_frontend_disable_msi(struct pci_dev* dev);
++void pci_disable_msi(struct pci_dev* dev)
++{
++ int pos;
++ int pirq;
++
++ if (!pci_msi_enable)
++ return;
++ if (!dev)
++ return;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ pci_frontend_disable_msi(dev);
++ dev->irq = dev->irq_old;
++ return;
++ }
++#endif
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (!pos)
++ return;
++
++ pirq = dev->irq;
++ /* Restore dev->irq to its default pin-assertion vector */
++ dev->irq = dev->irq_old;
++ msi_unmap_pirq(dev, pirq);
++
++ /* Disable MSI mode */
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++}
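++
++/*
++ * After pci_disable_msi() the device falls back to its pin-based
++ * interrupt: dev->irq is restored to the value saved in dev->irq_old
++ * by pci_enable_msi() before the pirq is unmapped.
++ */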
++
++/**
++ * pci_enable_msix - configure device's MSI-X capability structure
++ * @dev: pointer to the pci_dev data structure of MSI-X device function
++ * @entries: pointer to an array of MSI-X entries
++ * @nvec: number of MSI-X vectors requested for allocation by device driver
++ *
++ * Set up the MSI-X capability structure of the device function with the
++ * number of requested vectors when its driver requests MSI-X mode to be
++ * enabled on the hardware device function. A return of zero indicates
++ * successful configuration of the MSI-X capability structure with the
++ * newly allocated MSI-X vectors. A return of < 0 indicates a failure,
++ * while a return of > 0 indicates that the request exceeds the number of
++ * available vectors; the driver should retry with the returned value.
++ **/
++extern int pci_frontend_enable_msix(struct pci_dev *dev,
++ struct msix_entry *entries, int nvec);
++int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
++{
++ struct pci_bus *bus;
++ int status, pos, nr_entries;
++ int i, j, temp;
++ u16 control;
++
++ if (!pci_msi_enable || !dev || !entries)
++ return -EINVAL;
++
++ if (dev->no_msi)
++ return -EINVAL;
++
++ for (bus = dev->bus; bus; bus = bus->parent)
++ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
++ return -EINVAL;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ int ret;
++
++ ret = pci_frontend_enable_msix(dev, entries, nvec);
++ if (ret) {
++			printk(KERN_WARNING "pci_frontend_enable_msix failed: %x\n", ret);
++ return ret;
++ }
++
++ return 0;
++ }
++#endif
++
++ status = msi_init();
++ if (status < 0)
++ return status;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (!pos)
++ return -EINVAL;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ nr_entries = multi_msix_capable(control);
++ if (nvec > nr_entries)
++ return -EINVAL;
++
++ /* Check for any invalid entries */
++ for (i = 0; i < nvec; i++) {
++ if (entries[i].entry >= nr_entries)
++ return -EINVAL; /* invalid entry */
++ for (j = i + 1; j < nvec; j++) {
++ if (entries[i].entry == entries[j].entry)
++ return -EINVAL; /* duplicate entry */
++ }
++ }
++
++ temp = dev->irq;
++	/* Check whether the driver already requested an MSI vector */
++ if (dev->msi_enabled) {
++ printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
++ "Device already has an MSI vector assigned\n",
++ pci_name(dev));
++ dev->irq = temp;
++ return -EINVAL;
++ }
++
++ status = msix_capability_init(dev, entries, nvec);
++
++	if (!status)
++ dev->irq_old = temp;
++ else
++ dev->irq = temp;
++
++ return status;
++}
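++
++/*
++ * Note: when nvec exceeds the number of entries the device supports,
++ * this implementation returns -EINVAL rather than the positive vector
++ * count described in the comment above, so callers cannot rely on the
++ * documented retry hint here.
++ */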
++
++extern void pci_frontend_disable_msix(struct pci_dev* dev);
++void pci_disable_msix(struct pci_dev* dev)
++{
++ int pos;
++ u16 control;
++
++ if (!pci_msi_enable)
++ return;
++ if (!dev)
++ return;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ pci_frontend_disable_msix(dev);
++ dev->irq = dev->irq_old;
++ return;
++ }
++#endif
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (!pos)
++ return;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ if (!(control & PCI_MSIX_FLAGS_ENABLE))
++ return;
++
++ msi_remove_pci_irq_vectors(dev);
++
++ /* Disable MSI mode */
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++}
++
++/**
++ * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
++ * @dev: pointer to the pci_dev data structure of MSI(X) device function
++ *
++ * Called during hotplug removal, when the device function is
++ * hot-removed. All MSI/MSI-X vectors previously assigned to this
++ * device function are reclaimed to the unused state and may be
++ * reused later on.
++ **/
++void msi_remove_pci_irq_vectors(struct pci_dev* dev)
++{
++ unsigned long flags;
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ if (!pci_msi_enable || !dev)
++ return;
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++	if (!list_empty(&msi_dev_entry->pirq_list_head)) {
++		printk(KERN_WARNING "msix pirqs for dev %02x:%02x:%01x were not freed "
++		       "before being acquired again\n", dev->bus->number, PCI_SLOT(dev->devfn),
++ PCI_FUNC(dev->devfn));
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list) {
++ msi_unmap_pirq(dev, pirq_entry->pirq);
++ list_del(&pirq_entry->list);
++ kfree(pirq_entry);
++ }
++ }
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++ dev->irq = dev->irq_old;
++}
++
++void pci_no_msi(void)
++{
++ pci_msi_enable = 0;
++}
++
++EXPORT_SYMBOL(pci_enable_msi);
++EXPORT_SYMBOL(pci_disable_msi);
++EXPORT_SYMBOL(pci_enable_msix);
++EXPORT_SYMBOL(pci_disable_msix);
++#ifdef CONFIG_XEN
++EXPORT_SYMBOL(register_msi_get_owner);
++EXPORT_SYMBOL(unregister_msi_get_owner);
++#endif
++
+diff -rpuN linux-2.6.18.8/drivers/pci/quirks.c linux-2.6.18-xen-3.3.0/drivers/pci/quirks.c
+--- linux-2.6.18.8/drivers/pci/quirks.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/pci/quirks.c 2008-08-21 11:36:07.000000000 +0200
+@@ -23,6 +23,40 @@
+ #include <linux/acpi.h>
+ #include "pci.h"
+
++/* A global flag which signals whether we should page-align PCI mem windows. */
++int pci_mem_align = 0;
++
++static int __init set_pci_mem_align(char *str)
++{
++ pci_mem_align = 1;
++ return 1;
++}
++__setup("pci-mem-align", set_pci_mem_align);
++
++/* This quirk forces all memory resources which are assigned to PCI
++ * devices to be page-aligned.
++ */
++static void __devinit quirk_align_mem_resources(struct pci_dev *dev)
++{
++ int i;
++ struct resource *r;
++ resource_size_t old_start;
++
++ if (!pci_mem_align)
++ return;
++
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++ r = &dev->resource[i];
++ if ((r == NULL) || !(r->flags & IORESOURCE_MEM))
++ continue;
++
++ old_start = r->start;
++ r->start = (r->start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
++ r->end = r->end - (old_start - r->start);
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_align_mem_resources);
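++
++/*
++ * Usage sketch: boot with "pci-mem-align" on the kernel command line to
++ * have every PCI memory BAR rounded up to a PAGE_SIZE boundary. The
++ * usual motivation under Xen (an assumption, not stated in this patch)
++ * is device passthrough, where MMIO access is granted to guests at page
++ * granularity, so two devices must not share a page.
++ */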
++
+ /* The Mellanox Tavor device gives false positive parity errors
+ * Mark this device with a broken_parity_status, to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+@@ -839,6 +873,25 @@ static void __init quirk_disable_pxb(str
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb );
+
++static void __devinit quirk_sb600_sata(struct pci_dev *pdev)
++{
++ /* set sb600/sb700/sb800 sata to ahci mode */
++ u8 tmp;
++
++ pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
++ if (tmp == 0x01) {
++ pci_read_config_byte(pdev, 0x40, &tmp);
++ pci_write_config_byte(pdev, 0x40, tmp|1);
++ pci_write_config_byte(pdev, 0x9, 1);
++ pci_write_config_byte(pdev, 0xa, 6);
++ pci_write_config_byte(pdev, 0x40, tmp);
++
++ pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
++ dev_info(&pdev->dev, "set SATA to AHCI mode\n");
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_sb600_sata);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_sb600_sata);
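++
++/*
++ * What the writes above do: config offset 0x9 is the programming-
++ * interface byte and 0xa the subclass byte, so writing 1 and 6 rewrites
++ * the class code to SATA/AHCI (0x0106). Bit 0 of the device-specific
++ * register 0x40 apparently unlocks the class-code bytes while they are
++ * updated; its original value is restored afterwards.
++ */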
+
+ /*
+ * Serverworks CSB5 IDE does not fully support native mode
+@@ -1494,10 +1547,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_N
+
+ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
+ {
+- u16 command;
++ u16 command, pmcsr;
+ u32 bar;
+ u8 __iomem *csr;
+ u8 cmd_hi;
++ int pm;
+
+ switch (dev->device) {
+ /* PCI IDs taken from drivers/net/e100.c */
+@@ -1532,6 +1586,17 @@ static void __devinit quirk_e100_interru
+ if (!(command & PCI_COMMAND_MEMORY) || !bar)
+ return;
+
++ /*
++ * Check that the device is in the D0 power state. If it's not,
++	 * there is no point in looking any further.
++ */
++ pm = pci_find_capability(dev, PCI_CAP_ID_PM);
++ if (pm) {
++ pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
++ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
++ return;
++ }
++
+ csr = ioremap(bar, 8);
+ if (!csr) {
+ printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
+diff -rpuN linux-2.6.18.8/drivers/pnp/manager.c linux-2.6.18-xen-3.3.0/drivers/pnp/manager.c
+--- linux-2.6.18.8/drivers/pnp/manager.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/pnp/manager.c 2008-08-21 11:36:07.000000000 +0200
+@@ -168,7 +168,7 @@ static int pnp_assign_irq(struct pnp_dev
+ return 0;
+ }
+
+-static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
++static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
+ {
+ resource_size_t *start, *end;
+ unsigned long *flags;
+@@ -179,18 +179,14 @@ static int pnp_assign_dma(struct pnp_dev
+ 1, 3, 5, 6, 7, 0, 2, 4
+ };
+
+- if (!dev || !rule)
+- return -EINVAL;
+-
+ if (idx >= PNP_MAX_DMA) {
+ pnp_err("More than 2 dmas is incompatible with pnp specifications.");
+- /* pretend we were successful so at least the manager won't try again */
+- return 1;
++ return;
+ }
+
+ /* check if this resource has been manually set, if so skip */
+ if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO))
+- return 1;
++ return;
+
+ start = &dev->res.dma_resource[idx].start;
+ end = &dev->res.dma_resource[idx].end;
+@@ -200,19 +196,17 @@ static int pnp_assign_dma(struct pnp_dev
+ *flags |= rule->flags | IORESOURCE_DMA;
+ *flags &= ~IORESOURCE_UNSET;
+
+- if (!rule->map) {
+- *flags |= IORESOURCE_DISABLED;
+- return 1; /* skip disabled resource requests */
+- }
+-
+ for (i = 0; i < 8; i++) {
+ if(rule->map & (1<<xtab[i])) {
+ *start = *end = xtab[i];
+ if(pnp_check_dma(dev, idx))
+- return 1;
++ return;
+ }
+ }
+- return 0;
++#ifdef MAX_DMA_CHANNELS
++ *start = *end = MAX_DMA_CHANNELS;
++#endif
++ *flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ }
+
+ /**
+@@ -331,8 +325,7 @@ static int pnp_assign_resources(struct p
+ irq = irq->next;
+ }
+ while (dma) {
+- if (!pnp_assign_dma(dev, dma, ndma))
+- goto fail;
++ pnp_assign_dma(dev, dma, ndma);
+ ndma++;
+ dma = dma->next;
+ }
+@@ -367,8 +360,7 @@ static int pnp_assign_resources(struct p
+ irq = irq->next;
+ }
+ while (dma) {
+- if (!pnp_assign_dma(dev, dma, ndma))
+- goto fail;
++ pnp_assign_dma(dev, dma, ndma);
+ ndma++;
+ dma = dma->next;
+ }
+diff -rpuN linux-2.6.18.8/drivers/scsi/ahci.c linux-2.6.18-xen-3.3.0/drivers/scsi/ahci.c
+--- linux-2.6.18.8/drivers/scsi/ahci.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/scsi/ahci.c 2008-08-21 11:36:07.000000000 +0200
+@@ -78,6 +78,7 @@ enum {
+
+ board_ahci = 0,
+ board_ahci_vt8251 = 1,
++ board_ahci_sb700 = 2,
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+@@ -283,6 +284,16 @@ static const struct ata_port_info ahci_p
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &ahci_ops,
+ },
++ {
++ .sht = &ahci_sht,
++ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
++ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
++ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
++ ATA_FLAG_IPM,
++ .pio_mask = 0x1f, /* pio0-4 */
++ .udma_mask = 0x7f,
++ .port_ops = &ahci_ops,
++ },
+ };
+
+ static const struct pci_device_id ahci_pci_tbl[] = {
+@@ -317,6 +328,36 @@ static const struct pci_device_id ahci_p
+ board_ahci }, /* ICH8M */
+ { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH8M */
++ { PCI_VENDOR_ID_INTEL, 0x2922, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9 */
++ { PCI_VENDOR_ID_INTEL, 0x2923, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9 */
++ { PCI_VENDOR_ID_INTEL, 0x2924, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9 */
++ { PCI_VENDOR_ID_INTEL, 0x2925, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9 */
++ { PCI_VENDOR_ID_INTEL, 0x2927, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9 */
++ { PCI_VENDOR_ID_INTEL, 0x2929, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9M */
++ { PCI_VENDOR_ID_INTEL, 0x292a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9M */
++ { PCI_VENDOR_ID_INTEL, 0x292b, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9M */
++ { PCI_VENDOR_ID_INTEL, 0x292f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9M */
++ { PCI_VENDOR_ID_INTEL, 0x294d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9 */
++ { PCI_VENDOR_ID_INTEL, 0x294e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH9M */
++ { PCI_VENDOR_ID_INTEL, 0x3a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH10 */
++ { PCI_VENDOR_ID_INTEL, 0x3a05, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH10 */
++ { PCI_VENDOR_ID_INTEL, 0x3a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH10 */
++ { PCI_VENDOR_ID_INTEL, 0x3a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci }, /* ICH10 */
+
+ /* JMicron */
+ { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+@@ -335,6 +376,18 @@ static const struct pci_device_id ahci_p
+ board_ahci }, /* ATI SB600 non-raid */
+ { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ATI SB600 raid */
++ { PCI_VENDOR_ID_ATI, 0x4390, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci_sb700 },
++ { PCI_VENDOR_ID_ATI, 0x4391, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci_sb700 },
++ { PCI_VENDOR_ID_ATI, 0x4392, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci_sb700 },
++ { PCI_VENDOR_ID_ATI, 0x4393, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci_sb700 },
++ { PCI_VENDOR_ID_ATI, 0x4394, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci_sb700 },
++ { PCI_VENDOR_ID_ATI, 0x4395, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ board_ahci_sb700 },
+
+ /* VIA */
+ { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+diff -rpuN linux-2.6.18.8/drivers/scsi/ata_piix.c linux-2.6.18-xen-3.3.0/drivers/scsi/ata_piix.c
+--- linux-2.6.18.8/drivers/scsi/ata_piix.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/scsi/ata_piix.c 2008-08-21 11:36:07.000000000 +0200
+@@ -125,6 +125,8 @@ enum {
+ ich6m_sata_ahci = 6,
+ ich7m_sata_ahci = 7,
+ ich8_sata_ahci = 8,
++ ich9_sata_ahci = 9,
++ ich8_2port_sata = 10,
+
+ /* constants for mapping table */
+ P0 = 0, /* port 0 */
+@@ -192,12 +194,32 @@ static const struct pci_device_id piix_p
+ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci },
+ /* Enterprise Southbridge 2 (where's the datasheet?) */
+ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+- /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
++ /* SATA Controller 1 IDE (ICH8) */
+ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+- /* SATA Controller 2 IDE (ICH8, ditto) */
+- { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
++ /* SATA Controller 2 IDE (ICH8) */
++ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* Mobile SATA Controller IDE (ICH8M, ditto) */
+ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
++ /* SATA Controller 1 IDE (ICH9) */
++ { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich9_sata_ahci },
++ /* SATA Controller 1 IDE (ICH9) */
++ { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich9_sata_ahci },
++ /* SATA Controller 2 IDE (ICH9) */
++ { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich9_sata_ahci },
++ /* Mobile SATA Controller 1 IDE (ICH9M) */
++ { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich9_sata_ahci },
++ /* Mobile SATA Controller 2 IDE (ICH9M) */
++ { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich9_sata_ahci },
++ /* Mobile SATA Controller 2 IDE (ICH9M) */
++ { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich9_sata_ahci },
++ /* SATA Controller IDE (ICH10) */
++ { 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
++ /* SATA Controller IDE (ICH10) */
++ { 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (ICH10) */
++ { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
++ /* SATA Controller IDE (ICH10) */
++ { 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
+ { } /* terminate list */
+ };
+@@ -361,9 +383,34 @@ static const struct piix_map_db ich8_map
+ .present_shift = 8,
+ .map = {
+ /* PM PS SM SS MAP */
+- { P0, NA, P1, NA }, /* 00b (hardwired) */
++ { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
+ { RV, RV, RV, RV },
+- { RV, RV, RV, RV }, /* 10b (never) */
++ { IDE, IDE, NA, NA }, /* 10b (IDE mode) */
++ { RV, RV, RV, RV },
++ },
++};
++
++static const struct piix_map_db ich9_map_db = {
++ .mask = 0x3,
++ .port_enable = 0x3,
++ .present_shift = 8,
++ .map = {
++ /* PM PS SM SS MAP */
++ { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
++ { RV, RV, RV, RV },
++ { IDE, IDE, NA, NA }, /* 10b (IDE mode) */
++ { RV, RV, RV, RV },
++ },
++};
++
++static const struct piix_map_db ich8_2port_map_db = {
++ .mask = 0x3,
++ .port_enable = 0x3,
++ .map = {
++ /* PM PS SM SS MAP */
++ { P0, NA, P1, NA }, /* 00b */
++ { RV, RV, RV, RV }, /* 01b */
++ { RV, RV, RV, RV }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+ };
+@@ -376,6 +423,8 @@ static const struct piix_map_db *piix_ma
+ [ich6m_sata_ahci] = &ich6m_map_db,
+ [ich7m_sata_ahci] = &ich7m_map_db,
+ [ich8_sata_ahci] = &ich8_map_db,
++ [ich9_sata_ahci] = &ich9_map_db,
++ [ich8_2port_sata] = &ich8_2port_map_db,
+ };
+
+ static struct ata_port_info piix_port_info[] = {
+@@ -487,6 +536,30 @@ static struct ata_port_info piix_port_in
+ .udma_mask = 0x7f, /* udma0-6 */
+ .port_ops = &piix_sata_ops,
+ },
++
++ /* ich9_sata_ahci */
++ {
++ .sht = &piix_sht,
++ .host_flags = ATA_FLAG_SATA |
++ PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
++ PIIX_FLAG_AHCI,
++ .pio_mask = 0x1f, /* pio0-4 */
++ .mwdma_mask = 0x07, /* mwdma0-2 */
++ .udma_mask = 0x7f, /* udma0-6 */
++ .port_ops = &piix_sata_ops,
++ },
++
++	/* ich8_2port_sata */
++ {
++ .sht = &piix_sht,
++ .host_flags = ATA_FLAG_SATA |
++ PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
++ PIIX_FLAG_AHCI,
++ .pio_mask = 0x1f, /* pio0-4 */
++ .mwdma_mask = 0x07, /* mwdma0-2 */
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &piix_pata_ops,
++ },
+ };
+
+ static struct pci_bits piix_enable_bits[] = {
+diff -rpuN linux-2.6.18.8/drivers/serial/Kconfig linux-2.6.18-xen-3.3.0/drivers/serial/Kconfig
+--- linux-2.6.18.8/drivers/serial/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/serial/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -11,6 +11,7 @@ menu "Serial drivers"
+ config SERIAL_8250
+ tristate "8250/16550 and compatible serial support"
+ depends on (BROKEN || !SPARC)
++ depends on !XEN_DISABLE_SERIAL
+ select SERIAL_CORE
+ ---help---
+ This selects whether you want to include the driver for the standard
+diff -rpuN linux-2.6.18.8/drivers/video/console/Kconfig linux-2.6.18-xen-3.3.0/drivers/video/console/Kconfig
+--- linux-2.6.18.8/drivers/video/console/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/video/console/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -53,6 +53,7 @@ config VGACON_SOFT_SCROLLBACK_SIZE
+ config VIDEO_SELECT
+ bool "Video mode selection support"
+ depends on X86 && VGA_CONSOLE
++ depends on !XEN
+ ---help---
+ This enables support for text mode selection on kernel startup. If
+ you want to take advantage of some high-resolution text mode your
+diff -rpuN linux-2.6.18.8/drivers/video/Kconfig linux-2.6.18-xen-3.3.0/drivers/video/Kconfig
+--- linux-2.6.18.8/drivers/video/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/video/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -1254,7 +1254,7 @@ config FB_CYBLA
+ tristate "Cyberblade/i1 support"
+ depends on FB && PCI && X86_32 && !64BIT
+ select FB_CFB_IMAGEBLIT
+- select VIDEO_SELECT
++ select VIDEO_SELECT if !XEN
+ ---help---
+ This driver is supposed to support the Trident Cyberblade/i1
+ graphics core integrated in the VIA VT8601A North Bridge,
+diff -rpuN linux-2.6.18.8/drivers/xen/balloon/balloon.c linux-2.6.18-xen-3.3.0/drivers/xen/balloon/balloon.c
+--- linux-2.6.18.8/drivers/xen/balloon/balloon.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/balloon/balloon.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,724 @@
++/******************************************************************************
++ * balloon.c
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include <linux/mutex.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++#include <asm/maddr.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <linux/list.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#ifdef CONFIG_PROC_FS
++static struct proc_dir_entry *balloon_pde;
++#endif
++
++static DEFINE_MUTEX(balloon_mutex);
++
++/*
++ * Protects atomic reservation decrease/increase against concurrent increases.
++ * Also protects non-atomic updates of current_pages and driver_pages, and
++ * balloon lists.
++ */
++DEFINE_SPINLOCK(balloon_lock);
++
++struct balloon_stats balloon_stats;
++
++/* We increase/decrease in batches which fit in a page */
++static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
++
++/* VM /proc information for memory */
++extern unsigned long totalram_pages;
++
++#ifndef MODULE
++extern unsigned long totalhigh_pages;
++#define inc_totalhigh_pages() (totalhigh_pages++)
++#define dec_totalhigh_pages() (totalhigh_pages--)
++#else
++#define inc_totalhigh_pages() ((void)0)
++#define dec_totalhigh_pages() ((void)0)
++#endif
++
++/* List of ballooned pages, threaded through the mem_map array. */
++static LIST_HEAD(ballooned_pages);
++
++/* Main work function, always executed in process context. */
++static void balloon_process(void *unused);
++static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static struct timer_list balloon_timer;
++
++/* When ballooning out (allocating memory to return to Xen) we don't really
++ want the kernel to try too hard since that can trigger the oom killer. */
++#define GFP_BALLOON \
++ (GFP_HIGHUSER|__GFP_NOWARN|__GFP_NORETRY|__GFP_NOMEMALLOC|__GFP_COLD)
++
++#define PAGE_TO_LIST(p) (&(p)->lru)
++#define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
++#define UNLIST_PAGE(p) \
++ do { \
++ list_del(PAGE_TO_LIST(p)); \
++ PAGE_TO_LIST(p)->next = NULL; \
++ PAGE_TO_LIST(p)->prev = NULL; \
++ } while(0)
++
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_mem: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_mem: " fmt, ##args)
++
++/* balloon_append: add the given page to the balloon. */
++static void balloon_append(struct page *page)
++{
++ /* Lowmem is re-populated first, so highmem pages go at list tail. */
++ if (PageHighMem(page)) {
++ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
++ bs.balloon_high++;
++ dec_totalhigh_pages();
++ } else {
++ list_add(PAGE_TO_LIST(page), &ballooned_pages);
++ bs.balloon_low++;
++ }
++}
++
++/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
++static struct page *balloon_retrieve(void)
++{
++ struct page *page;
++
++ if (list_empty(&ballooned_pages))
++ return NULL;
++
++ page = LIST_TO_PAGE(ballooned_pages.next);
++ UNLIST_PAGE(page);
++
++ if (PageHighMem(page)) {
++ bs.balloon_high--;
++ inc_totalhigh_pages();
++	} else
++ bs.balloon_low--;
++
++ return page;
++}
++
++static struct page *balloon_first_page(void)
++{
++ if (list_empty(&ballooned_pages))
++ return NULL;
++ return LIST_TO_PAGE(ballooned_pages.next);
++}
++
++static struct page *balloon_next_page(struct page *page)
++{
++ struct list_head *next = PAGE_TO_LIST(page)->next;
++ if (next == &ballooned_pages)
++ return NULL;
++ return LIST_TO_PAGE(next);
++}
++
++static inline void balloon_free_page(struct page *page)
++{
++#ifndef MODULE
++ if (put_page_testzero(page))
++ free_cold_page(page);
++#else
++ /* free_cold_page() is not being exported. */
++ __free_page(page);
++#endif
++}
++
++static void balloon_alarm(unsigned long unused)
++{
++ schedule_work(&balloon_worker);
++}
++
++static unsigned long current_target(void)
++{
++ unsigned long target = min(bs.target_pages, bs.hard_limit);
++ if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high))
++ target = bs.current_pages + bs.balloon_low + bs.balloon_high;
++ return target;
++}
++
++static unsigned long minimum_target(void)
++{
++#ifndef CONFIG_XEN
++#define max_pfn num_physpages
++#endif
++ unsigned long min_pages, curr_pages = current_target();
++
++#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
++	/* Simple continuous piecewise linear function:
++ * max MiB -> min MiB gradient
++ * 0 0
++ * 16 16
++ * 32 24
++ * 128 72 (1/2)
++ * 512 168 (1/4)
++ * 2048 360 (1/8)
++ * 8192 552 (1/32)
++ * 32768 1320
++ * 131072 4392
++ */
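++	/*
++	 * Worked example: a 1 GiB domain has max_pfn == MB2PAGES(1024),
++	 * which falls in the third range below, giving
++	 * MB2PAGES(104) + MB2PAGES(1024)/8 == MB2PAGES(232), i.e. a
++	 * 232 MiB floor -- consistent with the 168 and 360 MiB table
++	 * entries on either side.
++	 */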
++ if (max_pfn < MB2PAGES(128))
++ min_pages = MB2PAGES(8) + (max_pfn >> 1);
++ else if (max_pfn < MB2PAGES(512))
++ min_pages = MB2PAGES(40) + (max_pfn >> 2);
++ else if (max_pfn < MB2PAGES(2048))
++ min_pages = MB2PAGES(104) + (max_pfn >> 3);
++ else
++ min_pages = MB2PAGES(296) + (max_pfn >> 5);
++#undef MB2PAGES
++
++ /* Don't enforce growth */
++ return min(min_pages, curr_pages);
++#ifndef CONFIG_XEN
++#undef max_pfn
++#endif
++}
++
++static int increase_reservation(unsigned long nr_pages)
++{
++ unsigned long pfn, i, flags;
++ struct page *page;
++ long rc;
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (nr_pages > ARRAY_SIZE(frame_list))
++ nr_pages = ARRAY_SIZE(frame_list);
++
++ balloon_lock(flags);
++
++ page = balloon_first_page();
++ for (i = 0; i < nr_pages; i++) {
++ BUG_ON(page == NULL);
++		frame_list[i] = page_to_pfn(page);
++ page = balloon_next_page(page);
++ }
++
++ set_xen_guest_handle(reservation.extent_start, frame_list);
++ reservation.nr_extents = nr_pages;
++ rc = HYPERVISOR_memory_op(
++ XENMEM_populate_physmap, &reservation);
++ if (rc < nr_pages) {
++ if (rc > 0) {
++ int ret;
++
++ /* We hit the Xen hard limit: reprobe. */
++ reservation.nr_extents = rc;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON(ret != rc);
++ }
++ if (rc >= 0)
++ bs.hard_limit = (bs.current_pages + rc -
++ bs.driver_pages);
++ goto out;
++ }
++
++ for (i = 0; i < nr_pages; i++) {
++ page = balloon_retrieve();
++ BUG_ON(page == NULL);
++
++ pfn = page_to_pfn(page);
++ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
++ phys_to_machine_mapping_valid(pfn));
++
++ set_phys_to_machine(pfn, frame_list[i]);
++
++#ifdef CONFIG_XEN
++ /* Link back into the page tables if not highmem. */
++ if (pfn < max_low_pfn) {
++ int ret;
++ ret = HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
++ 0);
++ BUG_ON(ret);
++ }
++#endif
++
++ /* Relinquish the page back to the allocator. */
++ ClearPageReserved(page);
++ init_page_count(page);
++ balloon_free_page(page);
++ }
++
++ bs.current_pages += nr_pages;
++ totalram_pages = bs.current_pages;
++
++ out:
++ balloon_unlock(flags);
++
++ return 0;
++}
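++
++/*
++ * Note: a short return from XENMEM_populate_physmap means Xen's hard
++ * limit was hit; the partially granted extents are handed straight back
++ * via XENMEM_decrease_reservation and the limit is recorded in
++ * bs.hard_limit so that current_target() stops asking for more.
++ */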
++
++static int decrease_reservation(unsigned long nr_pages)
++{
++ unsigned long pfn, i, flags;
++ struct page *page;
++ void *v;
++ int need_sleep = 0;
++ int ret;
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (nr_pages > ARRAY_SIZE(frame_list))
++ nr_pages = ARRAY_SIZE(frame_list);
++
++ for (i = 0; i < nr_pages; i++) {
++ if ((page = alloc_page(GFP_BALLOON)) == NULL) {
++ nr_pages = i;
++ need_sleep = 1;
++ break;
++ }
++
++ pfn = page_to_pfn(page);
++ frame_list[i] = pfn_to_mfn(pfn);
++
++ if (!PageHighMem(page)) {
++ v = phys_to_virt(pfn << PAGE_SHIFT);
++ scrub_pages(v, 1);
++#ifdef CONFIG_XEN
++ ret = HYPERVISOR_update_va_mapping(
++ (unsigned long)v, __pte_ma(0), 0);
++ BUG_ON(ret);
++#endif
++ }
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ v = kmap(page);
++ scrub_pages(v, 1);
++ kunmap(page);
++ }
++#endif
++ }
++
++#ifdef CONFIG_XEN
++ /* Ensure that ballooned highmem pages don't have kmaps. */
++ kmap_flush_unused();
++ flush_tlb_all();
++#endif
++
++ balloon_lock(flags);
++
++ /* No more mappings: invalidate P2M and add to balloon. */
++ for (i = 0; i < nr_pages; i++) {
++ pfn = mfn_to_pfn(frame_list[i]);
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ balloon_append(pfn_to_page(pfn));
++ }
++
++ set_xen_guest_handle(reservation.extent_start, frame_list);
++ reservation.nr_extents = nr_pages;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++ BUG_ON(ret != nr_pages);
++
++ bs.current_pages -= nr_pages;
++ totalram_pages = bs.current_pages;
++
++ balloon_unlock(flags);
++
++ return need_sleep;
++}
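++
++/*
++ * Note: page contents are scrubbed before release so they cannot leak
++ * to whichever domain receives them next; highmem pages need an
++ * explicit kmap()/kunmap() around the scrub, which is why they are
++ * handled separately under CONFIG_XEN_SCRUB_PAGES.
++ */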
++
++/*
++ * We avoid multiple worker processes conflicting via the balloon mutex.
++ * We may of course race with updates of the target counts (which are protected
++ * by the balloon lock), or with changes to the Xen hard limit, but we will
++ * recover from these in time.
++ */
++static void balloon_process(void *unused)
++{
++ int need_sleep = 0;
++ long credit;
++
++ mutex_lock(&balloon_mutex);
++
++ do {
++ credit = current_target() - bs.current_pages;
++ if (credit > 0)
++ need_sleep = (increase_reservation(credit) != 0);
++ if (credit < 0)
++ need_sleep = (decrease_reservation(-credit) != 0);
++
++#ifndef CONFIG_PREEMPT
++ if (need_resched())
++ schedule();
++#endif
++ } while ((credit != 0) && !need_sleep);
++
++ /* Schedule more work if there is some still to be done. */
++ if (current_target() != bs.current_pages)
++ mod_timer(&balloon_timer, jiffies + HZ);
++
++ mutex_unlock(&balloon_mutex);
++}
++
++/* Resets the Xen limit, sets new target, and kicks off processing. */
++void balloon_set_new_target(unsigned long target)
++{
++ /* No need for lock. Not read-modify-write updates. */
++ bs.hard_limit = ~0UL;
++ bs.target_pages = max(target, minimum_target());
++ schedule_work(&balloon_worker);
++}
++
++static struct xenbus_watch target_watch = {
++ .node = "memory/target"
++};
++
++/* React to a change in the target key */
++static void watch_target(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ unsigned long long new_target;
++ int err;
++
++ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
++ if (err != 1) {
++ /* This is ok (for domain0 at least) - so just return */
++ return;
++ }
++
++ /* The given memory/target value is in KiB, so it needs converting to
++ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
++ */
++ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
++}
++
++static int balloon_init_watcher(struct notifier_block *notifier,
++ unsigned long event,
++ void *data)
++{
++ int err;
++
++ err = register_xenbus_watch(&target_watch);
++ if (err)
++ printk(KERN_ERR "Failed to set balloon watcher\n");
++
++ return NOTIFY_DONE;
++}
++
++#ifdef CONFIG_PROC_FS
++static int balloon_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ char memstring[64], *endchar;
++ unsigned long long target_bytes;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (count <= 1)
++ return -EBADMSG; /* runt */
++	if (count >= sizeof(memstring))
++ return -EFBIG; /* too long */
++
++ if (copy_from_user(memstring, buffer, count))
++ return -EFAULT;
++	memstring[count] = '\0';
++
++ target_bytes = memparse(memstring, &endchar);
++ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++ return count;
++}
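++
++/*
++ * Example (an illustration, not part of this patch): the string is
++ * parsed with memparse(), so size suffixes work:
++ *
++ *	echo 512M > /proc/xen/balloon
++ *
++ * asks the balloon driver to target 512 MiB for this domain.
++ */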
++
++static int balloon_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(
++ page,
++ "Current allocation: %8lu kB\n"
++ "Requested target: %8lu kB\n"
++ "Low-mem balloon: %8lu kB\n"
++ "High-mem balloon: %8lu kB\n"
++ "Driver pages: %8lu kB\n"
++ "Xen hard limit: ",
++ PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages),
++ PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
++ PAGES2KB(bs.driver_pages));
++
++ if (bs.hard_limit != ~0UL)
++ len += sprintf(page + len, "%8lu kB\n",
++ PAGES2KB(bs.hard_limit));
++ else
++ len += sprintf(page + len, " ??? kB\n");
++
++ *eof = 1;
++ return len;
++}
++#endif
++
++static struct notifier_block xenstore_notifier;
++
++static int __init balloon_init(void)
++{
++#if defined(CONFIG_X86) && defined(CONFIG_XEN)
++ unsigned long pfn;
++ struct page *page;
++#endif
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ IPRINTK("Initialising balloon driver.\n");
++
++#ifdef CONFIG_XEN
++ bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
++ totalram_pages = bs.current_pages;
++#else
++ bs.current_pages = totalram_pages;
++#endif
++ bs.target_pages = bs.current_pages;
++ bs.balloon_low = 0;
++ bs.balloon_high = 0;
++ bs.driver_pages = 0UL;
++ bs.hard_limit = ~0UL;
++
++ init_timer(&balloon_timer);
++ balloon_timer.data = 0;
++ balloon_timer.function = balloon_alarm;
++
++#ifdef CONFIG_PROC_FS
++ if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
++ WPRINTK("Unable to create /proc/xen/balloon.\n");
++ return -1;
++ }
++
++ balloon_pde->read_proc = balloon_read;
++ balloon_pde->write_proc = balloon_write;
++#endif
++ balloon_sysfs_init();
++
++#if defined(CONFIG_X86) && defined(CONFIG_XEN)
++ /* Initialise the balloon with excess memory space. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++ page = pfn_to_page(pfn);
++ if (!PageReserved(page))
++ balloon_append(page);
++ }
++#endif
++
++ target_watch.callback = watch_target;
++ xenstore_notifier.notifier_call = balloon_init_watcher;
++
++ register_xenstore_notifier(&xenstore_notifier);
++
++ return 0;
++}
++
++subsys_initcall(balloon_init);
++
++static void __exit balloon_exit(void)
++{
++ /* XXX - release balloon here */
++ return;
++}
++
++module_exit(balloon_exit);
++
++void balloon_update_driver_allowance(long delta)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ bs.driver_pages += delta;
++ balloon_unlock(flags);
++}
++
++#ifdef CONFIG_XEN
++static int dealloc_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ unsigned long mfn = pte_mfn(*pte);
++ int ret;
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &mfn);
++ set_pte_at(&init_mm, addr, pte, __pte_ma(0));
++ set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++ BUG_ON(ret != 1);
++ return 0;
++}
++#endif
++
++struct page **alloc_empty_pages_and_pagevec(int nr_pages)
++{
++ unsigned long flags;
++ void *v;
++ struct page *page, **pagevec;
++ int i, ret;
++
++ pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
++ if (pagevec == NULL)
++ return NULL;
++
++ for (i = 0; i < nr_pages; i++) {
++ page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD);
++ if (page == NULL)
++ goto err;
++
++ v = page_address(page);
++ scrub_pages(v, 1);
++
++ balloon_lock(flags);
++
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ unsigned long gmfn = page_to_pfn(page);
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &gmfn);
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ if (ret == 1)
++ ret = 0; /* success */
++ } else {
++#ifdef CONFIG_XEN
++ ret = apply_to_page_range(&init_mm, (unsigned long)v,
++ PAGE_SIZE, dealloc_pte_fn,
++ NULL);
++#else
++ /* Cannot handle non-auto translate mode. */
++ ret = 1;
++#endif
++ }
++
++ if (ret != 0) {
++ balloon_unlock(flags);
++ balloon_free_page(page);
++ goto err;
++ }
++
++ totalram_pages = --bs.current_pages;
++
++ balloon_unlock(flags);
++ }
++
++ out:
++ schedule_work(&balloon_worker);
++#ifdef CONFIG_XEN
++ flush_tlb_all();
++#endif
++ return pagevec;
++
++ err:
++ balloon_lock(flags);
++ while (--i >= 0)
++ balloon_append(pagevec[i]);
++ balloon_unlock(flags);
++ kfree(pagevec);
++ pagevec = NULL;
++ goto out;
++}
++
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++{
++ unsigned long flags;
++ int i;
++
++ if (pagevec == NULL)
++ return;
++
++ balloon_lock(flags);
++ for (i = 0; i < nr_pages; i++) {
++ BUG_ON(page_count(pagevec[i]) != 1);
++ balloon_append(pagevec[i]);
++ }
++ balloon_unlock(flags);
++
++ kfree(pagevec);
++
++ schedule_work(&balloon_worker);
++}
++
++void balloon_release_driver_page(struct page *page)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ balloon_append(page);
++ bs.driver_pages--;
++ balloon_unlock(flags);
++
++ schedule_work(&balloon_worker);
++}
++
++EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
++EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(balloon_release_driver_page);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/balloon/common.h linux-2.6.18-xen-3.3.0/drivers/xen/balloon/common.h
+--- linux-2.6.18.8/drivers/xen/balloon/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/balloon/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++/******************************************************************************
++ * balloon/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_BALLOON_COMMON_H__
++#define __XEN_BALLOON_COMMON_H__
++
++#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
++
++struct balloon_stats {
++ /* We aim for 'current allocation' == 'target allocation'. */
++ unsigned long current_pages;
++ unsigned long target_pages;
++ /* We may hit the hard limit in Xen. If we do then we remember it. */
++ unsigned long hard_limit;
++ /*
++ * Drivers may alter the memory reservation independently, but they
++ * must inform the balloon driver so we avoid hitting the hard limit.
++ */
++ unsigned long driver_pages;
++ /* Number of pages in high- and low-memory balloons. */
++ unsigned long balloon_low;
++ unsigned long balloon_high;
++};
++
++extern struct balloon_stats balloon_stats;
++#define bs balloon_stats
++
++int balloon_sysfs_init(void);
++void balloon_sysfs_exit(void);
++
++void balloon_set_new_target(unsigned long target);
++
++#endif /* __XEN_BALLOON_COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/balloon/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/balloon/Makefile
+--- linux-2.6.18.8/drivers/xen/balloon/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/balloon/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++
++obj-y := balloon.o sysfs.o
+diff -rpuN linux-2.6.18.8/drivers/xen/balloon/sysfs.c linux-2.6.18-xen-3.3.0/drivers/xen/balloon/sysfs.c
+--- linux-2.6.18.8/drivers/xen/balloon/sysfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/balloon/sysfs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,170 @@
++/******************************************************************************
++ * balloon/sysfs.c
++ *
++ * Xen balloon driver - sysfs interfaces.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/stat.h>
++#include <linux/string.h>
++#include <linux/sysdev.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BALLOON_CLASS_NAME "xen_memory"
++
++#define BALLOON_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct sys_device *dev, \
++ char *buf) \
++ { \
++ return sprintf(buf, format, ##args); \
++ } \
++ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
++
++BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
++BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
++BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
++BALLOON_SHOW(hard_limit_kb,
++ (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n",
++ (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
++BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
++
++static ssize_t show_target_kb(struct sys_device *dev, char *buf)
++{
++ return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
++}
++
++static ssize_t store_target_kb(struct sys_device *dev,
++ const char *buf,
++ size_t count)
++{
++ char memstring[64], *endchar;
++ unsigned long long target_bytes;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (count <= 1)
++ return -EBADMSG; /* runt */
++	if (count >= sizeof(memstring))
++ return -EFBIG; /* too long */
++ strcpy(memstring, buf);
++
++ target_bytes = memparse(memstring, &endchar);
++ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++ return count;
++}
++
++static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
++ show_target_kb, store_target_kb);
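++
++/*
++ * Caution: despite the attribute's name, store_target_kb() feeds the
++ * raw memparse() result (bytes) into balloon_set_new_target(), while
++ * show_target_kb() reports KiB -- so a plain number written to
++ * target_kb is interpreted as bytes, not KiB.
++ */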
++
++static struct sysdev_attribute *balloon_attrs[] = {
++ &attr_target_kb,
++};
++
++static struct attribute *balloon_info_attrs[] = {
++ &attr_current_kb.attr,
++ &attr_low_kb.attr,
++ &attr_high_kb.attr,
++ &attr_hard_limit_kb.attr,
++ &attr_driver_kb.attr,
++ NULL
++};
++
++static struct attribute_group balloon_info_group = {
++ .name = "info",
++ .attrs = balloon_info_attrs,
++};
++
++static struct sysdev_class balloon_sysdev_class = {
++ set_kset_name(BALLOON_CLASS_NAME),
++};
++
++static struct sys_device balloon_sysdev;
++
++static int register_balloon(struct sys_device *sysdev)
++{
++ int i, error;
++
++ error = sysdev_class_register(&balloon_sysdev_class);
++ if (error)
++ return error;
++
++ sysdev->id = 0;
++ sysdev->cls = &balloon_sysdev_class;
++
++ error = sysdev_register(sysdev);
++ if (error) {
++ sysdev_class_unregister(&balloon_sysdev_class);
++ return error;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
++ error = sysdev_create_file(sysdev, balloon_attrs[i]);
++ if (error)
++ goto fail;
++ }
++
++ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
++ if (error)
++ goto fail;
++
++ return 0;
++
++ fail:
++ while (--i >= 0)
++ sysdev_remove_file(sysdev, balloon_attrs[i]);
++ sysdev_unregister(sysdev);
++ sysdev_class_unregister(&balloon_sysdev_class);
++ return error;
++}
++
++static void unregister_balloon(struct sys_device *sysdev)
++{
++ int i;
++
++ sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
++ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
++ sysdev_remove_file(sysdev, balloon_attrs[i]);
++ sysdev_unregister(sysdev);
++ sysdev_class_unregister(&balloon_sysdev_class);
++}
++
++int balloon_sysfs_init(void)
++{
++ return register_balloon(&balloon_sysdev);
++}
++
++void balloon_sysfs_exit(void)
++{
++ unregister_balloon(&balloon_sysdev);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/blkback/blkback.c linux-2.6.18-xen-3.3.0/drivers/xen/blkback/blkback.c
+--- linux-2.6.18.8/drivers/xen/blkback/blkback.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkback/blkback.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,646 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/main.c
++ *
++ * Back-end of the driver for virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * arch/xen/drivers/blkif/frontend
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Copyright (c) 2005, Christopher Clark
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++
++/*
++ * These are rather arbitrary. They are fairly large because adjacent requests
++ * pulled from a communication ring are quite likely to end up being part of
++ * the same scatter/gather request at the disc.
++ *
++ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
++ *
++ * This will increase the chances of being able to write whole tracks.
++ * 64 should be enough to keep us competitive with Linux.
++ */
++static int blkif_reqs = 64;
++module_param_named(reqs, blkif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
++
++/* Run-time switchable: /sys/module/blkback/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a
++ * 'pending_req' allocated to it. Each bio that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++ blkif_t *blkif;
++ u64 id;
++ int nr_pages;
++ atomic_t pendcnt;
++ unsigned short operation;
++ int status;
++ struct list_head free_list;
++} pending_req_t;
++
++static pending_req_t *pending_reqs;
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
++
++static inline int vaddr_pagenr(pending_req_t *req, int seg)
++{
++ return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
++}
++
++static inline unsigned long vaddr(pending_req_t *req, int seg)
++{
++ unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
++
++#define pending_handle(_req, _seg) \
++ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
++
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static pending_req_t* alloc_req(void)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++static void unplug_queue(blkif_t *blkif)
++{
++ if (blkif->plug == NULL)
++ return;
++ if (blkif->plug->unplug_fn)
++ blkif->plug->unplug_fn(blkif->plug);
++ blk_put_queue(blkif->plug);
++ blkif->plug = NULL;
++}
++
++static void plug_queue(blkif_t *blkif, struct bio *bio)
++{
++ request_queue_t *q = bdev_get_queue(bio->bi_bdev);
++
++ if (q == blkif->plug)
++ return;
++ unplug_queue(blkif);
++ blk_get_queue(q);
++ blkif->plug = q;
++}
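++
++/*
++ * The backend keeps at most one request queue plugged at a time:
++ * plug_queue() unplugs the previously held queue when a bio for a
++ * different device shows up, and blkif_schedule() unplugs at the end
++ * of each batch so queued I/O is actually released to the disc.
++ */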
++
++static void fast_flush_area(pending_req_t *req)
++{
++ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ unsigned int i, invcount = 0;
++ grant_handle_t handle;
++ int ret;
++
++ for (i = 0; i < req->nr_pages; i++) {
++ handle = pending_handle(req, i);
++ if (handle == BLKBACK_INVALID_HANDLE)
++ continue;
++ gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
++ GNTMAP_host_map, handle);
++ pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
++ invcount++;
++ }
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(ret);
++}
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
++ current->comm, blkif->st_oo_req,
++ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
++ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++	blkif->st_rd_req = 0;
++	blkif->st_wr_req = 0;
++	blkif->st_oo_req = 0;
++	blkif->st_br_req = 0;
++}
++
++int blkif_schedule(void *arg)
++{
++ blkif_t *blkif = arg;
++
++ blkif_get(blkif);
++
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: started\n", current->comm);
++
++ while (!kthread_should_stop()) {
++ if (try_to_freeze())
++ continue;
++
++ wait_event_interruptible(
++ blkif->wq,
++ blkif->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ blkif->waiting_reqs = 0;
++ smp_mb(); /* clear flag *before* checking for work */
++
++ if (do_block_io_op(blkif))
++ blkif->waiting_reqs = 1;
++ unplug_queue(blkif);
++
++ if (log_stats && time_after(jiffies, blkif->st_print))
++ print_stats(blkif);
++ }
++
++ if (log_stats)
++ print_stats(blkif);
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: exiting\n", current->comm);
++
++ blkif->xenblkd = NULL;
++ blkif_put(blkif);
++
++ return 0;
++}
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called as bio->bi_end_io()
++ */
++
++static void __end_block_io_op(pending_req_t *pending_req, int error)
++{
++ /* An error fails the entire request. */
++ if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
++ (error == -EOPNOTSUPP)) {
++ DPRINTK("blkback: write barrier op failed, not supported\n");
++ blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
++ pending_req->status = BLKIF_RSP_EOPNOTSUPP;
++ } else if (error) {
++ DPRINTK("Buffer not up-to-date at end of operation, "
++ "error=%d\n", error);
++ pending_req->status = BLKIF_RSP_ERROR;
++ }
++
++ if (atomic_dec_and_test(&pending_req->pendcnt)) {
++ fast_flush_area(pending_req);
++ make_response(pending_req->blkif, pending_req->id,
++ pending_req->operation, pending_req->status);
++ blkif_put(pending_req->blkif);
++ free_req(pending_req);
++ }
++}
++
++static int end_block_io_op(struct bio *bio, unsigned int done, int error)
++{
++ if (bio->bi_size != 0)
++ return 1;
++ __end_block_io_op(bio->bi_private, error);
++ bio_put(bio);
++ return error;
++}
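++
++/*
++ * Note: on 2.6.18 a bio's completion callback can be invoked more than
++ * once for partial completions; returning 1 while bio->bi_size is
++ * still non-zero tells the block layer the bio is not finished, and
++ * only the final call (bi_size == 0) completes the pending_req above.
++ */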
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++static void blkif_notify_work(blkif_t *blkif)
++{
++ blkif->waiting_reqs = 1;
++ wake_up(&blkif->wq);
++}
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ blkif_notify_work(dev_id);
++ return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++
++static int do_block_io_op(blkif_t *blkif)
++{
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ blkif_request_t req;
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int more_to_do = 0;
++
++ rc = blk_rings->common.req_cons;
++ rp = blk_rings->common.sring->req_prod;
++ rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++ while (rc != rp) {
++
++ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
++ break;
++
++ pending_req = alloc_req();
++ if (NULL == pending_req) {
++ blkif->st_oo_req++;
++ more_to_do = 1;
++ break;
++ }
++
++ if (kthread_should_stop()) {
++ more_to_do = 1;
++ break;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.req_cons = ++rc; /* before make_response() */
++
++ /* Apply all sanity checks to /private copy/ of request. */
++ barrier();
++
++ switch (req.operation) {
++ case BLKIF_OP_READ:
++ blkif->st_rd_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++ case BLKIF_OP_WRITE_BARRIER:
++ blkif->st_br_req++;
++ /* fall through */
++ case BLKIF_OP_WRITE:
++ blkif->st_wr_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++ default:
++ /* A good sign something is wrong: sleep for a while to
++ * avoid excessive CPU consumption by a bad guest. */
++ msleep(1);
++ DPRINTK("error: unknown block io operation [%d]\n",
++ req.operation);
++ make_response(blkif, req.id, req.operation,
++ BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ break;
++ }
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++ }
++
++ return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req)
++{
++ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ struct phys_req preq;
++ struct {
++ unsigned long buf; unsigned int nsec;
++ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ unsigned int nseg;
++ struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ int ret, i, nbio = 0;
++ int operation;
++
++ switch (req->operation) {
++ case BLKIF_OP_READ:
++ operation = READ;
++ break;
++ case BLKIF_OP_WRITE:
++ operation = WRITE;
++ break;
++ case BLKIF_OP_WRITE_BARRIER:
++ operation = WRITE_BARRIER;
++ break;
++ default:
++ operation = 0; /* make gcc happy */
++ BUG();
++ }
++
++ /* Check that number of segments is sane. */
++ nseg = req->nr_segments;
++ if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
++ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++ DPRINTK("Bad number of segments in request (%d)\n", nseg);
++ goto fail_response;
++ }
++
++ preq.dev = req->handle;
++ preq.sector_number = req->sector_number;
++ preq.nr_sects = 0;
++
++ pending_req->blkif = blkif;
++ pending_req->id = req->id;
++ pending_req->operation = req->operation;
++ pending_req->status = BLKIF_RSP_OKAY;
++ pending_req->nr_pages = nseg;
++
++ for (i = 0; i < nseg; i++) {
++ uint32_t flags;
++
++ seg[i].nsec = req->seg[i].last_sect -
++ req->seg[i].first_sect + 1;
++
++ if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
++ (req->seg[i].last_sect < req->seg[i].first_sect))
++ goto fail_response;
++ preq.nr_sects += seg[i].nsec;
++
++ flags = GNTMAP_host_map;
++ if (operation != READ)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++ req->seg[i].gref, blkif->domid);
++ }
++
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
++ BUG_ON(ret);
++
++ for (i = 0; i < nseg; i++) {
++ if (unlikely(map[i].status != 0)) {
++ DPRINTK("invalid buffer -- could not remap it\n");
++ map[i].handle = BLKBACK_INVALID_HANDLE;
++ ret |= 1;
++ }
++
++ pending_handle(pending_req, i) = map[i].handle;
++
++ if (ret)
++ continue;
++
++ set_phys_to_machine(__pa(vaddr(
++ pending_req, i)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++ seg[i].buf = map[i].dev_bus_addr |
++ (req->seg[i].first_sect << 9);
++ }
++
++ if (ret)
++ goto fail_flush;
++
++ if (vbd_translate(&preq, blkif, operation) != 0) {
++ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
++ operation == READ ? "read" : "write",
++ preq.sector_number,
++ preq.sector_number + preq.nr_sects, preq.dev);
++ goto fail_flush;
++ }
++
++ for (i = 0; i < nseg; i++) {
++ if (((int)preq.sector_number|(int)seg[i].nsec) &
++ ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
++ DPRINTK("Misaligned I/O request from domain %d",
++ blkif->domid);
++ goto fail_put_bio;
++ }
++
++ while ((bio == NULL) ||
++ (bio_add_page(bio,
++ virt_to_page(vaddr(pending_req, i)),
++ seg[i].nsec << 9,
++ seg[i].buf & ~PAGE_MASK) == 0)) {
++ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
++ if (unlikely(bio == NULL))
++ goto fail_put_bio;
++
++ bio->bi_bdev = preq.bdev;
++ bio->bi_private = pending_req;
++ bio->bi_end_io = end_block_io_op;
++ bio->bi_sector = preq.sector_number;
++ }
++
++ preq.sector_number += seg[i].nsec;
++ }
++
++ if (!bio) {
++ BUG_ON(operation != WRITE_BARRIER);
++ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
++ if (unlikely(bio == NULL))
++ goto fail_put_bio;
++
++ bio->bi_bdev = preq.bdev;
++ bio->bi_private = pending_req;
++ bio->bi_end_io = end_block_io_op;
++ bio->bi_sector = -1;
++ }
++
++ plug_queue(blkif, bio);
++ atomic_set(&pending_req->pendcnt, nbio);
++ blkif_get(blkif);
++
++ for (i = 0; i < nbio; i++)
++ submit_bio(operation, biolist[i]);
++
++ if (operation == READ)
++ blkif->st_rd_sect += preq.nr_sects;
++ else if (operation == WRITE || operation == WRITE_BARRIER)
++ blkif->st_wr_sect += preq.nr_sects;
++
++ return;
++
++ fail_put_bio:
++ for (i = 0; i < (nbio-1); i++)
++ bio_put(biolist[i]);
++ fail_flush:
++ fast_flush_area(pending_req);
++ fail_response:
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ msleep(1); /* back off a bit */
++}
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st)
++{
++ blkif_response_t resp;
++ unsigned long flags;
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ int more_to_do = 0;
++ int notify;
++
++ resp.id = id;
++ resp.operation = op;
++ resp.status = st;
++
++ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++ /* Place on the response ring for the relevant domain. */
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++ /*
++ * Tail check for pending requests. Allows frontend to avoid
++ * notifications if requests are already in flight (lower
++ * overheads and promotes batching).
++ */
++ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++
++ if (more_to_do)
++ blkif_notify_work(blkif);
++ if (notify)
++ notify_remote_via_irq(blkif->irq);
++}
++
++static int __init blkif_init(void)
++{
++ int i, mmap_pages;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
++
++ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
++ blkif_reqs, GFP_KERNEL);
++ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++ mmap_pages, GFP_KERNEL);
++ pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs || !pending_grant_handles || !pending_pages)
++ goto out_of_memory;
++
++ for (i = 0; i < mmap_pages; i++)
++ pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
++
++ blkif_interface_init();
++
++	memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
++ INIT_LIST_HEAD(&pending_free);
++
++ for (i = 0; i < blkif_reqs; i++)
++ list_add_tail(&pending_reqs[i].free_list, &pending_free);
++
++ blkif_xenbus_init();
++
++ return 0;
++
++ out_of_memory:
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++	printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/blkback/common.h linux-2.6.18-xen-3.3.0/drivers/xen/blkback/common.h
+--- linux-2.6.18.8/drivers/xen/blkback/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkback/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,139 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct vbd {
++ blkif_vdev_t handle; /* what the domain refers to this vbd as */
++ unsigned char readonly; /* Non-zero -> read-only */
++ unsigned char type; /* VDISK_xxx */
++ u32 pdevice; /* phys device that this vbd maps to */
++ struct block_device *bdev;
++};
++
++struct backend_info;
++
++typedef struct blkif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++ /* Comms information. */
++ enum blkif_protocol blk_protocol;
++ blkif_back_rings_t blk_rings;
++ struct vm_struct *blk_ring_area;
++ /* The VBD attached to this interface. */
++ struct vbd vbd;
++ /* Back pointer to the backend_info. */
++ struct backend_info *be;
++ /* Private fields. */
++ spinlock_t blk_ring_lock;
++ atomic_t refcnt;
++
++ wait_queue_head_t wq;
++ struct task_struct *xenblkd;
++ unsigned int waiting_reqs;
++ request_queue_t *plug;
++
++ /* statistics */
++ unsigned long st_print;
++ int st_rd_req;
++ int st_wr_req;
++ int st_oo_req;
++ int st_br_req;
++ int st_rd_sect;
++ int st_wr_sect;
++
++ wait_queue_head_t waiting_to_free;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++} blkif_t;
++
++blkif_t *blkif_alloc(domid_t domid);
++void blkif_disconnect(blkif_t *blkif);
++void blkif_free(blkif_t *blkif);
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
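++
++/*
++ * Reference-count sketch: each request in flight holds one reference
++ * (blkif_get() in dispatch_rw_block_io(), blkif_put() when its last
++ * bio completes), so blkif_disconnect() can sleep on waiting_to_free
++ * until all outstanding I/O has drained.
++ */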
++
++/* Create a vbd. */
++int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
++ unsigned minor, int readonly, int cdrom);
++void vbd_free(struct vbd *vbd);
++
++unsigned long long vbd_size(struct vbd *vbd);
++unsigned int vbd_info(struct vbd *vbd);
++unsigned long vbd_secsize(struct vbd *vbd);
++
++struct phys_req {
++ unsigned short dev;
++ unsigned short nr_sects;
++ struct block_device *bdev;
++ blkif_sector_t sector_number;
++};
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
++
++void blkif_interface_init(void);
++
++void blkif_xenbus_init(void);
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int blkif_schedule(void *arg);
++
++int blkback_barrier(struct xenbus_transaction xbt,
++ struct backend_info *be, int state);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/blkback/interface.c linux-2.6.18-xen-3.3.0/drivers/xen/blkback/interface.c
+--- linux-2.6.18.8/drivers/xen/blkback/interface.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkback/interface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,181 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/interface.c
++ *
++ * Block-device interface management.
++ *
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *blkif_alloc(domid_t domid)
++{
++ blkif_t *blkif;
++
++ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++ if (!blkif)
++ return ERR_PTR(-ENOMEM);
++
++ memset(blkif, 0, sizeof(*blkif));
++ blkif->domid = domid;
++ spin_lock_init(&blkif->blk_ring_lock);
++ atomic_set(&blkif->refcnt, 1);
++ init_waitqueue_head(&blkif->wq);
++ blkif->st_print = jiffies;
++ init_waitqueue_head(&blkif->waiting_to_free);
++
++ return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, shared_page, blkif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++		DPRINTK("Grant table operation failure!\n");
++ return op.status;
++ }
++
++ blkif->shmem_ref = shared_page;
++ blkif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, blkif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
++{
++ int err;
++
++ /* Already connected through? */
++ if (blkif->irq)
++ return 0;
++
++	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
++	if (blkif->blk_ring_area == NULL)
++ return -ENOMEM;
++
++ err = map_frontend_page(blkif, shared_page);
++ if (err) {
++ free_vm_area(blkif->blk_ring_area);
++ return err;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ {
++ blkif_sring_t *sring;
++ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_32:
++ {
++ blkif_x86_32_sring_t *sring_x86_32;
++ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_64:
++ {
++ blkif_x86_64_sring_t *sring_x86_64;
++ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++ break;
++ }
++ default:
++ BUG();
++ }
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
++	if (err < 0) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ return err;
++ }
++ blkif->irq = err;
++
++ return 0;
++}
++
++void blkif_disconnect(blkif_t *blkif)
++{
++ if (blkif->xenblkd) {
++ kthread_stop(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ }
++
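++	/*
++	 * Drop our own reference and wait for every outstanding request
++	 * to release its reference too; re-take the initial reference
++	 * afterwards so that blkif_free() performs the final decrement.
++	 */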
++ atomic_dec(&blkif->refcnt);
++ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++ atomic_inc(&blkif->refcnt);
++
++ if (blkif->irq) {
++ unbind_from_irqhandler(blkif->irq, blkif);
++ blkif->irq = 0;
++ }
++
++ if (blkif->blk_rings.common.sring) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ }
++}
++
++void blkif_free(blkif_t *blkif)
++{
++ if (!atomic_dec_and_test(&blkif->refcnt))
++ BUG();
++ kmem_cache_free(blkif_cachep, blkif);
++}
++
++void __init blkif_interface_init(void)
++{
++ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
++ 0, 0, NULL, NULL);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/blkback/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/blkback/Makefile
+--- linux-2.6.18.8/drivers/xen/blkback/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkback/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
++
++blkbk-y := blkback.o xenbus.o interface.o vbd.o
+diff -rpuN linux-2.6.18.8/drivers/xen/blkback/vbd.c linux-2.6.18-xen-3.3.0/drivers/xen/blkback/vbd.c
+--- linux-2.6.18.8/drivers/xen/blkback/vbd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkback/vbd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,118 @@
++/******************************************************************************
++ * blkback/vbd.c
++ *
++ * Routines for managing virtual block devices (VBDs).
++ *
++ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++
++#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
++ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
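++
++/*
++ * vbd_sz() yields the size in 512-byte sectors: the partition length
++ * when the VBD maps a partition (bd_part is set), otherwise the whole
++ * disk's capacity.
++ */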
++
++unsigned long long vbd_size(struct vbd *vbd)
++{
++ return vbd_sz(vbd);
++}
++
++unsigned int vbd_info(struct vbd *vbd)
++{
++	return vbd->type | (vbd->readonly ? VDISK_READONLY : 0);
++}
++
++unsigned long vbd_secsize(struct vbd *vbd)
++{
++ return bdev_hardsect_size(vbd->bdev);
++}
++
++int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
++ unsigned minor, int readonly, int cdrom)
++{
++ struct vbd *vbd;
++ struct block_device *bdev;
++
++ vbd = &blkif->vbd;
++ vbd->handle = handle;
++ vbd->readonly = readonly;
++ vbd->type = 0;
++
++ vbd->pdevice = MKDEV(major, minor);
++
++ bdev = open_by_devnum(vbd->pdevice,
++ vbd->readonly ? FMODE_READ : FMODE_WRITE);
++
++ if (IS_ERR(bdev)) {
++		DPRINTK("vbd_create: device %08x could not be opened.\n",
++ vbd->pdevice);
++ return -ENOENT;
++ }
++
++ vbd->bdev = bdev;
++
++ if (vbd->bdev->bd_disk == NULL) {
++		DPRINTK("vbd_create: device %08x doesn't exist.\n",
++ vbd->pdevice);
++ vbd_free(vbd);
++ return -ENOENT;
++ }
++
++ if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
++ vbd->type |= VDISK_CDROM;
++ if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
++ vbd->type |= VDISK_REMOVABLE;
++
++ DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
++ handle, blkif->domid);
++ return 0;
++}
++
++void vbd_free(struct vbd *vbd)
++{
++ if (vbd->bdev)
++ blkdev_put(vbd->bdev);
++ vbd->bdev = NULL;
++}
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
++{
++ struct vbd *vbd = &blkif->vbd;
++ int rc = -EACCES;
++
++ if ((operation != READ) && vbd->readonly)
++ goto out;
++
++ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
++ goto out;
++
++ req->dev = vbd->pdevice;
++ req->bdev = vbd->bdev;
++ rc = 0;
++
++ out:
++ return rc;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/blkback/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/blkback/xenbus.c
+--- linux-2.6.18.8/drivers/xen/blkback/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkback/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,541 @@
++/* Xenbus code for blkif backend
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include "common.h"
++
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++struct backend_info {
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ unsigned major;
++ unsigned minor;
++ char *mode;
++};
++
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static void backend_changed(struct xenbus_watch *, const char **,
++ unsigned int);
++
++static int blkback_name(blkif_t *blkif, char *buf)
++{
++ char *devpath, *devname;
++ struct xenbus_device *dev = blkif->be->dev;
++
++ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++ if (IS_ERR(devpath))
++ return PTR_ERR(devpath);
++
++ if ((devname = strstr(devpath, "/dev/")) != NULL)
++ devname += strlen("/dev/");
++ else
++ devname = devpath;
++
++ snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
++ kfree(devpath);
++
++ return 0;
++}
++
++static void update_blkif_status(blkif_t *blkif)
++{
++ int err;
++ char name[TASK_COMM_LEN];
++
++ /* Not ready to connect? */
++ if (!blkif->irq || !blkif->vbd.bdev)
++ return;
++
++ /* Already connected? */
++ if (blkif->be->dev->state == XenbusStateConnected)
++ return;
++
++ /* Attempt to connect: exit if we fail to. */
++ connect(blkif->be);
++ if (blkif->be->dev->state != XenbusStateConnected)
++ return;
++
++ err = blkback_name(blkif, name);
++ if (err) {
++ xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
++ return;
++ }
++
++ blkif->xenblkd = kthread_run(blkif_schedule, blkif, name);
++ if (IS_ERR(blkif->xenblkd)) {
++ err = PTR_ERR(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
++ }
++}
++
++
++/****************************************************************
++ * sysfs interface for VBD I/O requests
++ */
++
++#define VBD_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct device *_dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++ { \
++ struct xenbus_device *dev = to_xenbus_device(_dev); \
++ struct backend_info *be = dev->dev.driver_data; \
++ \
++ return sprintf(buf, format, ##args); \
++ } \
++ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *vbdstat_attrs[] = {
++ &dev_attr_oo_req.attr,
++ &dev_attr_rd_req.attr,
++ &dev_attr_wr_req.attr,
++ &dev_attr_br_req.attr,
++ &dev_attr_rd_sect.attr,
++ &dev_attr_wr_sect.attr,
++ NULL
++};
++
++static struct attribute_group vbdstat_group = {
++ .name = "statistics",
++ .attrs = vbdstat_attrs,
++};
++
++VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
++VBD_SHOW(mode, "%s\n", be->mode);
++
++int xenvbd_sysfs_addif(struct xenbus_device *dev)
++{
++ int error;
++
++ error = device_create_file(&dev->dev, &dev_attr_physical_device);
++ if (error)
++ goto fail1;
++
++ error = device_create_file(&dev->dev, &dev_attr_mode);
++ if (error)
++ goto fail2;
++
++ error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
++ if (error)
++ goto fail3;
++
++ return 0;
++
++fail3: sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++fail2: device_remove_file(&dev->dev, &dev_attr_mode);
++fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
++ return error;
++}
++
++void xenvbd_sysfs_delif(struct xenbus_device *dev)
++{
++ sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++ device_remove_file(&dev->dev, &dev_attr_mode);
++ device_remove_file(&dev->dev, &dev_attr_physical_device);
++}
++
++static int blkback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ DPRINTK("");
++
++ if (be->major || be->minor)
++ xenvbd_sysfs_delif(dev);
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++
++ if (be->blkif) {
++ blkif_disconnect(be->blkif);
++ vbd_free(&be->blkif->vbd);
++ blkif_free(be->blkif);
++ be->blkif = NULL;
++ }
++
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++int blkback_barrier(struct xenbus_transaction xbt,
++ struct backend_info *be, int state)
++{
++ struct xenbus_device *dev = be->dev;
++ int err;
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
++ "%d", state);
++ if (err)
++ xenbus_dev_fatal(dev, err, "writing feature-barrier");
++
++ return err;
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures, and watch the store waiting for the hotplug scripts to tell us
++ * the device's physical major and minor numbers. Switch to InitWait.
++ */
++static int blkback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ be->blkif = blkif_alloc(dev->otherend_id);
++ if (IS_ERR(be->blkif)) {
++ err = PTR_ERR(be->blkif);
++ be->blkif = NULL;
++ xenbus_dev_fatal(dev, err, "creating block interface");
++ goto fail;
++ }
++
++ /* setup back pointer */
++ be->blkif->be = be;
++
++ err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
++ &be->backend_watch, backend_changed);
++ if (err)
++ goto fail;
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ return 0;
++
++fail:
++ DPRINTK("failed");
++ blkback_remove(dev);
++ return err;
++}
++
++
++/**
++ * Callback received when the hotplug scripts have placed the physical-device
++ * node. Read it and the mode node, and create a vbd. If the frontend is
++ * ready, connect.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned major;
++ unsigned minor;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++ int cdrom = 0;
++ char *device_type;
++
++ DPRINTK("");
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
++ &major, &minor);
++ if (XENBUS_EXIST_ERR(err)) {
++ /* Since this watch will fire once immediately after it is
++ registered, we expect this. Ignore it, and wait for the
++ hotplug scripts. */
++ return;
++ }
++ if (err != 2) {
++ xenbus_dev_fatal(dev, err, "reading physical-device");
++ return;
++ }
++
++ if ((be->major || be->minor) &&
++ ((be->major != major) || (be->minor != minor))) {
++ printk(KERN_WARNING
++ "blkback: changing physical device (from %x:%x to "
++ "%x:%x) not supported.\n", be->major, be->minor,
++ major, minor);
++ return;
++ }
++
++ be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
++ if (IS_ERR(be->mode)) {
++ err = PTR_ERR(be->mode);
++ be->mode = NULL;
++ xenbus_dev_fatal(dev, err, "reading mode");
++ return;
++ }
++
++ device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
++ if (!IS_ERR(device_type)) {
++ cdrom = strcmp(device_type, "cdrom") == 0;
++ kfree(device_type);
++ }
++
++ if (be->major == 0 && be->minor == 0) {
++ /* Front end dir is a number, which is used as the handle. */
++
++ char *p = strrchr(dev->otherend, '/') + 1;
++ long handle = simple_strtoul(p, NULL, 0);
++
++ be->major = major;
++ be->minor = minor;
++
++ err = vbd_create(be->blkif, handle, major, minor,
++ (NULL == strchr(be->mode, 'w')), cdrom);
++ if (err) {
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating vbd structure");
++ return;
++ }
++
++ err = xenvbd_sysfs_addif(dev);
++ if (err) {
++ vbd_free(&be->blkif->vbd);
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating sysfs entries");
++ return;
++ }
++
++ /* We're potentially connected now */
++ update_blkif_status(be->blkif);
++ }
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("%s", xenbus_strstate(frontend_state));
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ /* Ensure we connect even when two watches fire in
++		   close succession and we miss the intermediate value
++ of frontend_state. */
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ err = connect_ring(be);
++ if (err)
++ break;
++ update_blkif_status(be->blkif);
++ break;
++
++ case XenbusStateClosing:
++ blkif_disconnect(be->blkif);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++/* ** Connection ** */
++
++
++/**
++ * Write the physical details regarding the block device to the store, and
++ * switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++ struct xenbus_transaction xbt;
++ int err;
++ struct xenbus_device *dev = be->dev;
++
++ DPRINTK("%s", dev->otherend);
++
++ /* Supply the information about the device the frontend needs */
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ return;
++ }
++
++ err = blkback_barrier(xbt, be, 1);
++ if (err)
++ goto abort;
++
++ err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
++ vbd_size(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/sectors",
++ dev->nodename);
++ goto abort;
++ }
++
++ /* FIXME: use a typename instead */
++ err = xenbus_printf(xbt, dev->nodename, "info", "%u",
++ vbd_info(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/info",
++ dev->nodename);
++ goto abort;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
++ vbd_secsize(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/sector-size",
++ dev->nodename);
++ goto abort;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(dev, err, "ending transaction");
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (err)
++		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
++ dev->nodename);
++
++ return;
++ abort:
++ xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ char protocol[64] = "";
++ int err;
++
++ DPRINTK("%s", dev->otherend);
++
++ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++ "%63s", protocol, NULL);
++ if (err)
++ strcpy(protocol, "unspecified, assuming native");
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++ else {
++		xenbus_dev_fatal(dev, -EINVAL, "unknown fe protocol %s", protocol);
++		return -EINVAL;
++ }
++ printk(KERN_INFO
++	       "blkback: ring-ref %lu, event-channel %d, protocol %d (%s)\n",
++ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
++
++ /* Map the shared frame, irq etc. */
++ err = blkif_map(be->blkif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id blkback_ids[] = {
++ { "vbd" },
++ { "" }
++};
++
++
++static struct xenbus_driver blkback = {
++ .name = "vbd",
++ .owner = THIS_MODULE,
++ .ids = blkback_ids,
++ .probe = blkback_probe,
++ .remove = blkback_remove,
++ .otherend_changed = frontend_changed
++};
++
++
++void blkif_xenbus_init(void)
++{
++ xenbus_register_backend(&blkback);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/blkfront/blkfront.c linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/blkfront.c
+--- linux-2.6.18.8/drivers/xen/blkfront/blkfront.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/blkfront.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,936 @@
++/******************************************************************************
++ * blkfront.c
++ *
++ * XenLinux virtual block-device driver.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004, Christian Limpach
++ * Copyright (c) 2004, Andrew Warfield
++ * Copyright (c) 2005, Christopher Clark
++ * Copyright (c) 2005, XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include "block.h"
++#include <linux/cdrom.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <scsi/scsi.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <asm/maddr.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BLKIF_STATE_DISCONNECTED 0
++#define BLKIF_STATE_CONNECTED 1
++#define BLKIF_STATE_SUSPENDED 2
++
++#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
++ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
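++/*
++ * Illustrative figure: with a 4 KiB shared page BLK_RING_SIZE works
++ * out to 32 slots, so at most 32 requests of up to 11 segments each
++ * (352 segments) can be outstanding; the exact value is computed by
++ * __RING_SIZE() from the sring layout.
++ */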
++#define GRANT_INVALID_REF 0
++
++static void connect(struct blkfront_info *);
++static void blkfront_closing(struct xenbus_device *);
++static int blkfront_remove(struct xenbus_device *);
++static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
++static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
++
++static void kick_pending_request_queues(struct blkfront_info *);
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static void blkif_restart_queue(void *arg);
++static void blkif_recover(struct blkfront_info *);
++static void blkif_completion(struct blk_shadow *);
++static void blkif_free(struct blkfront_info *, int);
++
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and the ring buffer for communication with the backend, and
++ * inform the backend of the appropriate details for those. Switch to
++ * Initialised state.
++ */
++static int blkfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err, vdevice, i;
++ struct blkfront_info *info;
++
++ /* FIXME: Use dynamic device id if this is not set. */
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "virtual-device", "%i", &vdevice);
++ if (err != 1) {
++ /* go looking in the extended area instead */
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
++ "%i", &vdevice);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading virtual-device");
++ return err;
++ }
++ }
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++
++ info->xbdev = dev;
++ info->vdevice = vdevice;
++ info->connected = BLKIF_STATE_DISCONNECTED;
++ INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ info->shadow[i].req.id = i+1;
++ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++ /* Front end dir is a number, which is used as the id. */
++ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
++ dev->dev.driver_data = info;
++
++ err = talk_to_backend(dev, info);
++ if (err) {
++ kfree(info);
++ dev->dev.driver_data = NULL;
++ return err;
++ }
++
++ return 0;
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart. We tear down our blkif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int blkfront_resume(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("blkfront_resume: %s\n", dev->nodename);
++
++ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
++
++ err = talk_to_backend(dev, info);
++ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
++ blkif_recover(info);
++
++ return err;
++}
++
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++ struct blkfront_info *info)
++{
++ const char *message = NULL;
++ struct xenbus_transaction xbt;
++ int err;
++
++ /* Create shared ring, alloc event channel. */
++ err = setup_blkring(dev, info);
++ if (err)
++ goto out;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_blkring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", info->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++ XEN_IO_PROTO_ABI_NATIVE);
++ if (err) {
++ message = "writing protocol";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_blkring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++
++ return 0;
++
++ abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_blkring:
++ blkif_free(info, 0);
++ out:
++ return err;
++}
++
++
++static int setup_blkring(struct xenbus_device *dev,
++ struct blkfront_info *info)
++{
++ blkif_sring_t *sring;
++ int err;
++
++ info->ring_ref = GRANT_INVALID_REF;
++
++ sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ info->ring.sring = NULL;
++ goto fail;
++ }
++ info->ring_ref = err;
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
++ if (err <= 0) {
++ xenbus_dev_fatal(dev, err,
++ "bind_listening_port_to_irqhandler");
++ goto fail;
++ }
++ info->irq = err;
++
++ return 0;
++fail:
++ blkif_free(info, 0);
++ return err;
++}
++
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ struct block_device *bd;
++
++ DPRINTK("blkfront:backend_changed.\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateConnected:
++ connect(info);
++ break;
++
++ case XenbusStateClosing:
++ bd = bdget(info->dev);
++ if (bd == NULL)
++ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ down(&bd->bd_sem);
++#else
++ mutex_lock(&bd->bd_mutex);
++#endif
++ if (info->users > 0)
++ xenbus_dev_error(dev, -EBUSY,
++ "Device in use; refusing to close");
++ else
++ blkfront_closing(dev);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ up(&bd->bd_sem);
++#else
++ mutex_unlock(&bd->bd_mutex);
++#endif
++ bdput(bd);
++ break;
++ }
++}
++
++
++/* ** Connection ** */
++
++
++/*
++ * Invoked when the backend is finally 'ready' (and has produced the
++ * details about the physical device - #sectors, sector-size, etc).
++ */
++static void connect(struct blkfront_info *info)
++{
++ unsigned long long sectors;
++ unsigned long sector_size;
++ unsigned int binfo;
++ int err;
++
++ if ((info->connected == BLKIF_STATE_CONNECTED) ||
++ (info->connected == BLKIF_STATE_SUSPENDED) )
++ return;
++
++ DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
++
++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++ "sectors", "%Lu", &sectors,
++ "info", "%u", &binfo,
++ "sector-size", "%lu", &sector_size,
++ NULL);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err,
++ "reading backend fields at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++ "feature-barrier", "%lu", &info->feature_barrier,
++ NULL);
++ if (err)
++ info->feature_barrier = 0;
++
++ err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ err = xlvbd_sysfs_addif(info);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++
++ /* Kick pending requests. */
++ spin_lock_irq(&blkif_io_lock);
++ info->connected = BLKIF_STATE_CONNECTED;
++ kick_pending_request_queues(info);
++ spin_unlock_irq(&blkif_io_lock);
++
++ add_disk(info->gd);
++
++ info->is_ready = 1;
++}
++
++/**
++ * Handle the change of state of the backend to Closing. We must delete our
++ * device-layer structures now, to ensure that writes are flushed through to
++ * the backend. Once is this done, we can switch to Closed in
++ * acknowledgement.
++ */
++static void blkfront_closing(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ unsigned long flags;
++
++ DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
++
++ if (info->rq == NULL)
++ goto out;
++
++ spin_lock_irqsave(&blkif_io_lock, flags);
++ /* No more blkif_request(). */
++ blk_stop_queue(info->rq);
++ /* No more gnttab callback work. */
++ gnttab_cancel_free_callback(&info->callback);
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++ /* Flush gnttab callback work. Must be done with no locks held. */
++ flush_scheduled_work();
++
++ xlvbd_sysfs_delif(info);
++
++ xlvbd_del(info);
++
++ out:
++ xenbus_frontend_closed(dev);
++}
++
++
++static int blkfront_remove(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
++
++ blkif_free(info, 0);
++
++ kfree(info);
++
++ return 0;
++}
++
++
++static inline int GET_ID_FROM_FREELIST(
++ struct blkfront_info *info)
++{
++ unsigned long free = info->shadow_free;
++	BUG_ON(free >= BLK_RING_SIZE);
++ info->shadow_free = info->shadow[free].req.id;
++ info->shadow[free].req.id = 0x0fffffee; /* debug */
++ return free;
++}
++
++static inline void ADD_ID_TO_FREELIST(
++ struct blkfront_info *info, unsigned long id)
++{
++ info->shadow[id].req.id = info->shadow_free;
++ info->shadow[id].request = 0;
++ info->shadow_free = id;
++}
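++
++/*
++ * The shadow free list threads unused slots through req.id: slot i is
++ * initialised to point at i+1 (see blkfront_probe()), so with
++ * shadow_free == 0, GET_ID_FROM_FREELIST() hands out id 0 and advances
++ * shadow_free to 1; ADD_ID_TO_FREELIST(info, 0) later pushes slot 0
++ * back onto the head of the list.
++ */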
++
++static inline void flush_requests(struct blkfront_info *info)
++{
++ int notify;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
++
++ if (notify)
++ notify_remote_via_irq(info->irq);
++}
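++
++/*
++ * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod and sets
++ * 'notify' only when the backend has signalled (via its req_event
++ * index) that it is waiting for more work, so the event channel is
++ * kicked at most once per batch of queued requests.
++ */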
++
++static void kick_pending_request_queues(struct blkfront_info *info)
++{
++ if (!RING_FULL(&info->ring)) {
++ /* Re-enable calldowns. */
++ blk_start_queue(info->rq);
++ /* Kick things off immediately. */
++ do_blkif_request(info->rq);
++ }
++}
++
++static void blkif_restart_queue(void *arg)
++{
++ struct blkfront_info *info = (struct blkfront_info *)arg;
++ spin_lock_irq(&blkif_io_lock);
++ if (info->connected == BLKIF_STATE_CONNECTED)
++ kick_pending_request_queues(info);
++ spin_unlock_irq(&blkif_io_lock);
++}
++
++static void blkif_restart_queue_callback(void *arg)
++{
++ struct blkfront_info *info = (struct blkfront_info *)arg;
++ schedule_work(&info->work);
++}
++
++int blkif_open(struct inode *inode, struct file *filep)
++{
++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++ info->users++;
++ return 0;
++}
++
++
++int blkif_release(struct inode *inode, struct file *filep)
++{
++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++ info->users--;
++ if (info->users == 0) {
++ /* Check whether we have been instructed to close. We will
++ have ignored this request initially, as the device was
++ still mounted. */
++ struct xenbus_device * dev = info->xbdev;
++ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
++
++ if (state == XenbusStateClosing && info->is_ready)
++ blkfront_closing(dev);
++ }
++ return 0;
++}
++
++
++int blkif_ioctl(struct inode *inode, struct file *filep,
++ unsigned command, unsigned long argument)
++{
++ int i;
++
++ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
++ command, (long)argument, inode->i_rdev);
++
++ switch (command) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++ case HDIO_GETGEO: {
++ struct block_device *bd = inode->i_bdev;
++ struct hd_geometry geo;
++ int ret;
++
++ if (!argument)
++ return -EINVAL;
++
++ geo.start = get_start_sect(bd);
++ ret = blkif_getgeo(bd, &geo);
++ if (ret)
++ return ret;
++
++ if (copy_to_user((struct hd_geometry __user *)argument, &geo,
++ sizeof(geo)))
++ return -EFAULT;
++
++ return 0;
++ }
++#endif
++ case CDROMMULTISESSION:
++ DPRINTK("FIXME: support multisession CDs later\n");
++ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
++ if (put_user(0, (char __user *)(argument + i)))
++ return -EFAULT;
++ return 0;
++
++ case CDROM_GET_CAPABILITY: {
++ struct blkfront_info *info =
++ inode->i_bdev->bd_disk->private_data;
++ struct gendisk *gd = info->gd;
++ if (gd->flags & GENHD_FL_CD)
++ return 0;
++ return -EINVAL;
++ }
++ default:
++ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
++ command);*/
++ return -EINVAL; /* same return as native Linux */
++ }
++
++ return 0;
++}
++
++
++int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
++{
++ /* We don't have real geometry info, but let's at least return
++ values consistent with the size of the device */
++ sector_t nsect = get_capacity(bd->bd_disk);
++ sector_t cylinders = nsect;
++
++ hg->heads = 0xff;
++ hg->sectors = 0x3f;
++ sector_div(cylinders, hg->heads * hg->sectors);
++ hg->cylinders = cylinders;
++ if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
++ hg->cylinders = 0xffff;
++ return 0;
++}
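++
++/*
++ * Worked example of the synthetic geometry: with heads == 255 and
++ * sectors == 63, a 16 GiB disk (33554432 sectors) reports
++ * cylinders == 33554432 / (255 * 63) == 2088, and since
++ * 2089 * 255 * 63 >= nsect the 0xffff clamp is not taken. The clamp
++ * only fires when the true cylinder count no longer fits in the
++ * 16-bit hd_geometry cylinders field.
++ */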
++
++
++/*
++ * blkif_queue_request
++ *
++ * request block io
++ *
++ * id: for guest use only.
++ * operation: BLKIF_OP_{READ,WRITE,PROBE}
++ * buffer: buffer to read/write into. This should be a
++ * virtual address in the guest OS.
++ */
++static int blkif_queue_request(struct request *req)
++{
++ struct blkfront_info *info = req->rq_disk->private_data;
++ unsigned long buffer_mfn;
++ blkif_request_t *ring_req;
++ struct bio *bio;
++ struct bio_vec *bvec;
++ int idx;
++ unsigned long id;
++ unsigned int fsect, lsect;
++ int ref;
++ grant_ref_t gref_head;
++
++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
++ return 1;
++
++ if (gnttab_alloc_grant_references(
++ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
++ gnttab_request_free_callback(
++ &info->callback,
++ blkif_restart_queue_callback,
++ info,
++ BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ return 1;
++ }
++
++ /* Fill out a communications ring structure. */
++ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
++ id = GET_ID_FROM_FREELIST(info);
++ info->shadow[id].request = (unsigned long)req;
++
++ ring_req->id = id;
++ ring_req->sector_number = (blkif_sector_t)req->sector;
++ ring_req->handle = info->handle;
++
++ ring_req->operation = rq_data_dir(req) ?
++ BLKIF_OP_WRITE : BLKIF_OP_READ;
++ if (blk_barrier_rq(req))
++ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
++
++ ring_req->nr_segments = 0;
++ rq_for_each_bio (bio, req) {
++ bio_for_each_segment (bvec, bio, idx) {
++ BUG_ON(ring_req->nr_segments
++ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
++ fsect = bvec->bv_offset >> 9;
++ lsect = fsect + (bvec->bv_len >> 9) - 1;
++ /* install a grant reference. */
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(
++ ref,
++ info->xbdev->otherend_id,
++ buffer_mfn,
++ rq_data_dir(req) ? GTF_readonly : 0 );
++
++ info->shadow[id].frame[ring_req->nr_segments] =
++ mfn_to_pfn(buffer_mfn);
++
++ ring_req->seg[ring_req->nr_segments] =
++ (struct blkif_request_segment) {
++ .gref = ref,
++ .first_sect = fsect,
++ .last_sect = lsect };
++
++ ring_req->nr_segments++;
++ }
++ }
++
++ info->ring.req_prod_pvt++;
++
++ /* Keep a private copy so we can reissue requests when recovering. */
++ info->shadow[id].req = *ring_req;
++
++ gnttab_free_grant_references(gref_head);
++
++ return 0;
++}
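++
++/*
++ * In outline, blkif_queue_request() does three things per request:
++ * (1) reserves one grant reference per possible segment up front,
++ * registering a gnttab free callback that restarts the queue if the
++ * reservation fails; (2) grants the backend access to every bio_vec
++ * page, read-only for writes, and records the frame in the shadow
++ * entry; (3) keeps a private copy of the ring entry in
++ * info->shadow[id].req so blkif_recover() can replay it after a
++ * suspend/resume.
++ */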
++
++/*
++ * do_blkif_request
++ * read a block; request is in a request queue
++ */
++void do_blkif_request(request_queue_t *rq)
++{
++ struct blkfront_info *info = NULL;
++ struct request *req;
++ int queued;
++
++ DPRINTK("Entered do_blkif_request\n");
++
++ queued = 0;
++
++ while ((req = elv_next_request(rq)) != NULL) {
++ info = req->rq_disk->private_data;
++ if (!blk_fs_request(req)) {
++ end_request(req, 0);
++ continue;
++ }
++
++ if (RING_FULL(&info->ring))
++ goto wait;
++
++ DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
++ "(%u/%li) buffer:%p [%s]\n",
++ req, req->cmd, (long long)req->sector,
++ req->current_nr_sectors,
++ req->nr_sectors, req->buffer,
++ rq_data_dir(req) ? "write" : "read");
++
++
++ blkdev_dequeue_request(req);
++ if (blkif_queue_request(req)) {
++ blk_requeue_request(rq, req);
++ wait:
++ /* Avoid pointless unplugs. */
++ blk_stop_queue(rq);
++ break;
++ }
++
++ queued++;
++ }
++
++ if (queued != 0)
++ flush_requests(info);
++}
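++
++/*
++ * Flow control here pairs with the interrupt path: blk_stop_queue()
++ * parks the request queue when the shared ring fills, and
++ * kick_pending_request_queues() (called from blkif_int() and from the
++ * gnttab restart callback via blkif_restart_queue()) re-enables it
++ * with blk_start_queue() once there is room again.
++ */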
++
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ struct request *req;
++ blkif_response_t *bret;
++ RING_IDX i, rp;
++ unsigned long flags;
++ struct blkfront_info *info = (struct blkfront_info *)dev_id;
++ int uptodate;
++
++ spin_lock_irqsave(&blkif_io_lock, flags);
++
++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++ return IRQ_HANDLED;
++ }
++
++ again:
++ rp = info->ring.sring->rsp_prod;
++ rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++ for (i = info->ring.rsp_cons; i != rp; i++) {
++ unsigned long id;
++ int ret;
++
++ bret = RING_GET_RESPONSE(&info->ring, i);
++ id = bret->id;
++ req = (struct request *)info->shadow[id].request;
++
++ blkif_completion(&info->shadow[id]);
++
++ ADD_ID_TO_FREELIST(info, id);
++
++ uptodate = (bret->status == BLKIF_RSP_OKAY);
++ switch (bret->operation) {
++ case BLKIF_OP_WRITE_BARRIER:
++ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
++ printk("blkfront: %s: write barrier op failed\n",
++ info->gd->disk_name);
++ uptodate = -EOPNOTSUPP;
++ info->feature_barrier = 0;
++ xlvbd_barrier(info);
++ }
++ /* fall through */
++ case BLKIF_OP_READ:
++ case BLKIF_OP_WRITE:
++ if (unlikely(bret->status != BLKIF_RSP_OKAY))
++ DPRINTK("Bad return from blkdev data "
++ "request: %x\n", bret->status);
++
++ ret = end_that_request_first(req, uptodate,
++ req->hard_nr_sectors);
++ BUG_ON(ret);
++ end_that_request_last(req, uptodate);
++ break;
++ default:
++ BUG();
++ }
++ }
++
++ info->ring.rsp_cons = i;
++
++ if (i != info->ring.req_prod_pvt) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++ if (more_to_do)
++ goto again;
++ } else
++ info->ring.sring->rsp_event = i + 1;
++
++ kick_pending_request_queues(info);
++
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++ return IRQ_HANDLED;
++}
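++
++/*
++ * The 'again' loop is the standard race-free ring-consumer pattern:
++ * while requests are still outstanding,
++ * RING_FINAL_CHECK_FOR_RESPONSES() re-arms sring->rsp_event and
++ * re-checks, closing the window where the backend queued a response
++ * after the rsp_prod read above; otherwise rsp_event is simply set
++ * one past the last response seen.
++ */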
++
++static void blkif_free(struct blkfront_info *info, int suspend)
++{
++ /* Prevent new requests being issued until we fix things up. */
++ spin_lock_irq(&blkif_io_lock);
++ info->connected = suspend ?
++ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
++ /* No more blkif_request(). */
++ if (info->rq)
++ blk_stop_queue(info->rq);
++ /* No more gnttab callback work. */
++ gnttab_cancel_free_callback(&info->callback);
++ spin_unlock_irq(&blkif_io_lock);
++
++ /* Flush gnttab callback work. Must be done with no locks held. */
++ flush_scheduled_work();
++
++ /* Free resources associated with old device channel. */
++ if (info->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(info->ring_ref,
++ (unsigned long)info->ring.sring);
++ info->ring_ref = GRANT_INVALID_REF;
++ info->ring.sring = NULL;
++ }
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++}
++
++static void blkif_completion(struct blk_shadow *s)
++{
++ int i;
++ for (i = 0; i < s->req.nr_segments; i++)
++ gnttab_end_foreign_access(s->req.seg[i].gref, 0UL);
++}
++
++static void blkif_recover(struct blkfront_info *info)
++{
++ int i;
++ blkif_request_t *req;
++ struct blk_shadow *copy;
++ int j;
++
++ /* Stage 1: Make a safe copy of the shadow state. */
++ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH);
++ memcpy(copy, info->shadow, sizeof(info->shadow));
++
++ /* Stage 2: Set up free list. */
++ memset(&info->shadow, 0, sizeof(info->shadow));
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ info->shadow[i].req.id = i+1;
++ info->shadow_free = info->ring.req_prod_pvt;
++ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++ /* Stage 3: Find pending requests and requeue them. */
++ for (i = 0; i < BLK_RING_SIZE; i++) {
++ /* Not in use? */
++ if (copy[i].request == 0)
++ continue;
++
++ /* Grab a request slot and copy shadow state into it. */
++ req = RING_GET_REQUEST(
++ &info->ring, info->ring.req_prod_pvt);
++ *req = copy[i].req;
++
++ /* We get a new request id, and must reset the shadow state. */
++ req->id = GET_ID_FROM_FREELIST(info);
++ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
++
++ /* Rewrite any grant references invalidated by susp/resume. */
++ for (j = 0; j < req->nr_segments; j++)
++ gnttab_grant_foreign_access_ref(
++ req->seg[j].gref,
++ info->xbdev->otherend_id,
++ pfn_to_mfn(info->shadow[req->id].frame[j]),
++ rq_data_dir((struct request *)
++ info->shadow[req->id].request) ?
++ GTF_readonly : 0);
++ info->shadow[req->id].req = *req;
++
++ info->ring.req_prod_pvt++;
++ }
++
++ kfree(copy);
++
++ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++
++ spin_lock_irq(&blkif_io_lock);
++
++ /* Now safe for us to use the shared ring */
++ info->connected = BLKIF_STATE_CONNECTED;
++
++ /* Send off requeued requests */
++ flush_requests(info);
++
++ /* Kick any other new requests queued since we resumed */
++ kick_pending_request_queues(info);
++
++ spin_unlock_irq(&blkif_io_lock);
++}
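++
++/*
++ * The staged recovery above is safe because the backend sees a fresh,
++ * empty ring after migration or resume: stage 1 snapshots the shadow
++ * state, stage 2 rebuilds the free list, and stage 3 re-queues every
++ * request that was in flight, re-issuing the grant operations since
++ * the old mappings did not survive suspend.
++ */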
++
++int blkfront_is_ready(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++
++ return info->is_ready;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id blkfront_ids[] = {
++ { "vbd" },
++ { "" }
++};
++MODULE_ALIAS("xen:vbd");
++
++static struct xenbus_driver blkfront = {
++ .name = "vbd",
++ .owner = THIS_MODULE,
++ .ids = blkfront_ids,
++ .probe = blkfront_probe,
++ .remove = blkfront_remove,
++ .resume = blkfront_resume,
++ .otherend_changed = backend_changed,
++ .is_ready = blkfront_is_ready,
++};
++
++
++static int __init xlblk_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&blkfront);
++}
++module_init(xlblk_init);
++
++
++static void __exit xlblk_exit(void)
++{
++ return xenbus_unregister_driver(&blkfront);
++}
++module_exit(xlblk_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/blkfront/block.h linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/block.h
+--- linux-2.6.18.8/drivers/xen/blkfront/block.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/block.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,158 @@
++/******************************************************************************
++ * block.h
++ *
++ * Shared definitions between all levels of XenLinux Virtual block devices.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_BLOCK_H__
++#define __XEN_DRIVERS_BLOCK_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/hdreg.h>
++#include <linux/blkdev.h>
++#include <linux/major.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/ring.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/uaccess.h>
++
++#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
++
++#if 0
++#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
++#else
++#define DPRINTK_IOCTL(_f, _a...) ((void)0)
++#endif
++
++struct xlbd_type_info
++{
++ int partn_shift;
++ int disks_per_major;
++ char *devname;
++ char *diskname;
++};
++
++struct xlbd_major_info
++{
++ int major;
++ int index;
++ int usage;
++ struct xlbd_type_info *type;
++};
++
++struct blk_shadow {
++ blkif_request_t req;
++ unsigned long request;
++ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
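++
++/*
++ * __RING_SIZE() computes how many ring entries fit in one shared page
++ * and rounds down to a power of two; with 4 KiB pages and the blkif
++ * request/response layout this is expected to work out to 32, which
++ * sizes the shadow[] array below.
++ */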
++
++/*
++ * We have one of these per vbd, whether ide, scsi or 'other'. They
++ * hang in private_data off the gendisk structure. We may end up
++ * putting all kinds of interesting stuff here :-)
++ */
++struct blkfront_info
++{
++ struct xenbus_device *xbdev;
++ dev_t dev;
++ struct gendisk *gd;
++ int vdevice;
++ blkif_vdev_t handle;
++ int connected;
++ int ring_ref;
++ blkif_front_ring_t ring;
++ unsigned int irq;
++ struct xlbd_major_info *mi;
++ request_queue_t *rq;
++ struct work_struct work;
++ struct gnttab_free_callback callback;
++ struct blk_shadow shadow[BLK_RING_SIZE];
++ unsigned long shadow_free;
++ int feature_barrier;
++ int is_ready;
++
++ /**
++ * The number of people holding this device open. We won't allow a
++ * hot-unplug unless this is 0.
++ */
++ int users;
++};
++
++extern spinlock_t blkif_io_lock;
++
++extern int blkif_open(struct inode *inode, struct file *filep);
++extern int blkif_release(struct inode *inode, struct file *filep);
++extern int blkif_ioctl(struct inode *inode, struct file *filep,
++ unsigned command, unsigned long argument);
++extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
++extern int blkif_check(dev_t dev);
++extern int blkif_revalidate(dev_t dev);
++extern void do_blkif_request (request_queue_t *rq);
++
++/* Virtual block-device subsystem. */
++/* Note that xlvbd_add doesn't call add_disk for you: you're expected
++ to call add_disk on info->gd once the disk is properly connected
++ up. */
++int xlvbd_add(blkif_sector_t capacity, int device,
++ u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
++void xlvbd_del(struct blkfront_info *info);
++int xlvbd_barrier(struct blkfront_info *info);
++
++#ifdef CONFIG_SYSFS
++int xlvbd_sysfs_addif(struct blkfront_info *info);
++void xlvbd_sysfs_delif(struct blkfront_info *info);
++#else
++static inline int xlvbd_sysfs_addif(struct blkfront_info *info)
++{
++ return 0;
++}
++
++static inline void xlvbd_sysfs_delif(struct blkfront_info *info)
++{
++}
++#endif
++
++#endif /* __XEN_DRIVERS_BLOCK_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/blkfront/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/Makefile
+--- linux-2.6.18.8/drivers/xen/blkfront/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,5 @@
++
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o
++
++xenblk-objs := blkfront.o vbd.o
++
+diff -rpuN linux-2.6.18.8/drivers/xen/blkfront/vbd.c linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/vbd.c
+--- linux-2.6.18.8/drivers/xen/blkfront/vbd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blkfront/vbd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,460 @@
++/******************************************************************************
++ * vbd.c
++ *
++ * XenLinux virtual block-device driver (xvd).
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "block.h"
++#include <linux/blkdev.h>
++#include <linux/list.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BLKIF_MAJOR(dev) ((dev)>>8)
++#define BLKIF_MINOR(dev) ((dev) & 0xff)
++
++#define EXT_SHIFT 28
++#define EXTENDED (1<<EXT_SHIFT)
++#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
++#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
++
++/*
++ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
++ * potentially combinations of the two) in the naming scheme and in a few other
++ * places.
++ */
++
++#define NUM_IDE_MAJORS 10
++#define NUM_SCSI_MAJORS 17
++#define NUM_VBD_MAJORS 2
++
++static struct xlbd_type_info xlbd_ide_type = {
++ .partn_shift = 6,
++ .disks_per_major = 2,
++ .devname = "ide",
++ .diskname = "hd",
++};
++
++static struct xlbd_type_info xlbd_scsi_type = {
++ .partn_shift = 4,
++ .disks_per_major = 16,
++ .devname = "sd",
++ .diskname = "sd",
++};
++
++static struct xlbd_type_info xlbd_vbd_type = {
++ .partn_shift = 4,
++ .disks_per_major = 16,
++ .devname = "xvd",
++ .diskname = "xvd",
++};
++
++static struct xlbd_type_info xlbd_vbd_type_ext = {
++ .partn_shift = 8,
++ .disks_per_major = 256,
++ .devname = "xvd",
++ .diskname = "xvd",
++};
++
++static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
++ NUM_VBD_MAJORS];
++
++#define XLBD_MAJOR_IDE_START 0
++#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
++#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
++
++#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
++#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
++#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
++
++static struct block_device_operations xlvbd_block_fops =
++{
++ .owner = THIS_MODULE,
++ .open = blkif_open,
++ .release = blkif_release,
++ .ioctl = blkif_ioctl,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ .getgeo = blkif_getgeo
++#endif
++};
++
++DEFINE_SPINLOCK(blkif_io_lock);
++
++static struct xlbd_major_info *
++xlbd_alloc_major_info(int major, int minor, int index)
++{
++ struct xlbd_major_info *ptr;
++ int do_register;
++
++ ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
++ if (ptr == NULL)
++ return NULL;
++
++ ptr->major = major;
++ do_register = 1;
++
++ switch (index) {
++ case XLBD_MAJOR_IDE_RANGE:
++ ptr->type = &xlbd_ide_type;
++ ptr->index = index - XLBD_MAJOR_IDE_START;
++ break;
++ case XLBD_MAJOR_SCSI_RANGE:
++ ptr->type = &xlbd_scsi_type;
++ ptr->index = index - XLBD_MAJOR_SCSI_START;
++ break;
++ case XLBD_MAJOR_VBD_RANGE:
++ ptr->index = 0;
++ if ((index - XLBD_MAJOR_VBD_START) == 0)
++ ptr->type = &xlbd_vbd_type;
++ else
++ ptr->type = &xlbd_vbd_type_ext;
++
++ /*
++ * if someone already registered block major 202,
++ * don't try to register it again
++ */
++ if (major_info[XLBD_MAJOR_VBD_START] != NULL)
++ do_register = 0;
++ break;
++ }
++
++ if (do_register) {
++ if (register_blkdev(ptr->major, ptr->type->devname)) {
++ kfree(ptr);
++ return NULL;
++ }
++
++ printk("xen-vbd: registered block device major %i\n", ptr->major);
++ }
++
++ major_info[index] = ptr;
++ return ptr;
++}
++
++static struct xlbd_major_info *
++xlbd_get_major_info(int major, int minor, int vdevice)
++{
++ struct xlbd_major_info *mi;
++ int index;
++
++ switch (major) {
++ case IDE0_MAJOR: index = 0; break;
++ case IDE1_MAJOR: index = 1; break;
++ case IDE2_MAJOR: index = 2; break;
++ case IDE3_MAJOR: index = 3; break;
++ case IDE4_MAJOR: index = 4; break;
++ case IDE5_MAJOR: index = 5; break;
++ case IDE6_MAJOR: index = 6; break;
++ case IDE7_MAJOR: index = 7; break;
++ case IDE8_MAJOR: index = 8; break;
++ case IDE9_MAJOR: index = 9; break;
++ case SCSI_DISK0_MAJOR: index = 10; break;
++ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
++ index = 11 + major - SCSI_DISK1_MAJOR;
++ break;
++ case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
++ index = 18 + major - SCSI_DISK8_MAJOR;
++ break;
++ case SCSI_CDROM_MAJOR: index = 26; break;
++ default:
++ if (!VDEV_IS_EXTENDED(vdevice))
++ index = 27;
++ else
++ index = 28;
++ break;
++ }
++
++ mi = ((major_info[index] != NULL) ? major_info[index] :
++ xlbd_alloc_major_info(major, minor, index));
++ if (mi)
++ mi->usage++;
++ return mi;
++}
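++
++/*
++ * Examples of the mapping above: major 3 (IDE0_MAJOR) maps to index 0
++ * and the "hd" naming scheme, major 8 (SCSI_DISK0_MAJOR) to index 10
++ * and "sd", and any unrecognised major falls through to index 27 or
++ * 28, the "xvd" types registered under block major 202.
++ */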
++
++static void
++xlbd_put_major_info(struct xlbd_major_info *mi)
++{
++ mi->usage--;
++ /* XXX: release major if 0 */
++}
++
++static int
++xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
++{
++ request_queue_t *rq;
++
++ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
++ if (rq == NULL)
++ return -1;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++ elevator_init(rq, "noop");
++#else
++ elevator_init(rq, &elevator_noop);
++#endif
++
++ /* Hard sector size and max sectors impersonate the equiv. hardware. */
++ blk_queue_hardsect_size(rq, sector_size);
++ blk_queue_max_sectors(rq, 512);
++
++ /* Each segment in a request is up to an aligned page in size. */
++ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
++ blk_queue_max_segment_size(rq, PAGE_SIZE);
++
++ /* Ensure a merged request will fit in a single I/O ring slot. */
++ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++
++ /* Make sure buffer addresses are sector-aligned. */
++ blk_queue_dma_alignment(rq, 511);
++
++ /* Make sure we don't use bounce buffers. */
++ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
++
++ gd->queue = rq;
++
++ return 0;
++}
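++
++/*
++ * Taken together these limits guarantee that a merged request fits in
++ * one ring slot: at most BLKIF_MAX_SEGMENTS_PER_REQUEST segments of
++ * one aligned page each, i.e. 44 KiB assuming the usual 11 segments
++ * and 4 KiB pages, so the 512-sector (256 KiB) cap is never the
++ * binding limit.
++ */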
++
++static int
++xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice,
++ u16 vdisk_info, u16 sector_size,
++ struct blkfront_info *info)
++{
++ struct gendisk *gd;
++ struct xlbd_major_info *mi;
++ int nr_minors = 1;
++ int err = -ENODEV;
++ unsigned int offset;
++
++ BUG_ON(info->gd != NULL);
++ BUG_ON(info->mi != NULL);
++ BUG_ON(info->rq != NULL);
++
++ mi = xlbd_get_major_info(major, minor, vdevice);
++ if (mi == NULL)
++ goto out;
++ info->mi = mi;
++
++ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
++ nr_minors = 1 << mi->type->partn_shift;
++
++ gd = alloc_disk(nr_minors);
++ if (gd == NULL)
++ goto out;
++
++ offset = mi->index * mi->type->disks_per_major +
++ (minor >> mi->type->partn_shift);
++ if (nr_minors > 1) {
++ if (offset < 26) {
++ sprintf(gd->disk_name, "%s%c",
++ mi->type->diskname, 'a' + offset);
++ } else {
++ sprintf(gd->disk_name, "%s%c%c",
++ mi->type->diskname,
++ 'a' + ((offset/26)-1), 'a' + (offset%26));
++ }
++ } else {
++ if (offset < 26) {
++ sprintf(gd->disk_name, "%s%c%d",
++ mi->type->diskname,
++ 'a' + offset,
++ minor & ((1 << mi->type->partn_shift) - 1));
++ } else {
++ sprintf(gd->disk_name, "%s%c%c%d",
++ mi->type->diskname,
++ 'a' + ((offset/26)-1), 'a' + (offset%26),
++ minor & ((1 << mi->type->partn_shift) - 1));
++ }
++ }
++
++ gd->major = mi->major;
++ gd->first_minor = minor;
++ gd->fops = &xlvbd_block_fops;
++ gd->private_data = info;
++ gd->driverfs_dev = &(info->xbdev->dev);
++ set_capacity(gd, capacity);
++
++ if (xlvbd_init_blk_queue(gd, sector_size)) {
++ del_gendisk(gd);
++ goto out;
++ }
++
++ info->rq = gd->queue;
++ info->gd = gd;
++
++ if (info->feature_barrier)
++ xlvbd_barrier(info);
++
++ if (vdisk_info & VDISK_READONLY)
++ set_disk_ro(gd, 1);
++
++ if (vdisk_info & VDISK_REMOVABLE)
++ gd->flags |= GENHD_FL_REMOVABLE;
++
++ if (vdisk_info & VDISK_CDROM)
++ gd->flags |= GENHD_FL_CD;
++
++ return 0;
++
++ out:
++ if (mi)
++ xlbd_put_major_info(mi);
++ info->mi = NULL;
++ return err;
++}
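++
++/*
++ * Naming examples from the cascade above, for the "xvd" type with
++ * partn_shift == 4: minor 0 becomes "xvda" (a whole disk owning 16
++ * minors), minor 1 becomes "xvda1" (a bare partition), minor 16
++ * becomes "xvdb", and offsets past 'z' wrap to two letters, so
++ * offset 26 yields "xvdaa".
++ */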
++
++int
++xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
++ u16 sector_size, struct blkfront_info *info)
++{
++ struct block_device *bd;
++ int err = 0;
++ int major, minor;
++
++ if ((vdevice>>EXT_SHIFT) > 1) {
++ /* this is above the extended range; something is wrong */
++ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice);
++ return -ENODEV;
++ }
++
++ if (!VDEV_IS_EXTENDED(vdevice)) {
++ major = BLKIF_MAJOR(vdevice);
++ minor = BLKIF_MINOR(vdevice);
++ } else {
++ major = 202;
++ minor = BLKIF_MINOR_EXT(vdevice);
++ }
++
++ info->dev = MKDEV(major, minor);
++ bd = bdget(info->dev);
++ if (bd == NULL)
++ return -ENODEV;
++
++ err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info,
++ sector_size, info);
++
++ bdput(bd);
++ return err;
++}
++
++void
++xlvbd_del(struct blkfront_info *info)
++{
++ if (info->mi == NULL)
++ return;
++
++ BUG_ON(info->gd == NULL);
++ del_gendisk(info->gd);
++ put_disk(info->gd);
++ info->gd = NULL;
++
++ xlbd_put_major_info(info->mi);
++ info->mi = NULL;
++
++ BUG_ON(info->rq == NULL);
++ blk_cleanup_queue(info->rq);
++ info->rq = NULL;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++int
++xlvbd_barrier(struct blkfront_info *info)
++{
++ int err;
++
++ err = blk_queue_ordered(info->rq,
++ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL);
++ if (err)
++ return err;
++ printk(KERN_INFO "blkfront: %s: barriers %s\n",
++ info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
++ return 0;
++}
++#else
++int
++xlvbd_barrier(struct blkfront_info *info)
++{
++ printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name);
++ return -ENOSYS;
++}
++#endif
++
++#ifdef CONFIG_SYSFS
++static ssize_t show_media(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct blkfront_info *info = xendev->dev.driver_data;
++
++ if (info->gd->flags & GENHD_FL_CD)
++ return sprintf(buf, "cdrom\n");
++ return sprintf(buf, "disk\n");
++}
++
++static struct device_attribute xlvbd_attrs[] = {
++ __ATTR(media, S_IRUGO, show_media, NULL),
++};
++
++int xlvbd_sysfs_addif(struct blkfront_info *info)
++{
++ int i;
++ int error = 0;
++
++ for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) {
++ error = device_create_file(info->gd->driverfs_dev,
++ &xlvbd_attrs[i]);
++ if (error)
++ goto fail;
++ }
++ return 0;
++
++fail:
++ while (--i >= 0)
++ device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]);
++ return error;
++}
++
++void xlvbd_sysfs_delif(struct blkfront_info *info)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++)
++ device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]);
++}
++
++#endif /* CONFIG_SYSFS */
+diff -rpuN linux-2.6.18.8/drivers/xen/blktap/blktap.c linux-2.6.18-xen-3.3.0/drivers/xen/blktap/blktap.c
+--- linux-2.6.18.8/drivers/xen/blktap/blktap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blktap/blktap.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1681 @@
++/******************************************************************************
++ * drivers/xen/blktap/blktap.c
++ *
++ * Back-end driver for user level virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. Requests
++ * are remapped to a user-space memory region.
++ *
++ * Based on the blkback driver code.
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Clean ups and fix ups:
++ * Copyright (c) 2006, Steven Rostedt - Red Hat, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/driver_util.h>
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/errno.h>
++#include <linux/major.h>
++#include <linux/gfp.h>
++#include <linux/poll.h>
++#include <linux/delay.h>
++#include <asm/tlbflush.h>
++
++#define MAX_TAP_DEV 256 /*the maximum number of tapdisk ring devices */
++#define MAX_DEV_NAME 100 /*the max tapdisk ring device name e.g. blktap0 */
++
++/*
++ * The maximum number of requests that can be outstanding at any time
++ * is determined by
++ *
++ * [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST]
++ *
++ * where mmap_alloc < MAX_DYNAMIC_MEM.
++ *
++ * TODO:
++ * mmap_alloc is initialised to 2 and should be adjustable on the fly via
++ * sysfs.
++ */
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++#define MAX_DYNAMIC_MEM BLK_RING_SIZE
++#define MAX_PENDING_REQS BLK_RING_SIZE
++#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
++#define MMAP_VADDR(_start, _req,_seg) \
++ (_start + \
++ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
++ ((_seg) * PAGE_SIZE))
++static int blkif_reqs = MAX_PENDING_REQS;
++static int mmap_pages = MMAP_PAGES;
++
++#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
++ * have a bunch of pages reserved for shared
++ * memory rings.
++ */
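++
++/*
++ * The resulting mmap layout, as MMAP_VADDR computes it: RING_PAGES
++ * shared-ring page(s) first, then one data page per (request,
++ * segment) pair at user_vstart +
++ * (req * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg) * PAGE_SIZE.
++ * Assuming 4 KiB pages (BLK_RING_SIZE == 32) and 11 segments per
++ * request, that is 1 + 32 * 11 == 353 pages, which is exactly what
++ * blktap_mmap() insists on.
++ */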
++
++/*Data struct handed back to userspace for tapdisk device to VBD mapping*/
++typedef struct domid_translate {
++ unsigned short domid;
++ unsigned short busid;
++} domid_translate_t;
++
++typedef struct domid_translate_ext {
++ unsigned short domid;
++ u32 busid;
++} domid_translate_ext_t;
++
++/*Data struct associated with each of the tapdisk devices*/
++typedef struct tap_blkif {
++ struct vm_area_struct *vma; /*Shared memory area */
++ unsigned long rings_vstart; /*Kernel memory mapping */
++ unsigned long user_vstart; /*User memory mapping */
++ unsigned long dev_inuse; /*One process opens device at a time. */
++ unsigned long dev_pending; /*In process of being opened */
++ unsigned long ring_ok; /*make this ring->state */
++ blkif_front_ring_t ufe_ring; /*Rings up to user space. */
++ wait_queue_head_t wait; /*for poll */
++ unsigned long mode; /*current switching mode */
++ int minor; /*Minor number for tapdisk device */
++ pid_t pid; /*tapdisk process id */
++ enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace
++ shutdown */
++ unsigned long *idx_map; /*Record the user ring id to kern
++ [req id, idx] tuple */
++ blkif_t *blkif; /*Associate blkif with tapdev */
++ struct domid_translate_ext trans; /*Translation from domid to bus. */
++} tap_blkif_t;
++
++static struct tap_blkif *tapfds[MAX_TAP_DEV];
++static int blktap_next_minor;
++
++module_param(blkif_reqs, int, 0);
++/* Run-time switchable: /sys/module/blktap/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++ blkif_t *blkif;
++ u64 id;
++ unsigned short mem_idx;
++ int nr_pages;
++ atomic_t pendcnt;
++ unsigned short operation;
++ int status;
++ struct list_head free_list;
++ int inuse;
++} pending_req_t;
++
++static pending_req_t *pending_reqs[MAX_PENDING_REQS];
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq);
++static int alloc_pending_reqs;
++
++typedef unsigned int PEND_RING_IDX;
++
++static inline int MASK_PEND_IDX(int i) {
++ return (i & (MAX_PENDING_REQS-1));
++}
++
++static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx) {
++ return (req - pending_reqs[idx]);
++}
++
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **foreign_pages[MAX_DYNAMIC_MEM];
++static inline unsigned long idx_to_kaddr(
++ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
++{
++ unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
++ unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
++
++static unsigned short mmap_alloc = 0;
++static unsigned short mmap_lock = 0;
++static unsigned short mmap_inuse = 0;
++
++/******************************************************************
++ * GRANT HANDLES
++ */
++
++/* When using grant tables to map a frame for device access then the
++ * handle returned must be used to unmap the frame. This is needed to
++ * drop the ref count on the frame.
++ */
++struct grant_handle_pair
++{
++ grant_handle_t kernel;
++ grant_handle_t user;
++};
++#define INVALID_GRANT_HANDLE 0xFFFF
++
++static struct grant_handle_pair
++ pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
++#define pending_handle(_id, _idx, _i) \
++ (pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
++ + (_i)])
++
++
++static int blktap_read_ufe_ring(tap_blkif_t *info); /*local prototypes*/
++
++#define BLKTAP_MINOR 0 /*/dev/xen/blktap has a dynamic major */
++#define BLKTAP_DEV_DIR "/dev/xen"
++
++static int blktap_major;
++
++/* blktap IOCTLs: */
++#define BLKTAP_IOCTL_KICK_FE 1
++#define BLKTAP_IOCTL_KICK_BE 2 /* currently unused */
++#define BLKTAP_IOCTL_SETMODE 3
++#define BLKTAP_IOCTL_SENDPID 4
++#define BLKTAP_IOCTL_NEWINTF 5
++#define BLKTAP_IOCTL_MINOR 6
++#define BLKTAP_IOCTL_MAJOR 7
++#define BLKTAP_QUERY_ALLOC_REQS 8
++#define BLKTAP_IOCTL_FREEINTF 9
++#define BLKTAP_IOCTL_NEWINTF_EXT 50
++#define BLKTAP_IOCTL_PRINT_IDXS 100
++
++/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE) */
++#define BLKTAP_MODE_PASSTHROUGH 0x00000000 /* default */
++#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
++#define BLKTAP_MODE_INTERCEPT_BE 0x00000002 /* unimp. */
++
++#define BLKTAP_MODE_INTERPOSE \
++ (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
++
++
++static inline int BLKTAP_MODE_VALID(unsigned long arg)
++{
++ return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
++ (arg == BLKTAP_MODE_INTERCEPT_FE) ||
++ (arg == BLKTAP_MODE_INTERPOSE ));
++}
++
++/* Requests passing through the tap to userspace are re-assigned an ID.
++ * We must record a mapping between the BE [IDX,ID] tuple and the userspace
++ * ring ID.
++ */
++
++static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
++{
++ return ((fe_dom << 16) | MASK_PEND_IDX(idx));
++}
++
++static inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
++{
++ return (PEND_RING_IDX)(id & 0x0000ffff);
++}
++
++static inline int ID_TO_MIDX(unsigned long id)
++{
++ return (int)(id >> 16);
++}
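++
++/*
++ * The packing is plain bit-slicing: MAKE_ID(5, 7) yields 0x00050007,
++ * from which ID_TO_MIDX() recovers 5 and ID_TO_IDX() recovers 7, so a
++ * single unsigned long round-trips the [mmap_idx, pending_idx] tuple
++ * through the user ring's id field.
++ */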
++
++#define INVALID_REQ 0xdead0000
++
++/*TODO: Convert to a free list*/
++static inline int GET_NEXT_REQ(unsigned long *idx_map)
++{
++ int i;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ if (idx_map[i] == INVALID_REQ)
++ return i;
++
++ return INVALID_REQ;
++}
++
++static inline int OFFSET_TO_USR_IDX(int offset)
++{
++ return offset / BLKIF_MAX_SEGMENTS_PER_REQUEST;
++}
++
++static inline int OFFSET_TO_SEG(int offset)
++{
++ return offset % BLKIF_MAX_SEGMENTS_PER_REQUEST;
++}
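++
++/*
++ * These helpers invert MMAP_VADDR's page arithmetic. For example,
++ * page offset 25 into the data area decodes (with the usual 11
++ * segments per request) to usr_idx == 25 / 11 == 2 and
++ * seg == 25 % 11 == 3, i.e. segment 3 of user ring slot 2.
++ */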
++
++
++#define BLKTAP_INVALID_HANDLE(_g) \
++ (((_g->kernel) == INVALID_GRANT_HANDLE) && \
++ ((_g->user) == INVALID_GRANT_HANDLE))
++
++#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
++ (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
++ } while(0)
++
++
++/******************************************************************
++ * BLKTAP VM OPS
++ */
++
++static struct page *blktap_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ /*
++ * if the page has not been mapped in by the driver then return
++ * NOPAGE_SIGBUS to the domain.
++ */
++
++ return NOPAGE_SIGBUS;
++}
++
++static pte_t blktap_clear_pte(struct vm_area_struct *vma,
++ unsigned long uvaddr,
++ pte_t *ptep, int is_fullmm)
++{
++ pte_t copy;
++ tap_blkif_t *info;
++ int offset, seg, usr_idx, pending_idx, mmap_idx;
++ unsigned long uvstart = vma->vm_start + (RING_PAGES << PAGE_SHIFT);
++ unsigned long kvaddr;
++ struct page **map;
++ struct page *pg;
++ struct grant_handle_pair *khandle;
++ struct gnttab_unmap_grant_ref unmap[2];
++ int count = 0;
++
++ /*
++ * If the address is before the start of the grant mapped region or
++ * if vm_file is NULL (meaning mmap failed and we have nothing to do)
++ */
++ if (uvaddr < uvstart || vma->vm_file == NULL)
++ return ptep_get_and_clear_full(vma->vm_mm, uvaddr,
++ ptep, is_fullmm);
++
++ info = vma->vm_file->private_data;
++ map = vma->vm_private_data;
++
++ /* TODO Should these be changed to if statements? */
++ BUG_ON(!info);
++ BUG_ON(!info->idx_map);
++ BUG_ON(!map);
++
++ offset = (int) ((uvaddr - uvstart) >> PAGE_SHIFT);
++ usr_idx = OFFSET_TO_USR_IDX(offset);
++ seg = OFFSET_TO_SEG(offset);
++
++ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ClearPageReserved(pg);
++ map[offset + RING_PAGES] = NULL;
++
++ khandle = &pending_handle(mmap_idx, pending_idx, seg);
++
++ if (khandle->kernel != INVALID_GRANT_HANDLE) {
++ gnttab_set_unmap_op(&unmap[count], kvaddr,
++ GNTMAP_host_map, khandle->kernel);
++ count++;
++
++ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++ INVALID_P2M_ENTRY);
++ }
++
++ if (khandle->user != INVALID_GRANT_HANDLE) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++
++ copy = *ptep;
++ gnttab_set_unmap_op(&unmap[count], virt_to_machine(ptep),
++ GNTMAP_host_map
++ | GNTMAP_application_map
++ | GNTMAP_contains_pte,
++ khandle->user);
++ count++;
++ } else {
++ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap));
++
++ /* USING SHADOW PAGE TABLES. */
++ copy = ptep_get_and_clear_full(vma->vm_mm, uvaddr, ptep,
++ is_fullmm);
++ }
++
++ if (count) {
++ BLKTAP_INVALIDATE_HANDLE(khandle);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++ unmap, count))
++ BUG();
++ }
++
++ return copy;
++}
++
++struct vm_operations_struct blktap_vm_ops = {
++ .nopage = blktap_nopage,
++ .zap_pte = blktap_clear_pte,
++};
++
++/******************************************************************
++ * BLKTAP FILE OPS
++ */
++
++/*Function Declarations*/
++static tap_blkif_t *get_next_free_dev(void);
++static int blktap_open(struct inode *inode, struct file *filp);
++static int blktap_release(struct inode *inode, struct file *filp);
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg);
++static unsigned int blktap_poll(struct file *file, poll_table *wait);
++
++static const struct file_operations blktap_fops = {
++ .owner = THIS_MODULE,
++ .poll = blktap_poll,
++ .ioctl = blktap_ioctl,
++ .open = blktap_open,
++ .release = blktap_release,
++ .mmap = blktap_mmap,
++};
++
++
++static tap_blkif_t *get_next_free_dev(void)
++{
++ struct class *class;
++ tap_blkif_t *info;
++ int minor;
++
++ /*
++ * This is called only from the ioctl, which
++ * means we should always have interrupts enabled.
++ */
++ BUG_ON(irqs_disabled());
++
++ spin_lock_irq(&pending_free_lock);
++
++ /* tapfds[0] is always NULL */
++
++ for (minor = 1; minor < blktap_next_minor; minor++) {
++ info = tapfds[minor];
++ /* we could have failed a previous attempt. */
++ if (!info ||
++ ((info->dev_inuse == 0) &&
++ (info->dev_pending == 0))) {
++ /* don't dereference a NULL slot; it is
++ * (re)allocated after the found label. */
++ if (info)
++ info->dev_pending = 1;
++ goto found;
++ }
++ }
++ info = NULL;
++ minor = -1;
++
++ /*
++ * We didn't find a free device. If we can still allocate
++ * more, then we grab the next device minor that is
++ * available. This is done while we are still under
++ * the protection of the pending_free_lock.
++ */
++ if (blktap_next_minor < MAX_TAP_DEV)
++ minor = blktap_next_minor++;
++found:
++ spin_unlock_irq(&pending_free_lock);
++
++ if (!info && minor > 0) {
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (unlikely(!info)) {
++ /*
++ * If we failed here, try to put back
++ * the next minor number. But if one
++ * was just taken, then we just lose this
++ * minor. We can try to allocate this
++ * minor again later.
++ */
++ spin_lock_irq(&pending_free_lock);
++ if (blktap_next_minor == minor+1)
++ blktap_next_minor--;
++ spin_unlock_irq(&pending_free_lock);
++ goto out;
++ }
++
++ info->minor = minor;
++ /*
++ * Make sure that we have a minor before others can
++ * see us.
++ */
++ wmb();
++ tapfds[minor] = info;
++
++ if ((class = get_xen_class()) != NULL)
++ class_device_create(class, NULL,
++ MKDEV(blktap_major, minor), NULL,
++ "blktap%d", minor);
++ }
++
++out:
++ return info;
++}
++
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif)
++{
++ tap_blkif_t *info;
++ int i;
++
++ for (i = 1; i < blktap_next_minor; i++) {
++ info = tapfds[i];
++ if ( info &&
++ (info->trans.domid == domid) &&
++ (info->trans.busid == xenbus_id) ) {
++ info->blkif = blkif;
++ info->status = RUNNING;
++ return i;
++ }
++ }
++ return -1;
++}
++
++void signal_tapdisk(int idx)
++{
++ tap_blkif_t *info;
++ struct task_struct *ptask;
++
++ /*
++ * if the userland tools set things up wrong, this could be negative;
++ * just don't try to signal in this case
++ */
++ if ((idx < 0) || (idx >= MAX_TAP_DEV))
++ return;
++
++ info = tapfds[idx];
++ if (!info)
++ return;
++
++ if (info->pid > 0) {
++ ptask = find_task_by_pid(info->pid);
++ if (ptask)
++ info->status = CLEANSHUTDOWN;
++ }
++ info->blkif = NULL;
++
++ return;
++}
++
++static int blktap_open(struct inode *inode, struct file *filp)
++{
++ blkif_sring_t *sring;
++ int idx = iminor(inode) - BLKTAP_MINOR;
++ tap_blkif_t *info;
++ int i;
++
++ /* ctrl device, treat differently */
++ if (!idx)
++ return 0;
++
++ if ((idx < 0) || (idx >= MAX_TAP_DEV) ||
++ (info = tapfds[idx]) == NULL) {
++ WPRINTK("Unable to open device /dev/xen/blktap%d\n",
++ idx);
++ return -ENODEV;
++ }
++
++ DPRINTK("Opening device /dev/xen/blktap%d\n",idx);
++
++ /*Only one process can access device at a time*/
++ if (test_and_set_bit(0, &info->dev_inuse))
++ return -EBUSY;
++
++ info->dev_pending = 0;
++
++ /* Allocate the fe ring. */
++ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
++ if (sring == NULL)
++ goto fail_nomem;
++
++ SetPageReserved(virt_to_page(sring));
++
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);
++
++ filp->private_data = info;
++ info->vma = NULL;
++
++ info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS,
++ GFP_KERNEL);
++
++ if (info->idx_map == NULL)
++ goto fail_nomem;
++
++ if (idx > 0) {
++ init_waitqueue_head(&info->wait);
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ info->idx_map[i] = INVALID_REQ;
++ }
++
++ DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx);
++ return 0;
++
++ fail_nomem:
++ /* also undo the ring page allocation if it succeeded */
++ if (sring) {
++ ClearPageReserved(virt_to_page(sring));
++ free_page((unsigned long)sring);
++ }
++ return -ENOMEM;
++}
++
++static int blktap_release(struct inode *inode, struct file *filp)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ /* check for control device */
++ if (!info)
++ return 0;
++
++ info->dev_inuse = 0;
++ DPRINTK("Freeing device [/dev/xen/blktap%d]\n",info->minor);
++
++ /* Free the ring page. */
++ ClearPageReserved(virt_to_page(info->ufe_ring.sring));
++ free_page((unsigned long) info->ufe_ring.sring);
++
++ /* Clear any active mappings and free foreign map table */
++ if (info->vma) {
++ zap_page_range(
++ info->vma, info->vma->vm_start,
++ info->vma->vm_end - info->vma->vm_start, NULL);
++
++ kfree(info->vma->vm_private_data);
++
++ info->vma = NULL;
++ }
++
++ if (info->idx_map) {
++ kfree(info->idx_map);
++ info->idx_map = NULL;
++ }
++
++ if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
++ if (info->blkif->xenblkd != NULL) {
++ kthread_stop(info->blkif->xenblkd);
++ info->blkif->xenblkd = NULL;
++ }
++ info->status = CLEANSHUTDOWN;
++ }
++
++ return 0;
++}
++
++
++/* Note on mmap:
++ * We need to map pages to user space in a way that will allow the block
++ * subsystem set up direct IO to them. This couldn't be done before, because
++ * there isn't really a sane way to translate a user virtual address down to a
++ * physical address when the page belongs to another domain.
++ *
++ * My first approach was to map the page in to kernel memory, add an entry
++ * for it in the physical frame list (using alloc_lomem_region as in blkback)
++ * and then attempt to map that page up to user space. This is disallowed
++ * by xen though, which realizes that we don't really own the machine frame
++ * underlying the physical page.
++ *
++ * The new approach is to provide explicit support for this in xen linux.
++ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
++ * mapped from other vms. vma->vm_private_data is set up as a mapping
++ * from pages to actual page structs. There is a new clause in get_user_pages
++ * that does the right thing for this sort of mapping.
++ */
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ int size;
++ struct page **map;
++ int i;
++ tap_blkif_t *info = filp->private_data;
++ int ret;
++
++ if (info == NULL) {
++ WPRINTK("blktap: mmap, retrieving idx failed\n");
++ return -ENOMEM;
++ }
++
++ vma->vm_flags |= VM_RESERVED;
++ vma->vm_ops = &blktap_vm_ops;
++
++ size = vma->vm_end - vma->vm_start;
++ if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
++ WPRINTK("you _must_ map exactly %d pages!\n",
++ mmap_pages + RING_PAGES);
++ return -EAGAIN;
++ }
++
++ size >>= PAGE_SHIFT;
++ info->rings_vstart = vma->vm_start;
++ info->user_vstart = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
++
++ /* Map the ring pages to the start of the region and reserve it. */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ ret = vm_insert_page(vma, vma->vm_start,
++ virt_to_page(info->ufe_ring.sring));
++ else
++ ret = remap_pfn_range(vma, vma->vm_start,
++ __pa(info->ufe_ring.sring) >> PAGE_SHIFT,
++ PAGE_SIZE, vma->vm_page_prot);
++ if (ret) {
++ WPRINTK("Mapping user ring failed!\n");
++ goto fail;
++ }
++
++ /* Mark this VM as containing foreign pages, and set up mappings. */
++ map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
++ * sizeof(struct page *),
++ GFP_KERNEL);
++ if (map == NULL) {
++ WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
++ goto fail;
++ }
++
++ for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
++ map[i] = NULL;
++
++ vma->vm_private_data = map;
++ vma->vm_flags |= VM_FOREIGN;
++ vma->vm_flags |= VM_DONTCOPY;
++
++#ifdef CONFIG_X86
++ vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
++
++ info->vma = vma;
++ info->ring_ok = 1;
++ return 0;
++ fail:
++ /* Clear any active mappings. */
++ zap_page_range(vma, vma->vm_start,
++ vma->vm_end - vma->vm_start, NULL);
++
++ return -ENOMEM;
++}
++
++
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ switch(cmd) {
++ case BLKTAP_IOCTL_KICK_FE:
++ {
++ /* There are fe messages to process. */
++ return blktap_read_ufe_ring(info);
++ }
++ case BLKTAP_IOCTL_SETMODE:
++ {
++ if (info) {
++ if (BLKTAP_MODE_VALID(arg)) {
++ info->mode = arg;
++ /* XXX: may need to flush rings here. */
++ DPRINTK("blktap: set mode to %lx\n",
++ arg);
++ return 0;
++ }
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_PRINT_IDXS:
++ {
++ if (info) {
++ printk("User Rings: \n-----------\n");
++ printk("UF: rsp_cons: %2d, req_prod_prv: %2d "
++ "| req_prod: %2d, rsp_prod: %2d\n",
++ info->ufe_ring.rsp_cons,
++ info->ufe_ring.req_prod_pvt,
++ info->ufe_ring.sring->req_prod,
++ info->ufe_ring.sring->rsp_prod);
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_SENDPID:
++ {
++ if (info) {
++ info->pid = (pid_t)arg;
++ DPRINTK("blktap: pid received %d\n",
++ info->pid);
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_NEWINTF:
++ {
++ uint64_t val = (uint64_t)arg;
++ domid_translate_t *tr = (domid_translate_t *)&val;
++
++ DPRINTK("NEWINTF Req for domid %d and bus id %d\n",
++ tr->domid, tr->busid);
++ info = get_next_free_dev();
++ if (!info) {
++ WPRINTK("Error initialising /dev/xen/blktap - "
++ "No more devices\n");
++ return -1;
++ }
++ info->trans.domid = tr->domid;
++ info->trans.busid = tr->busid;
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_NEWINTF_EXT:
++ {
++ void __user *udata = (void __user *) arg;
++ domid_translate_ext_t tr;
++
++ if (copy_from_user(&tr, udata, sizeof(domid_translate_ext_t)))
++ return -EFAULT;
++
++ DPRINTK("NEWINTF_EXT Req for domid %d and bus id %d\n",
++ tr.domid, tr.busid);
++ info = get_next_free_dev();
++ if (!info) {
++ WPRINTK("Error initialising /dev/xen/blktap - "
++ "No more devices\n");
++ return -1;
++ }
++ info->trans.domid = tr.domid;
++ info->trans.busid = tr.busid;
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_FREEINTF:
++ {
++ unsigned long dev = arg;
++ unsigned long flags;
++
++ if (dev >= MAX_TAP_DEV)
++ return 0; /* should this be an error? */
++
++ info = tapfds[dev];
++ if (!info)
++ return 0;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (info->dev_pending)
++ info->dev_pending = 0;
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ return 0;
++ }
++ case BLKTAP_IOCTL_MINOR:
++ {
++ unsigned long dev = arg;
++
++ if (dev >= MAX_TAP_DEV)
++ return -EINVAL;
++
++ info = tapfds[dev];
++ if (!info)
++ return -EINVAL;
++
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_MAJOR:
++ return blktap_major;
++
++ case BLKTAP_QUERY_ALLOC_REQS:
++ {
++ WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
++ alloc_pending_reqs, blkif_reqs);
++ return (alloc_pending_reqs * 100) / blkif_reqs;
++ }
++ }
++ return -ENOIOCTLCMD;
++}
++
++static unsigned int blktap_poll(struct file *filp, poll_table *wait)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ /* do not work on the control device */
++ if (!info)
++ return 0;
++
++ poll_wait(filp, &info->wait, wait);
++ if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
++ RING_PUSH_REQUESTS(&info->ufe_ring);
++ return POLLIN | POLLRDNORM;
++ }
++ return 0;
++}
++
++void blktap_kick_user(int idx)
++{
++ tap_blkif_t *info;
++
++ if ((idx < 0) || (idx >= MAX_TAP_DEV))
++ return;
++
++ info = tapfds[idx];
++ if (!info)
++ return;
++
++ wake_up_interruptible(&info->wait);
++
++ return;
++}
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static int req_increase(void)
++{
++ int i, j;
++
++ if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock)
++ return -EINVAL;
++
++ pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t)
++ * blkif_reqs, GFP_KERNEL);
++ foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
++ goto out_of_memory;
++
++ DPRINTK("%s: reqs=%d, pages=%d\n",
++ __FUNCTION__, blkif_reqs, mmap_pages);
++
++ /* only blkif_reqs entries were allocated just above */
++ for (i = 0; i < blkif_reqs; i++) {
++ list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
++ &pending_free);
++ pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
++ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
++ BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc,
++ i, j));
++ }
++
++ mmap_alloc++;
++ DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
++ return 0;
++
++ out_of_memory:
++ free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
++ kfree(pending_reqs[mmap_alloc]);
++ WPRINTK("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++static void mmap_req_del(int mmap)
++{
++ BUG_ON(!spin_is_locked(&pending_free_lock));
++
++ kfree(pending_reqs[mmap]);
++ pending_reqs[mmap] = NULL;
++
++ free_empty_pages_and_pagevec(foreign_pages[mmap], mmap_pages);
++ foreign_pages[mmap] = NULL;
++
++ mmap_lock = 0;
++ DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
++ mmap_alloc--;
++}
++
++static pending_req_t* alloc_req(void)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++
++ if (req) {
++ req->inuse = 1;
++ alloc_pending_reqs++;
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++
++ alloc_pending_reqs--;
++ req->inuse = 0;
++ if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
++ mmap_inuse--;
++ if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return;
++ }
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
++ int tapidx)
++{
++ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++ unsigned int i, invcount = 0;
++ struct grant_handle_pair *khandle;
++ uint64_t ptep;
++ int ret, mmap_idx;
++ unsigned long kvaddr, uvaddr;
++ tap_blkif_t *info;
++
++ if ((tapidx < 0) || (tapidx >= MAX_TAP_DEV) ||
++ (info = tapfds[tapidx]) == NULL) {
++ WPRINTK("fast_flush: Couldn't get info!\n");
++ return;
++ }
++
++ if (info->vma != NULL &&
++ xen_feature(XENFEAT_auto_translated_physmap)) {
++ down_write(&info->vma->vm_mm->mmap_sem);
++ zap_page_range(info->vma,
++ MMAP_VADDR(info->user_vstart, u_idx, 0),
++ req->nr_pages << PAGE_SHIFT, NULL);
++ up_write(&info->vma->vm_mm->mmap_sem);
++ return;
++ }
++
++ mmap_idx = req->mem_idx;
++
++ for (i = 0; i < req->nr_pages; i++) {
++ kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
++ uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
++
++ khandle = &pending_handle(mmap_idx, k_idx, i);
++
++ if (khandle->kernel != INVALID_GRANT_HANDLE) {
++ gnttab_set_unmap_op(&unmap[invcount],
++ idx_to_kaddr(mmap_idx, k_idx, i),
++ GNTMAP_host_map, khandle->kernel);
++ invcount++;
++
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(mmap_idx, k_idx, i))
++ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++ }
++
++ if (khandle->user != INVALID_GRANT_HANDLE) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++ if (create_lookup_pte_addr(
++ info->vma->vm_mm,
++ MMAP_VADDR(info->user_vstart, u_idx, i),
++ &ptep) !=0) {
++ WPRINTK("Couldn't get a pte addr!\n");
++ return;
++ }
++
++ gnttab_set_unmap_op(&unmap[invcount], ptep,
++ GNTMAP_host_map
++ | GNTMAP_application_map
++ | GNTMAP_contains_pte,
++ khandle->user);
++ invcount++;
++ }
++
++ BLKTAP_INVALIDATE_HANDLE(khandle);
++ }
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(ret);
++
++ if (info->vma != NULL && !xen_feature(XENFEAT_auto_translated_physmap))
++ zap_page_range(info->vma,
++ MMAP_VADDR(info->user_vstart, u_idx, 0),
++ req->nr_pages << PAGE_SHIFT, NULL);
++}
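++
++/*
++ * Note on the unmap above: each mapped page can hold two grant handles,
++ * one for the kernel mapping (GNTMAP_host_map) and one for the user PTE
++ * (GNTMAP_host_map | GNTMAP_application_map | GNTMAP_contains_pte).
++ * That is why unmap[] is sized BLKIF_MAX_SEGMENTS_PER_REQUEST*2 and why
++ * khandle->kernel and khandle->user are checked independently.
++ */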
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
++ current->comm, blkif->st_oo_req,
++ blkif->st_rd_req, blkif->st_wr_req);
++ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++ blkif->st_rd_req = 0;
++ blkif->st_wr_req = 0;
++ blkif->st_oo_req = 0;
++}
++
++int tap_blkif_schedule(void *arg)
++{
++ blkif_t *blkif = arg;
++
++ blkif_get(blkif);
++
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: started\n", current->comm);
++
++ while (!kthread_should_stop()) {
++ if (try_to_freeze())
++ continue;
++
++ wait_event_interruptible(
++ blkif->wq,
++ blkif->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ blkif->waiting_reqs = 0;
++ smp_mb(); /* clear flag *before* checking for work */
++
++ if (do_block_io_op(blkif))
++ blkif->waiting_reqs = 1;
++
++ if (log_stats && time_after(jiffies, blkif->st_print))
++ print_stats(blkif);
++ }
++
++ if (log_stats)
++ print_stats(blkif);
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: exiting\n", current->comm);
++
++ blkif->xenblkd = NULL;
++ blkif_put(blkif);
++
++ return 0;
++}
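++
++/*
++ * The loop above deliberately blocks twice per iteration: first until
++ * the frontend queues work (blkif->wq), then until a pending_req_t is
++ * available (pending_free_wq).  When do_block_io_op() returns nonzero,
++ * waiting_reqs is re-armed so leftover ring entries are consumed on the
++ * next pass instead of being lost.
++ */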
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called by user level ioctl()
++ */
++
++static int blktap_read_ufe_ring(tap_blkif_t *info)
++{
++ /* This is called to read responses from the UFE ring. */
++ RING_IDX i, j, rp;
++ blkif_response_t *resp;
++ blkif_t *blkif=NULL;
++ int pending_idx, usr_idx, mmap_idx;
++ pending_req_t *pending_req;
++
++ if (!info)
++ return 0;
++
++ /* We currently only forward packets in INTERCEPT_FE mode. */
++ if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
++ return 0;
++
++ /* for each outstanding message on the UFEring */
++ rp = info->ufe_ring.sring->rsp_prod;
++ rmb();
++
++ for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
++ blkif_response_t res;
++ resp = RING_GET_RESPONSE(&info->ufe_ring, i);
++ memcpy(&res, resp, sizeof(res));
++ mb(); /* rsp_cons read by RING_FULL() in do_block_io_op(). */
++ ++info->ufe_ring.rsp_cons;
++
++ /*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
++ usr_idx = (int)res.id;
++ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++
++ if ((mmap_idx >= mmap_alloc) ||
++ (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS))
++ WPRINTK("Incorrect req map "
++ "[%d], internal map [%d,%d (%d)]\n",
++ usr_idx, mmap_idx,
++ ID_TO_IDX(info->idx_map[usr_idx]),
++ MASK_PEND_IDX(
++ ID_TO_IDX(info->idx_map[usr_idx])));
++
++ pending_req = &pending_reqs[mmap_idx][pending_idx];
++ blkif = pending_req->blkif;
++
++ for (j = 0; j < pending_req->nr_pages; j++) {
++
++ unsigned long kvaddr, uvaddr;
++ struct page **map = info->vma->vm_private_data;
++ struct page *pg;
++ int offset;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
++
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ClearPageReserved(pg);
++ offset = (uvaddr - info->vma->vm_start)
++ >> PAGE_SHIFT;
++ map[offset] = NULL;
++ }
++ fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
++ info->idx_map[usr_idx] = INVALID_REQ;
++ make_response(blkif, pending_req->id, res.operation,
++ res.status);
++ blkif_put(pending_req->blkif);
++ free_req(pending_req);
++ }
++
++ return 0;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++static void blkif_notify_work(blkif_t *blkif)
++{
++ blkif->waiting_reqs = 1;
++ wake_up(&blkif->wq);
++}
++
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ blkif_notify_work(dev_id);
++ return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++static int print_dbug = 1;
++static int do_block_io_op(blkif_t *blkif)
++{
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ blkif_request_t req;
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int more_to_do = 0;
++ tap_blkif_t *info;
++
++ rc = blk_rings->common.req_cons;
++ rp = blk_rings->common.sring->req_prod;
++ rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++ /* Check blkif has a corresponding UE ring. */
++ if (blkif->dev_num < 0) {
++ if (print_dbug) {
++ WPRINTK("Corresponding UE "
++ "ring does not exist!\n");
++ print_dbug = 0; /* We only print this message once. */
++ }
++ return 0;
++ }
++
++ info = (blkif->dev_num > MAX_TAP_DEV) ?
++ NULL : tapfds[blkif->dev_num];
++
++ if (!info || !info->dev_inuse) {
++ if (print_dbug) {
++ WPRINTK("Can't get UE info!\n");
++ print_dbug = 0;
++ }
++ return 0;
++ }
++
++ while (rc != rp) {
++
++ if (RING_FULL(&info->ufe_ring)) {
++ WPRINTK("RING_FULL! More to do\n");
++ more_to_do = 1;
++ break;
++ }
++
++ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
++ WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
++ " More to do\n");
++ more_to_do = 1;
++ break;
++ }
++
++ pending_req = alloc_req();
++ if (NULL == pending_req) {
++ blkif->st_oo_req++;
++ more_to_do = 1;
++ break;
++ }
++
++ if (kthread_should_stop()) {
++ more_to_do = 1;
++ break;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
++ sizeof(req));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.req_cons = ++rc; /* before make_response() */
++
++ /* Apply all sanity checks to /private copy/ of request. */
++ barrier();
++
++ switch (req.operation) {
++ case BLKIF_OP_READ:
++ blkif->st_rd_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++
++ case BLKIF_OP_WRITE:
++ blkif->st_wr_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++
++ default:
++ /* A good sign something is wrong: sleep for a while to
++ * avoid excessive CPU consumption by a bad guest. */
++ msleep(1);
++ WPRINTK("unknown operation [%d]\n",
++ req.operation);
++ make_response(blkif, req.id, req.operation,
++ BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ break;
++ }
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++ }
++
++ blktap_kick_user(blkif->dev_num);
++
++ return more_to_do;
++}
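++
++/*
++ * Illustrative sketch (not compiled in): the consumer pattern used by
++ * do_block_io_op() is the standard Xen shared-ring loop:
++ *
++ * rc = ring->req_cons;
++ * rp = ring->sring->req_prod;
++ * rmb(); -- ensure requests are visible before reading them
++ * while (rc != rp) {
++ * consume(RING_GET_REQUEST(ring, rc));
++ * ring->req_cons = ++rc;
++ * }
++ */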
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req)
++{
++ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
++ int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
++ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++ unsigned int nseg;
++ int ret, i, nr_sects = 0;
++ tap_blkif_t *info;
++ blkif_request_t *target;
++ int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
++ int usr_idx;
++ uint16_t mmap_idx = pending_req->mem_idx;
++
++ if (blkif->dev_num < 0 || blkif->dev_num > MAX_TAP_DEV)
++ goto fail_response;
++
++ info = tapfds[blkif->dev_num];
++ if (info == NULL)
++ goto fail_response;
++
++ /* Check we have space on user ring - should never fail. */
++ usr_idx = GET_NEXT_REQ(info->idx_map);
++ if (usr_idx == INVALID_REQ) {
++ BUG();
++ goto fail_response;
++ }
++
++ /* Check that number of segments is sane. */
++ nseg = req->nr_segments;
++ if (unlikely(nseg == 0) ||
++ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++ WPRINTK("Bad number of segments in request (%d)\n", nseg);
++ goto fail_response;
++ }
++
++ /* Make sure userspace is ready. */
++ if (!info->ring_ok) {
++ WPRINTK("blktap: ring not ready for requests!\n");
++ goto fail_response;
++ }
++
++ if (RING_FULL(&info->ufe_ring)) {
++ WPRINTK("blktap: fe_ring is full; "
++ "IO request will be dropped. %d %d\n",
++ RING_SIZE(&info->ufe_ring),
++ RING_SIZE(&blkif->blk_rings.common));
++ goto fail_response;
++ }
++
++ pending_req->blkif = blkif;
++ pending_req->id = req->id;
++ pending_req->operation = operation;
++ pending_req->status = BLKIF_RSP_OKAY;
++ pending_req->nr_pages = nseg;
++ op = 0;
++ for (i = 0; i < nseg; i++) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ uint64_t ptep;
++ uint32_t flags;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++
++ flags = GNTMAP_host_map;
++ if (operation == WRITE)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[op], kvaddr, flags,
++ req->seg[i].gref, blkif->domid);
++ op++;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Now map it to user. */
++ ret = create_lookup_pte_addr(info->vma->vm_mm,
++ uvaddr, &ptep);
++ if (ret) {
++ WPRINTK("Couldn't get a pte addr!\n");
++ goto fail_flush;
++ }
++
++ flags = GNTMAP_host_map | GNTMAP_application_map
++ | GNTMAP_contains_pte;
++ if (operation == WRITE)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[op], ptep, flags,
++ req->seg[i].gref, blkif->domid);
++ op++;
++ }
++
++ nr_sects += (req->seg[i].last_sect -
++ req->seg[i].first_sect + 1);
++ }
++
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
++ BUG_ON(ret);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ for (i = 0; i < (nseg*2); i+=2) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ unsigned long offset;
++ struct page *pg;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
++
++ if (unlikely(map[i].status != 0)) {
++ WPRINTK("invalid kernel buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i].handle = INVALID_GRANT_HANDLE;
++ }
++
++ if (unlikely(map[i+1].status != 0)) {
++ WPRINTK("invalid user buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i+1].handle = INVALID_GRANT_HANDLE;
++ }
++
++ pending_handle(mmap_idx, pending_idx, i/2).kernel
++ = map[i].handle;
++ pending_handle(mmap_idx, pending_idx, i/2).user
++ = map[i+1].handle;
++
++ if (ret)
++ continue;
++
++ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr
++ >> PAGE_SHIFT));
++ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ((struct page **)info->vma->vm_private_data)[offset] =
++ pg;
++ }
++ } else {
++ for (i = 0; i < nseg; i++) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ unsigned long offset;
++ struct page *pg;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++
++ if (unlikely(map[i].status != 0)) {
++ WPRINTK("invalid kernel buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i].handle = INVALID_GRANT_HANDLE;
++ }
++
++ pending_handle(mmap_idx, pending_idx, i).kernel
++ = map[i].handle;
++
++ if (ret)
++ continue;
++
++ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ((struct page **)info->vma->vm_private_data)[offset] =
++ pg;
++ }
++ }
++
++ if (ret)
++ goto fail_flush;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ down_write(&info->vma->vm_mm->mmap_sem);
++ /* Mark mapped pages as reserved: */
++ for (i = 0; i < req->nr_segments; i++) {
++ unsigned long kvaddr;
++ struct page *pg;
++
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ SetPageReserved(pg);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ ret = vm_insert_page(info->vma,
++ MMAP_VADDR(info->user_vstart,
++ usr_idx, i), pg);
++ if (ret) {
++ up_write(&info->vma->vm_mm->mmap_sem);
++ goto fail_flush;
++ }
++ }
++ }
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ up_write(&info->vma->vm_mm->mmap_sem);
++
++ /*record [mmap_idx,pending_idx] to [usr_idx] mapping*/
++ info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
++
++ blkif_get(blkif);
++ /* Finally, write the request message to the user ring. */
++ target = RING_GET_REQUEST(&info->ufe_ring,
++ info->ufe_ring.req_prod_pvt);
++ memcpy(target, req, sizeof(*req));
++ target->id = usr_idx;
++ wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
++ info->ufe_ring.req_prod_pvt++;
++
++ if (operation == READ)
++ blkif->st_rd_sect += nr_sects;
++ else if (operation == WRITE)
++ blkif->st_wr_sect += nr_sects;
++
++ return;
++
++ fail_flush:
++ WPRINTK("Reached Fail_flush\n");
++ fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
++ fail_response:
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ msleep(1); /* back off a bit */
++}
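++
++/*
++ * Mapping summary for dispatch_rw_block_io(): in the non-autotranslated
++ * case each segment is granted twice, once at the kernel address from
++ * idx_to_kaddr() and once into the tapdisk process at MMAP_VADDR(), so
++ * map[] carries up to 2*nseg entries.  Any failure funnels through
++ * fast_flush_area(), which unmaps whichever handles were installed.
++ */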
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st)
++{
++ blkif_response_t resp;
++ unsigned long flags;
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ int more_to_do = 0;
++ int notify;
++
++ resp.id = id;
++ resp.operation = op;
++ resp.status = st;
++
++ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++ /* Place on the response ring for the relevant domain. */
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(RING_GET_RESPONSE(&blk_rings->native,
++ blk_rings->native.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
++ blk_rings->x86_32.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
++ blk_rings->x86_64.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++
++ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++ /*
++ * Tail check for pending requests. Allows frontend to avoid
++ * notifications if requests are already in flight (lower
++ * overheads and promotes batching).
++ */
++ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++ if (more_to_do)
++ blkif_notify_work(blkif);
++ if (notify)
++ notify_remote_via_irq(blkif->irq);
++}
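++
++/*
++ * Notification discipline above: RING_PUSH_RESPONSES_AND_CHECK_NOTIFY()
++ * only sets 'notify' when the frontend may actually be waiting, and the
++ * tail check re-kicks our own worker via blkif_notify_work() when more
++ * requests arrived meanwhile.  Both checks exist to avoid needless
++ * event-channel traffic.
++ */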
++
++static int __init blkif_init(void)
++{
++ int i, ret;
++ struct class *class;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ INIT_LIST_HEAD(&pending_free);
++ for (i = 0; i < 2; i++) {
++ ret = req_increase();
++ if (ret)
++ break;
++ }
++ if (i == 0)
++ return ret;
++
++ tap_blkif_interface_init();
++
++ alloc_pending_reqs = 0;
++
++ tap_blkif_xenbus_init();
++
++ /* Dynamically allocate a major for this device */
++ ret = register_chrdev(0, "blktap", &blktap_fops);
++
++ if (ret < 0) {
++ WPRINTK("Couldn't register /dev/xen/blktap\n");
++ return -ENOMEM;
++ }
++
++ blktap_major = ret;
++
++ /* tapfds[0] is always NULL */
++ blktap_next_minor++;
++
++ DPRINTK("Created misc_dev [/dev/xen/blktap%d]\n",i);
++
++ /* Make sure the xen class exists */
++ if ((class = get_xen_class()) != NULL) {
++ /*
++ * This will allow udev to create the blktap ctrl device.
++ * We only want to create blktap0 first. We don't want
++ * to flood the sysfs system with needless blktap devices.
++ * We only create the device when a request of a new device is
++ * made.
++ */
++ class_device_create(class, NULL,
++ MKDEV(blktap_major, 0), NULL,
++ "blktap0");
++ } else {
++ /* this is bad, but not fatal */
++ WPRINTK("blktap: sysfs xen_class not created\n");
++ }
++
++ DPRINTK("Blktap device successfully created\n");
++
++ return 0;
++}
++
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/blktap/common.h linux-2.6.18-xen-3.3.0/drivers/xen/blktap/common.h
+--- linux-2.6.18.8/drivers/xen/blktap/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blktap/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,121 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__, __LINE__, ##_a)
++
++#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
++
++struct backend_info;
++
++typedef struct blkif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++ /* Comms information. */
++ enum blkif_protocol blk_protocol;
++ blkif_back_rings_t blk_rings;
++ struct vm_struct *blk_ring_area;
++ /* Back pointer to the backend_info. */
++ struct backend_info *be;
++ /* Private fields. */
++ spinlock_t blk_ring_lock;
++ atomic_t refcnt;
++
++ wait_queue_head_t wq;
++ struct task_struct *xenblkd;
++ unsigned int waiting_reqs;
++ request_queue_t *plug;
++
++ /* statistics */
++ unsigned long st_print;
++ int st_rd_req;
++ int st_wr_req;
++ int st_oo_req;
++ int st_rd_sect;
++ int st_wr_sect;
++
++ wait_queue_head_t waiting_to_free;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++
++ int dev_num;
++ uint64_t sectors;
++} blkif_t;
++
++blkif_t *tap_alloc_blkif(domid_t domid);
++void tap_blkif_free(blkif_t *blkif);
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
++ unsigned int evtchn);
++void tap_blkif_unmap(blkif_t *blkif);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
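++
++/*
++ * Illustrative usage (sketch): a holder takes a reference around any
++ * outstanding work; tap_blkif_free() in interface.c then sleeps on
++ * waiting_to_free until the count reaches zero:
++ *
++ * blkif_get(blkif);
++ * ... queue I/O that refers to blkif ...
++ * blkif_put(blkif); -- wakes waiting_to_free at zero
++ */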
++
++
++struct phys_req {
++ unsigned short dev;
++ unsigned short nr_sects;
++ struct block_device *bdev;
++ blkif_sector_t sector_number;
++};
++
++void tap_blkif_interface_init(void);
++
++void tap_blkif_xenbus_init(void);
++
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int tap_blkif_schedule(void *arg);
++
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
++void signal_tapdisk(int idx);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/blktap/interface.c linux-2.6.18-xen-3.3.0/drivers/xen/blktap/interface.c
+--- linux-2.6.18.8/drivers/xen/blktap/interface.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blktap/interface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,174 @@
++/******************************************************************************
++ * drivers/xen/blktap/interface.c
++ *
++ * Block-device interface management.
++ *
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *tap_alloc_blkif(domid_t domid)
++{
++ blkif_t *blkif;
++
++ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++ if (!blkif)
++ return ERR_PTR(-ENOMEM);
++
++ memset(blkif, 0, sizeof(*blkif));
++ blkif->domid = domid;
++ spin_lock_init(&blkif->blk_ring_lock);
++ atomic_set(&blkif->refcnt, 1);
++ init_waitqueue_head(&blkif->wq);
++ blkif->st_print = jiffies;
++ init_waitqueue_head(&blkif->waiting_to_free);
++
++ return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, shared_page, blkif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return op.status;
++ }
++
++ blkif->shmem_ref = shared_page;
++ blkif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, blkif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
++ unsigned int evtchn)
++{
++ int err;
++
++ /* Already connected through? */
++ if (blkif->irq)
++ return 0;
++
++ if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++ return -ENOMEM;
++
++ err = map_frontend_page(blkif, shared_page);
++ if (err) {
++ free_vm_area(blkif->blk_ring_area);
++ return err;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ {
++ blkif_sring_t *sring;
++ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_32:
++ {
++ blkif_x86_32_sring_t *sring_x86_32;
++ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_64:
++ {
++ blkif_x86_64_sring_t *sring_x86_64;
++ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++ break;
++ }
++ default:
++ BUG();
++ }
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ blkif->domid, evtchn, tap_blkif_be_int,
++ 0, "blkif-backend", blkif);
++ if (err < 0) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ return err;
++ }
++ blkif->irq = err;
++
++ return 0;
++}
++
++void tap_blkif_unmap(blkif_t *blkif)
++{
++ if (blkif->irq) {
++ unbind_from_irqhandler(blkif->irq, blkif);
++ blkif->irq = 0;
++ }
++ if (blkif->blk_rings.common.sring) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ }
++}
++
++void tap_blkif_free(blkif_t *blkif)
++{
++ atomic_dec(&blkif->refcnt);
++ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++
++ tap_blkif_unmap(blkif);
++ kmem_cache_free(blkif_cachep, blkif);
++}
++
++void __init tap_blkif_interface_init(void)
++{
++ blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t),
++ 0, 0, NULL, NULL);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/blktap/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/blktap/Makefile
+--- linux-2.6.18.8/drivers/xen/blktap/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blktap/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,5 @@
++LINUXINCLUDE += -I../xen/include/public/io
++
++obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
++
++xenblktap-y := xenbus.o interface.o blktap.o
+diff -rpuN linux-2.6.18.8/drivers/xen/blktap/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/blktap/xenbus.c
+--- linux-2.6.18.8/drivers/xen/blktap/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/blktap/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,477 @@
++/* drivers/xen/blktap/xenbus.c
++ *
++ * Xenbus code for blktap
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Based on the blkback xenbus code:
++ *
++ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ int xenbus_id;
++ int group_added;
++};
++
++
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static int blktap_remove(struct xenbus_device *dev);
++static int blktap_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id);
++static void tap_backend_changed(struct xenbus_watch *, const char **,
++ unsigned int);
++static void tap_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state);
++
++static int strsep_len(const char *str, char c, unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c) {
++ if (len == 0)
++ return i;
++ len--;
++ }
++ return (len == 0) ? i : -ERANGE;
++}
++
++static long get_id(const char *str)
++{
++ int len, end;
++ const char *ptr;
++ char *tptr, num[10];
++
++ len = strsep_len(str, '/', 2);
++ end = strlen(str);
++ if ((len < 0) || (end < 0) || (end - len > sizeof(num)))
++ return -1;
++
++ ptr = str + len + 1;
++ strncpy(num, ptr, end - len);
++ tptr = num + (end - (len + 1));
++ *tptr = '\0';
++ DPRINTK("Get_id called for %s (%s)\n", str, num);
++
++ return simple_strtol(num, NULL, 10);
++}
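++
++/*
++ * Example (assumed xenstore layout): for a nodename such as
++ * "backend/tap/1/769", strsep_len(str, '/', 2) returns the index of the
++ * third '/', so get_id() parses and returns 769.
++ */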
++
++static int blktap_name(blkif_t *blkif, char *buf)
++{
++ char *devpath, *devname;
++ struct xenbus_device *dev = blkif->be->dev;
++
++ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++ if (IS_ERR(devpath))
++ return PTR_ERR(devpath);
++
++ if ((devname = strstr(devpath, "/dev/")) != NULL)
++ devname += strlen("/dev/");
++ else
++ devname = devpath;
++
++ snprintf(buf, TASK_COMM_LEN, "blktap.%d.%s", blkif->domid, devname);
++ kfree(devpath);
++
++ return 0;
++}
++
++/****************************************************************
++ * sysfs interface for I/O requests of blktap device
++ */
++
++#define VBD_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct device *_dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++ { \
++ struct xenbus_device *dev = to_xenbus_device(_dev); \
++ struct backend_info *be = dev->dev.driver_data; \
++ \
++ return sprintf(buf, format, ##args); \
++ } \
++ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *tapstat_attrs[] = {
++ &dev_attr_oo_req.attr,
++ &dev_attr_rd_req.attr,
++ &dev_attr_wr_req.attr,
++ &dev_attr_rd_sect.attr,
++ &dev_attr_wr_sect.attr,
++ NULL
++};
++
++static struct attribute_group tapstat_group = {
++ .name = "statistics",
++ .attrs = tapstat_attrs,
++};
++
++int xentap_sysfs_addif(struct xenbus_device *dev)
++{
++ int err;
++ struct backend_info *be = dev->dev.driver_data;
++ err = sysfs_create_group(&dev->dev.kobj, &tapstat_group);
++ if (!err)
++ be->group_added = 1;
++ return err;
++}
++
++void xentap_sysfs_delif(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
++ be->group_added = 0;
++}
++
++static int blktap_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (be->group_added)
++ xentap_sysfs_delif(be->dev);
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++ if (be->blkif) {
++ if (be->blkif->xenblkd)
++ kthread_stop(be->blkif->xenblkd);
++ signal_tapdisk(be->blkif->dev_num);
++ tap_blkif_free(be->blkif);
++ be->blkif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++static void tap_update_blkif_status(blkif_t *blkif)
++{
++ int err;
++ char name[TASK_COMM_LEN];
++
++ /* Not ready to connect? */
++ if (!blkif->irq || !blkif->sectors)
++ return;
++
++ /* Already connected? */
++ if (blkif->be->dev->state == XenbusStateConnected)
++ return;
++
++ /* Attempt to connect: exit if we fail to. */
++ connect(blkif->be);
++ if (blkif->be->dev->state != XenbusStateConnected)
++ return;
++
++ err = blktap_name(blkif, name);
++ if (err) {
++ xenbus_dev_error(blkif->be->dev, err, "get blktap dev name");
++ return;
++ }
++
++ if (!blkif->be->group_added) {
++ err = xentap_sysfs_addif(blkif->be->dev);
++ if (err) {
++ xenbus_dev_fatal(blkif->be->dev, err,
++ "creating sysfs entries");
++ return;
++ }
++ }
++
++ blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
++ if (IS_ERR(blkif->xenblkd)) {
++ err = PTR_ERR(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ xenbus_dev_fatal(blkif->be->dev, err, "start xenblkd");
++ WPRINTK("Error starting thread\n");
++ }
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate
++ * the basic structures, and watch the store waiting for the
++ * user-space program to tell us the physical device info. Switch to
++ * InitWait.
++ */
++static int blktap_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->dev = dev;
++ dev->dev.driver_data = be;
++ be->xenbus_id = get_id(dev->nodename);
++
++ be->blkif = tap_alloc_blkif(dev->otherend_id);
++ if (IS_ERR(be->blkif)) {
++ err = PTR_ERR(be->blkif);
++ be->blkif = NULL;
++ xenbus_dev_fatal(dev, err, "creating block interface");
++ goto fail;
++ }
++
++ /* setup back pointer */
++ be->blkif->be = be;
++ be->blkif->sectors = 0;
++
++ /* set a watch on disk info, waiting for userspace to update details*/
++ err = xenbus_watch_path2(dev, dev->nodename, "info",
++ &be->backend_watch, tap_backend_changed);
++ if (err)
++ goto fail;
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++ return 0;
++
++fail:
++ DPRINTK("blktap probe failed\n");
++ blktap_remove(dev);
++ return err;
++}
++
++
++/**
++ * Callback received when the user space code has placed the device
++ * information in xenstore.
++ */
++static void tap_backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned long info;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++ /**
++ * Check to see whether userspace code has opened the image
++ * and written sector
++ * and disk info to xenstore
++ */
++ err = xenbus_gather(XBT_NIL, dev->nodename, "info", "%lu", &info,
++ NULL);
++ if (XENBUS_EXIST_ERR(err))
++ return;
++ if (err) {
++ xenbus_dev_error(dev, err, "getting info");
++ return;
++ }
++
++ DPRINTK("Userspace update on disk info, %lu\n",info);
++
++ err = xenbus_gather(XBT_NIL, dev->nodename, "sectors", "%llu",
++ &be->blkif->sectors, NULL);
++
++ /* Associate tap dev with domid*/
++ be->blkif->dev_num = dom_to_devid(be->blkif->domid, be->xenbus_id,
++ be->blkif);
++ DPRINTK("Thread started for domid [%d], connecting disk\n",
++ be->blkif->dev_num);
++
++ tap_update_blkif_status(be->blkif);
++}
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void tap_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("\n");
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ /* Ensure we connect even when two watches fire in
++ close succession and we miss the intermediate value
++ of frontend_state. */
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ err = connect_ring(be);
++ if (err)
++ break;
++ tap_update_blkif_status(be->blkif);
++ break;
++
++ case XenbusStateClosing:
++ if (be->blkif->xenblkd) {
++ kthread_stop(be->blkif->xenblkd);
++ be->blkif->xenblkd = NULL;
++ }
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++/**
++ * Switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++ int err;
++
++ struct xenbus_device *dev = be->dev;
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(dev, err, "switching to Connected state",
++ dev->nodename);
++
++ return;
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ char protocol[64];
++ int err;
++
++ DPRINTK("%s\n", dev->otherend);
++
++ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
++ &ring_ref, "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++ "%63s", protocol, NULL);
++ if (err)
++ strcpy(protocol, "unspecified, assuming native");
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++ else {
++ xenbus_dev_fatal(dev, -ENOSYS,
++ "unknown fe protocol %s", protocol);
++ return -ENOSYS;
++ }
++ printk(KERN_INFO
++ "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
++
++ /* Map the shared frame, irq etc. */
++ err = tap_blkif_map(be->blkif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id blktap_ids[] = {
++ { "tap" },
++ { "" }
++};
++
++
++static struct xenbus_driver blktap = {
++ .name = "tap",
++ .owner = THIS_MODULE,
++ .ids = blktap_ids,
++ .probe = blktap_probe,
++ .remove = blktap_remove,
++ .otherend_changed = tap_frontend_changed
++};
++
++
++void tap_blkif_xenbus_init(void)
++{
++ xenbus_register_backend(&blktap);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/char/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/char/Makefile
+--- linux-2.6.18.8/drivers/xen/char/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/char/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1 @@
++obj-$(CONFIG_XEN_DEVMEM) := mem.o
+diff -rpuN linux-2.6.18.8/drivers/xen/char/mem.c linux-2.6.18-xen-3.3.0/drivers/xen/char/mem.c
+--- linux-2.6.18.8/drivers/xen/char/mem.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/char/mem.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,190 @@
++/*
++ * Originally from linux/drivers/char/mem.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Added devfs support.
++ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
++ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
++ */
++
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mman.h>
++#include <linux/random.h>
++#include <linux/init.h>
++#include <linux/raw.h>
++#include <linux/tty.h>
++#include <linux/capability.h>
++#include <linux/smp_lock.h>
++#include <linux/ptrace.h>
++#include <linux/device.h>
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++
++static inline int uncached_access(struct file *file)
++{
++ if (file->f_flags & O_SYNC)
++ return 1;
++ /* Xen sets correct MTRR type on non-RAM for us. */
++ return 0;
++}
++
++/*
++ * This function reads the *physical* memory. The f_pos points directly to
++ * the memory location.
++ */
++static ssize_t read_mem(struct file * file, char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ unsigned long p = *ppos, ignored;
++ ssize_t read = 0, sz;
++ void __iomem *v;
++
++ while (count > 0) {
++ /*
++ * Handle first page in case it's not aligned
++ */
++ if (-p & (PAGE_SIZE - 1))
++ sz = -p & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ sz = min_t(unsigned long, sz, count);
++
++ v = ioremap(p, sz);
++ if (IS_ERR(v) || v == NULL) {
++ /*
++ * Some programs (e.g., dmidecode) groove off into
++ * weird RAM areas where no tables can possibly exist
++ * (because Xen will have stomped on them!). These
++ * programs get rather upset if we let them know that
++ * Xen failed their access, so we fake out a read of
++ * all zeroes.
++ */
++ if (clear_user(buf, count))
++ return -EFAULT;
++ read += count;
++ break;
++ }
++
++ ignored = copy_to_user(buf, v, sz);
++ iounmap(v);
++ if (ignored)
++ return -EFAULT;
++ buf += sz;
++ p += sz;
++ count -= sz;
++ read += sz;
++ }
++
++ *ppos += read;
++ return read;
++}
++
++static ssize_t write_mem(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ unsigned long p = *ppos, ignored;
++ ssize_t written = 0, sz;
++ void __iomem *v;
++
++ while (count > 0) {
++ /*
++ * Handle first page in case it's not aligned
++ */
++ if (-p & (PAGE_SIZE - 1))
++ sz = -p & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ sz = min_t(unsigned long, sz, count);
++
++ v = ioremap(p, sz);
++ if (v == NULL)
++ break;
++ if (IS_ERR(v)) {
++ if (written == 0)
++ return PTR_ERR(v);
++ break;
++ }
++
++ ignored = copy_from_user(v, buf, sz);
++ iounmap(v);
++ if (ignored) {
++ written += sz - ignored;
++ if (written)
++ break;
++ return -EFAULT;
++ }
++ buf += sz;
++ p += sz;
++ count -= sz;
++ written += sz;
++ }
++
++ *ppos += written;
++ return written;
++}
++
++#ifndef ARCH_HAS_DEV_MEM_MMAP_MEM
++static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
++{
++ size_t size = vma->vm_end - vma->vm_start;
++
++ if (uncached_access(file))
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ /* We want to return the real error code, not EAGAIN. */
++ return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot, DOMID_IO);
++}
++#endif
++
++/*
++ * The memory devices use the full 32/64 bits of the offset, and so we cannot
++ * check against negative addresses: they are ok. The return value is weird,
++ * though, in that case (0).
++ *
++ * also note that seeking relative to the "end of file" isn't supported:
++ * it has no meaning, so it returns -EINVAL.
++ */
++static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
++{
++ loff_t ret;
++
++ mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ switch (orig) {
++ case 0:
++ file->f_pos = offset;
++ ret = file->f_pos;
++ force_successful_syscall_return();
++ break;
++ case 1:
++ file->f_pos += offset;
++ ret = file->f_pos;
++ force_successful_syscall_return();
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++ return ret;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++const struct file_operations mem_fops = {
++ .llseek = memory_lseek,
++ .read = read_mem,
++ .write = write_mem,
++ .mmap = xen_mmap_mem,
++ .open = open_mem,
++};
+diff -rpuN linux-2.6.18.8/drivers/xen/console/console.c linux-2.6.18-xen-3.3.0/drivers/xen/console/console.c
+--- linux-2.6.18.8/drivers/xen/console/console.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/console/console.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,731 @@
++/******************************************************************************
++ * console.c
++ *
++ * Virtual console driver.
++ *
++ * Copyright (c) 2002-2004, K A Fraser.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/sysrq.h>
++#include <linux/screen_info.h>
++#include <linux/vt.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/uaccess.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/event_channel.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/xencons.h>
++
++/*
++ * Modes:
++ * 'xencons=off' [XC_OFF]: Console is disabled.
++ * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
++ * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
++ * 'xencons=xvc' [XC_XVC]: Console attached to '/dev/xvc0'.
++ * default: XC_XVC
++ *
++ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
++ * warnings from standard distro startup scripts.
++ */
++static enum {
++ XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
++} xc_mode = XC_XVC;
++static int xc_num = -1;
++
++/* /dev/xvc0 device number allocated by lanana.org. */
++#define XEN_XVC_MAJOR 204
++#define XEN_XVC_MINOR 191
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static unsigned long sysrq_requested;
++extern int sysrq_enabled;
++#endif
++
++static int __init xencons_setup(char *str)
++{
++ char *q;
++ int n;
++ extern int console_use_vt;
++
++ console_use_vt = 1;
++ if (!strncmp(str, "ttyS", 4)) {
++ xc_mode = XC_SERIAL;
++ str += 4;
++ } else if (!strncmp(str, "tty", 3)) {
++ xc_mode = XC_TTY;
++ str += 3;
++ console_use_vt = 0;
++ } else if (!strncmp(str, "xvc", 3)) {
++ xc_mode = XC_XVC;
++ str += 3;
++ } else if (!strncmp(str, "off", 3)) {
++ xc_mode = XC_OFF;
++ str += 3;
++ }
++
++ n = simple_strtol(str, &q, 10);
++ if (q != str)
++ xc_num = n;
++
++ return 1;
++}
++__setup("xencons=", xencons_setup);
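++
++/*
++ * Example (illustrative): booting with "xencons=ttyS0" selects
++ * XC_SERIAL with xc_num = 0, i.e. the console attaches to /dev/ttyS0;
++ * "xencons=off" disables the console entirely.
++ */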
++
++/* The kernel and user-land drivers share a common transmit buffer. */
++static unsigned int wbuf_size = 4096;
++#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
++static char *wbuf;
++static unsigned int wc, wp; /* write_cons, write_prod */
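++
++/*
++ * wc and wp are free-running indices; WBUF_MASK() wraps them into the
++ * power-of-two buffer, so (wp - wc) is the number of buffered bytes
++ * even across wrap-around.  Example: wbuf_size = 4096, wc = 4090,
++ * wp = 4100 means 10 bytes pending at offsets 4090..4095 and 0..3.
++ */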
++
++static int __init xencons_bufsz_setup(char *str)
++{
++ unsigned int goal;
++ goal = simple_strtoul(str, NULL, 0);
++ if (goal) {
++ goal = roundup_pow_of_two(goal);
++ if (wbuf_size < goal)
++ wbuf_size = goal;
++ }
++ return 1;
++}
++__setup("xencons_bufsz=", xencons_bufsz_setup);
++
++/* This lock protects accesses to the common transmit buffer. */
++static DEFINE_SPINLOCK(xencons_lock);
++
++/* Common transmit-kick routine. */
++static void __xencons_tx_flush(void);
++
++static struct tty_driver *xencons_driver;
++
++/******************** Kernel console driver ********************************/
++
++static void kcons_write(struct console *c, const char *s, unsigned int count)
++{
++ int i = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++
++ while (i < count) {
++ for (; i < count; i++) {
++ if ((wp - wc) >= (wbuf_size - 1))
++ break;
++ if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
++ wbuf[WBUF_MASK(wp++)] = '\r';
++ }
++
++ __xencons_tx_flush();
++ }
++
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void kcons_write_dom0(struct console *c, const char *s, unsigned int count)
++{
++ while (count > 0) {
++ int rc;
++ rc = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++ if (rc <= 0)
++ break;
++ count -= rc;
++ s += rc;
++ }
++}
++
++static struct tty_driver *kcons_device(struct console *c, int *index)
++{
++ *index = 0;
++ return xencons_driver;
++}
++
++static struct console kcons_info = {
++ .device = kcons_device,
++ .flags = CON_PRINTBUFFER | CON_ENABLED,
++ .index = -1,
++};
++
++static int __init xen_console_init(void)
++{
++ if (!is_running_on_xen())
++ goto out;
++
++ if (is_initial_xendomain()) {
++ kcons_info.write = kcons_write_dom0;
++ } else {
++ if (!xen_start_info->console.domU.evtchn)
++ goto out;
++ kcons_info.write = kcons_write;
++ }
++
++ switch (xc_mode) {
++ case XC_XVC:
++ strcpy(kcons_info.name, "xvc");
++ if (xc_num == -1)
++ xc_num = 0;
++ break;
++
++ case XC_SERIAL:
++ strcpy(kcons_info.name, "ttyS");
++ if (xc_num == -1)
++ xc_num = 0;
++ break;
++
++ case XC_TTY:
++ strcpy(kcons_info.name, "tty");
++ if (xc_num == -1)
++ xc_num = 1;
++ break;
++
++ default:
++ goto out;
++ }
++
++ wbuf = alloc_bootmem(wbuf_size);
++
++ register_console(&kcons_info);
++
++ out:
++ return 0;
++}
++console_initcall(xen_console_init);
++
++/*** Useful function for console debugging -- goes straight to Xen. ***/
++asmlinkage int xprintk(const char *fmt, ...)
++{
++ va_list args;
++ int printk_len;
++ static char printk_buf[1024];
++
++ /* Emit the output into the temporary buffer */
++ va_start(args, fmt);
++ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
++ va_end(args);
++
++ /* Send the processed output directly to Xen. */
++ kcons_write_dom0(NULL, printk_buf, printk_len);
++
++ return 0;
++}
++
++/*** Forcibly flush console data before dying. ***/
++void xencons_force_flush(void)
++{
++ int sz;
++
++ /* Emergency console is synchronous, so there's nothing to flush. */
++ if (!is_running_on_xen() ||
++ is_initial_xendomain() ||
++ !xen_start_info->console.domU.evtchn)
++ return;
++
++ /* Spin until console data is flushed through to the daemon. */
++ while (wc != wp) {
++ int sent = 0;
++ if ((sz = wp - wc) == 0)
++ continue;
++ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++ if (sent > 0)
++ wc += sent;
++ }
++}
++
++
++void __init dom0_init_screen_info(const struct dom0_vga_console_info *info, size_t size)
++{
++ /* This is drawn from a dump from vgacon:startup in
++ * standard Linux. */
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = 25;
++ screen_info.orig_video_cols = 80;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_points = 16;
++ screen_info.orig_y = screen_info.orig_video_lines - 1;
++
++ switch (info->video_type) {
++ case XEN_VGATYPE_TEXT_MODE_3:
++ if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
++ + sizeof(info->u.text_mode_3))
++ break;
++ screen_info.orig_video_lines = info->u.text_mode_3.rows;
++ screen_info.orig_video_cols = info->u.text_mode_3.columns;
++ screen_info.orig_x = info->u.text_mode_3.cursor_x;
++ screen_info.orig_y = info->u.text_mode_3.cursor_y;
++ screen_info.orig_video_points =
++ info->u.text_mode_3.font_height;
++ break;
++
++ case XEN_VGATYPE_VESA_LFB:
++ if (size < offsetof(struct dom0_vga_console_info,
++ u.vesa_lfb.gbl_caps))
++ break;
++ screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
++ screen_info.lfb_width = info->u.vesa_lfb.width;
++ screen_info.lfb_height = info->u.vesa_lfb.height;
++ screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
++ screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
++ screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
++ screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
++ screen_info.red_size = info->u.vesa_lfb.red_size;
++ screen_info.red_pos = info->u.vesa_lfb.red_pos;
++ screen_info.green_size = info->u.vesa_lfb.green_size;
++ screen_info.green_pos = info->u.vesa_lfb.green_pos;
++ screen_info.blue_size = info->u.vesa_lfb.blue_size;
++ screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
++ screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
++ screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
++ if (size >= offsetof(struct dom0_vga_console_info,
++ u.vesa_lfb.gbl_caps)
++ + sizeof(info->u.vesa_lfb.gbl_caps))
++ screen_info.capabilities = info->u.vesa_lfb.gbl_caps;
++ if (size >= offsetof(struct dom0_vga_console_info,
++ u.vesa_lfb.mode_attrs)
++ + sizeof(info->u.vesa_lfb.mode_attrs))
++ screen_info.vesa_attributes = info->u.vesa_lfb.mode_attrs;
++ break;
++ }
++}
++
++
++/******************** User-space console driver (/dev/console) ************/
++
++#define DRV(_d) (_d)
++#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) && \
++ ((_tty)->index != (xc_num - 1)))
++
++static struct termios *xencons_termios[MAX_NR_CONSOLES];
++static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct tty_struct *xencons_tty;
++static int xencons_priv_irq;
++static char x_char;
++
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++{
++ int i;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ if (xencons_tty == NULL)
++ goto out;
++
++ for (i = 0; i < len; i++) {
++#ifdef CONFIG_MAGIC_SYSRQ
++ if (sysrq_enabled) {
++ if (buf[i] == '\x0f') { /* ^O */
++ if (!sysrq_requested) {
++ sysrq_requested = jiffies;
++ continue; /* don't print sysrq key */
++ }
++ sysrq_requested = 0;
++ } else if (sysrq_requested) {
++ unsigned long sysrq_timeout =
++ sysrq_requested + HZ*2;
++ sysrq_requested = 0;
++ if (time_before(jiffies, sysrq_timeout)) {
++ spin_unlock_irqrestore(
++ &xencons_lock, flags);
++ handle_sysrq(
++ buf[i], regs, xencons_tty);
++ spin_lock_irqsave(
++ &xencons_lock, flags);
++ continue;
++ }
++ }
++ }
++#endif
++ tty_insert_flip_char(xencons_tty, buf[i], 0);
++ }
++ tty_flip_buffer_push(xencons_tty);
++
++ out:
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void __xencons_tx_flush(void)
++{
++ int sent, sz, work_done = 0;
++
++ if (x_char) {
++ if (is_initial_xendomain())
++ kcons_write_dom0(NULL, &x_char, 1);
++ else
++ while (x_char)
++ if (xencons_ring_send(&x_char, 1) == 1)
++ break;
++ x_char = 0;
++ work_done = 1;
++ }
++
++ while (wc != wp) {
++ sz = wp - wc;
++ if (sz > (wbuf_size - WBUF_MASK(wc)))
++ sz = wbuf_size - WBUF_MASK(wc);
++ if (is_initial_xendomain()) {
++ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
++ wc += sz;
++ } else {
++ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++ if (sent == 0)
++ break;
++ wc += sent;
++ }
++ work_done = 1;
++ }
++
++ if (work_done && (xencons_tty != NULL)) {
++ wake_up_interruptible(&xencons_tty->write_wait);
++ if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++ (xencons_tty->ldisc.write_wakeup != NULL))
++ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
++ }
++}
++
++void xencons_tx(void)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++/* Privileged receive callback and transmit kicker. */
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ static char rbuf[16];
++ int l;
++
++ while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
++ xencons_rx(rbuf, l, regs);
++
++ xencons_tx();
++
++ return IRQ_HANDLED;
++}
++
++static int xencons_write_room(struct tty_struct *tty)
++{
++ return wbuf_size - (wp - wc);
++}
++
++static int xencons_chars_in_buffer(struct tty_struct *tty)
++{
++ return wp - wc;
++}
++
++static void xencons_send_xchar(struct tty_struct *tty, char ch)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ x_char = ch;
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_throttle(struct tty_struct *tty)
++{
++ if (DUMMY_TTY(tty))
++ return;
++
++ if (I_IXOFF(tty))
++ xencons_send_xchar(tty, STOP_CHAR(tty));
++}
++
++static void xencons_unthrottle(struct tty_struct *tty)
++{
++ if (DUMMY_TTY(tty))
++ return;
++
++ if (I_IXOFF(tty)) {
++ if (x_char != 0)
++ x_char = 0;
++ else
++ xencons_send_xchar(tty, START_CHAR(tty));
++ }
++}
++
++static void xencons_flush_buffer(struct tty_struct *tty)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ wc = wp = 0;
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static inline int __xencons_put_char(int ch)
++{
++ char _ch = (char)ch;
++ if ((wp - wc) == wbuf_size)
++ return 0;
++ wbuf[WBUF_MASK(wp++)] = _ch;
++ return 1;
++}
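`__xencons_put_char()` and `__xencons_tx_flush()` rely on free-running unsigned indices: `wc`/`wp` are never reduced modulo the buffer size, `WBUF_MASK()` masks them only on access, and `wp - wc == wbuf_size` detects a full buffer via unsigned wraparound (this requires the buffer size, defined earlier in this patch, to be a power of two). A self-contained sketch of the idiom, with local names:

```c
#include <stdio.h>

/* Free-running producer/consumer indices over a power-of-two buffer,
 * as used by wbuf/wc/wp above.  Names are local to this sketch. */
#define WBUF_SIZE 8u
#define WBUF_MASK(i) ((i) & (WBUF_SIZE - 1))

static char wbuf[WBUF_SIZE];
static unsigned wc, wp;           /* consumer / producer, never masked */

static int put_char(char c)
{
	if (wp - wc == WBUF_SIZE)     /* full: unsigned diff == size */
		return 0;
	wbuf[WBUF_MASK(wp++)] = c;
	return 1;
}

static int get_char(char *c)
{
	if (wp == wc)                 /* empty */
		return 0;
	*c = wbuf[WBUF_MASK(wc++)];
	return 1;
}

int main(void)
{
	char c;

	for (c = 'a'; c <= 'z'; c++)
		if (!put_char(c))
			break;        /* stops after 8 characters */
	while (get_char(&c))
		putchar(c);
	putchar('\n');                /* prints "abcdefgh" */
	return 0;
}
```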
++
++static int xencons_write(
++ struct tty_struct *tty,
++ const unsigned char *buf,
++ int count)
++{
++ int i;
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return count;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++
++ for (i = 0; i < count; i++)
++ if (!__xencons_put_char(buf[i]))
++ break;
++
++ if (i != 0)
++ __xencons_tx_flush();
++
++ spin_unlock_irqrestore(&xencons_lock, flags);
++
++ return i;
++}
++
++static void xencons_put_char(struct tty_struct *tty, u_char ch)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ (void)__xencons_put_char(ch);
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_flush_chars(struct tty_struct *tty)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
++{
++ unsigned long orig_jiffies = jiffies;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ while (DRV(tty->driver)->chars_in_buffer(tty)) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(1);
++ if (signal_pending(current))
++ break;
++ if (timeout && time_after(jiffies, orig_jiffies + timeout))
++ break;
++ }
++
++ set_current_state(TASK_RUNNING);
++}
++
++static int xencons_open(struct tty_struct *tty, struct file *filp)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return 0;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ tty->driver_data = NULL;
++ if (xencons_tty == NULL)
++ xencons_tty = tty;
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++
++ return 0;
++}
++
++static void xencons_close(struct tty_struct *tty, struct file *filp)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ mutex_lock(&tty_mutex);
++
++ if (tty->count != 1) {
++ mutex_unlock(&tty_mutex);
++ return;
++ }
++
++ /* Prevent other threads from re-opening this tty. */
++ set_bit(TTY_CLOSING, &tty->flags);
++ mutex_unlock(&tty_mutex);
++
++ tty->closing = 1;
++ tty_wait_until_sent(tty, 0);
++ if (DRV(tty->driver)->flush_buffer != NULL)
++ DRV(tty->driver)->flush_buffer(tty);
++ if (tty->ldisc.flush_buffer != NULL)
++ tty->ldisc.flush_buffer(tty);
++ tty->closing = 0;
++ spin_lock_irqsave(&xencons_lock, flags);
++ xencons_tty = NULL;
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static struct tty_operations xencons_ops = {
++ .open = xencons_open,
++ .close = xencons_close,
++ .write = xencons_write,
++ .write_room = xencons_write_room,
++ .put_char = xencons_put_char,
++ .flush_chars = xencons_flush_chars,
++ .chars_in_buffer = xencons_chars_in_buffer,
++ .send_xchar = xencons_send_xchar,
++ .flush_buffer = xencons_flush_buffer,
++ .throttle = xencons_throttle,
++ .unthrottle = xencons_unthrottle,
++ .wait_until_sent = xencons_wait_until_sent,
++};
++
++static int __init xencons_init(void)
++{
++ int rc;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ if (xc_mode == XC_OFF)
++ return 0;
++
++ if (!is_initial_xendomain()) {
++ rc = xencons_ring_init();
++ if (rc)
++ return rc;
++ }
++
++ xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
++ MAX_NR_CONSOLES : 1);
++ if (xencons_driver == NULL)
++ return -ENOMEM;
++
++ DRV(xencons_driver)->name = "xencons";
++ DRV(xencons_driver)->major = TTY_MAJOR;
++ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
++ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
++ DRV(xencons_driver)->init_termios = tty_std_termios;
++ DRV(xencons_driver)->flags =
++ TTY_DRIVER_REAL_RAW |
++ TTY_DRIVER_RESET_TERMIOS;
++ DRV(xencons_driver)->termios = xencons_termios;
++ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
++
++ switch (xc_mode) {
++ case XC_XVC:
++ DRV(xencons_driver)->name = "xvc";
++ DRV(xencons_driver)->major = XEN_XVC_MAJOR;
++ DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
++ DRV(xencons_driver)->name_base = xc_num;
++ break;
++ case XC_SERIAL:
++ DRV(xencons_driver)->name = "ttyS";
++ DRV(xencons_driver)->minor_start = 64 + xc_num;
++ DRV(xencons_driver)->name_base = xc_num;
++ break;
++ default:
++ DRV(xencons_driver)->name = "tty";
++ DRV(xencons_driver)->minor_start = 1;
++ DRV(xencons_driver)->name_base = 1;
++ break;
++ }
++
++ tty_set_operations(xencons_driver, &xencons_ops);
++
++ if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
++		printk(KERN_WARNING "Failed to register Xen virtual "
++ "console driver as '%s%d'\n",
++ DRV(xencons_driver)->name,
++ DRV(xencons_driver)->name_base);
++ put_tty_driver(xencons_driver);
++ xencons_driver = NULL;
++ return rc;
++ }
++
++ if (is_initial_xendomain()) {
++ xencons_priv_irq = bind_virq_to_irqhandler(
++ VIRQ_CONSOLE,
++ 0,
++ xencons_priv_interrupt,
++ 0,
++ "console",
++ NULL);
++ BUG_ON(xencons_priv_irq < 0);
++ }
++
++	printk(KERN_INFO "Xen virtual console successfully installed as %s%d\n",
++ DRV(xencons_driver)->name, xc_num);
++
++ return 0;
++}
++
++module_init(xencons_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
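For reference, the tty core builds the device node name from the `name`, `name_base` and tty index configured in the `xc_mode` switch above (it prints `name` followed by `index + name_base`), so the three modes surface as `/dev/xvc<n>`, `/dev/ttyS<n>` and `/dev/tty1`. A sketch of the mapping, with illustrative values for `xc_num`:

```c
#include <stdio.h>

/* How the tty core derives node names from the fields set in
 * xencons_init(): name + (index + name_base).  Values illustrative. */
static void show(const char *name, int name_base, int index)
{
	printf("/dev/%s%d\n", name, index + name_base);
}

int main(void)
{
	show("xvc", 0, 0);  /* XC_XVC, xc_num == 0    -> /dev/xvc0  */
	show("ttyS", 0, 0); /* XC_SERIAL, xc_num == 0 -> /dev/ttyS0 */
	show("tty", 1, 0);  /* XC_TTY default         -> /dev/tty1  */
	return 0;
}
```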
+diff -rpuN linux-2.6.18.8/drivers/xen/console/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/console/Makefile
+--- linux-2.6.18.8/drivers/xen/console/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/console/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++
++obj-y := console.o xencons_ring.o
+diff -rpuN linux-2.6.18.8/drivers/xen/console/xencons_ring.c linux-2.6.18-xen-3.3.0/drivers/xen/console/xencons_ring.c
+--- linux-2.6.18.8/drivers/xen/console/xencons_ring.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/console/xencons_ring.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,143 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xencons.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <xen/interface/io/console.h>
++
++static int xencons_irq;
++
++static inline struct xencons_interface *xencons_interface(void)
++{
++ return mfn_to_virt(xen_start_info->console.domU.mfn);
++}
++
++static inline void notify_daemon(void)
++{
++ /* Use evtchn: this is called early, before irq is set up. */
++ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++}
++
++int xencons_ring_send(const char *data, unsigned len)
++{
++ int sent = 0;
++ struct xencons_interface *intf = xencons_interface();
++ XENCONS_RING_IDX cons, prod;
++
++ cons = intf->out_cons;
++ prod = intf->out_prod;
++ mb();
++ BUG_ON((prod - cons) > sizeof(intf->out));
++
++ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
++ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
++
++ wmb();
++ intf->out_prod = prod;
++
++ notify_daemon();
++
++ return sent;
++}
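`xencons_ring_send()` is the producer half of a single-producer/single-consumer ring: indices run freely, `prod - cons` bounds occupancy, and the `wmb()` guarantees the bytes are globally visible before the new producer index is published. A compilable C11 sketch of the same discipline, using acquire/release atomics in place of the explicit barriers (ring layout and names are local to this sketch, not the Xen console ABI):

```c
/* Build with -std=c11. */
#include <stdatomic.h>
#include <stdio.h>

#define RING_SZ 16u

struct ring {
	char buf[RING_SZ];
	_Atomic unsigned cons, prod;  /* free-running indices */
};

static unsigned ring_send(struct ring *r, const char *data, unsigned len)
{
	unsigned sent = 0;
	unsigned cons = atomic_load_explicit(&r->cons, memory_order_acquire);
	unsigned prod = atomic_load_explicit(&r->prod, memory_order_relaxed);

	while (sent < len && prod - cons < RING_SZ)
		r->buf[prod++ & (RING_SZ - 1)] = data[sent++];

	/* Release store = "wmb(); intf->out_prod = prod": the data is
	 * visible before the index that advertises it. */
	atomic_store_explicit(&r->prod, prod, memory_order_release);
	return sent;
}

int main(void)
{
	struct ring r = { { 0 } };

	printf("sent %u\n", ring_send(&r, "hello", 5));  /* sent 5 */
	return 0;
}
```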
++
++static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++{
++ struct xencons_interface *intf = xencons_interface();
++ XENCONS_RING_IDX cons, prod;
++
++ cons = intf->in_cons;
++ prod = intf->in_prod;
++ mb();
++ BUG_ON((prod - cons) > sizeof(intf->in));
++
++ while (cons != prod) {
++		xencons_rx(intf->in + MASK_XENCONS_IDX(cons, intf->in), 1, regs);
++ cons++;
++ }
++
++ mb();
++ intf->in_cons = cons;
++
++ notify_daemon();
++
++ xencons_tx();
++
++ return IRQ_HANDLED;
++}
++
++int xencons_ring_init(void)
++{
++ int irq;
++
++ if (xencons_irq)
++ unbind_from_irqhandler(xencons_irq, NULL);
++ xencons_irq = 0;
++
++ if (!is_running_on_xen() ||
++ is_initial_xendomain() ||
++ !xen_start_info->console.domU.evtchn)
++ return -ENODEV;
++
++ irq = bind_caller_port_to_irqhandler(
++ xen_start_info->console.domU.evtchn,
++ handle_input, 0, "xencons", NULL);
++ if (irq < 0) {
++		printk(KERN_ERR "XEN console: request irq failed (%i)\n", irq);
++ return irq;
++ }
++
++ xencons_irq = irq;
++
++ /* In case we have in-flight data after save/restore... */
++ notify_daemon();
++
++ return 0;
++}
++
++void xencons_resume(void)
++{
++ (void)xencons_ring_init();
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/core/cpu_hotplug.c linux-2.6.18-xen-3.3.0/drivers/xen/core/cpu_hotplug.c
+--- linux-2.6.18.8/drivers/xen/core/cpu_hotplug.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/cpu_hotplug.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,173 @@
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++/*
++ * Set of CPUs that remote admin software will allow us to bring online.
++ * Notified to us via xenbus.
++ */
++static cpumask_t xenbus_allowed_cpumask;
++
++/* Set of CPUs that local admin will allow us to bring online. */
++static cpumask_t local_allowed_cpumask = CPU_MASK_ALL;
++
++static int local_cpu_hotplug_request(void)
++{
++ /*
++ * We assume a CPU hotplug request comes from local admin if it is made
++ * via a userspace process (i.e., one with a real mm_struct).
++ */
++ return (current->mm != NULL);
++}
++
++static void vcpu_hotplug(unsigned int cpu)
++{
++ int err;
++ char dir[32], state[32];
++
++ if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
++ return;
++
++ sprintf(dir, "cpu/%u", cpu);
++ err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
++ if (err != 1) {
++ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
++ return;
++ }
++
++ if (strcmp(state, "online") == 0) {
++ cpu_set(cpu, xenbus_allowed_cpumask);
++ (void)cpu_up(cpu);
++ } else if (strcmp(state, "offline") == 0) {
++ cpu_clear(cpu, xenbus_allowed_cpumask);
++ (void)cpu_down(cpu);
++ } else {
++		printk(KERN_ERR "XENBUS: unknown state (%s) on CPU%d\n",
++		       state, cpu);
++ }
++}
++
++static void handle_vcpu_hotplug_event(
++ struct xenbus_watch *watch, const char **vec, unsigned int len)
++{
++ unsigned int cpu;
++ char *cpustr;
++ const char *node = vec[XS_WATCH_PATH];
++
++ if ((cpustr = strstr(node, "cpu/")) != NULL) {
++ sscanf(cpustr, "cpu/%u", &cpu);
++ vcpu_hotplug(cpu);
++ }
++}
++
++static int smpboot_cpu_notify(struct notifier_block *notifier,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (long)hcpu;
++
++ /*
++ * We do this in a callback notifier rather than __cpu_disable()
++ * because local_cpu_hotplug_request() does not work in the latter
++ * as it's always executed from within a stopmachine kthread.
++ */
++ if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request())
++ cpu_clear(cpu, local_allowed_cpumask);
++
++ return NOTIFY_OK;
++}
++
++static int setup_cpu_watcher(struct notifier_block *notifier,
++ unsigned long event, void *data)
++{
++ unsigned int i;
++
++ static struct xenbus_watch cpu_watch = {
++ .node = "cpu",
++ .callback = handle_vcpu_hotplug_event,
++ .flags = XBWF_new_thread };
++ (void)register_xenbus_watch(&cpu_watch);
++
++ if (!is_initial_xendomain()) {
++ for_each_possible_cpu(i)
++ vcpu_hotplug(i);
++ printk(KERN_INFO "Brought up %ld CPUs\n",
++ (long)num_online_cpus());
++ }
++
++ return NOTIFY_DONE;
++}
++
++static int __init setup_vcpu_hotplug_event(void)
++{
++ static struct notifier_block hotplug_cpu = {
++ .notifier_call = smpboot_cpu_notify };
++ static struct notifier_block xsn_cpu = {
++ .notifier_call = setup_cpu_watcher };
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ register_cpu_notifier(&hotplug_cpu);
++ register_xenstore_notifier(&xsn_cpu);
++
++ return 0;
++}
++
++arch_initcall(setup_vcpu_hotplug_event);
++
++int smp_suspend(void)
++{
++ unsigned int cpu;
++ int err;
++
++ for_each_online_cpu(cpu) {
++ if (cpu == 0)
++ continue;
++ err = cpu_down(cpu);
++ if (err) {
++ printk(KERN_CRIT "Failed to take all CPUs "
++ "down: %d.\n", err);
++ for_each_possible_cpu(cpu)
++ vcpu_hotplug(cpu);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++void smp_resume(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu)
++ vcpu_hotplug(cpu);
++}
++
++int cpu_up_check(unsigned int cpu)
++{
++ int rc = 0;
++
++ if (local_cpu_hotplug_request()) {
++ cpu_set(cpu, local_allowed_cpumask);
++ if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
++		printk(KERN_WARNING "%s: attempt to bring up CPU %u disallowed by "
++ "remote admin.\n", __FUNCTION__, cpu);
++ rc = -EBUSY;
++ }
++ } else if (!cpu_isset(cpu, local_allowed_cpumask) ||
++ !cpu_isset(cpu, xenbus_allowed_cpumask)) {
++ rc = -EBUSY;
++ }
++
++ return rc;
++}
++
++void init_xenbus_allowed_cpumask(void)
++{
++ xenbus_allowed_cpumask = cpu_present_map;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/core/evtchn.c linux-2.6.18-xen-3.3.0/drivers/xen/core/evtchn.c
+--- linux-2.6.18.8/drivers/xen/core/evtchn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/evtchn.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1061 @@
++/******************************************************************************
++ * evtchn.c
++ *
++ * Communication via Xen event channels.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/version.h>
++#include <asm/atomic.h>
++#include <asm/system.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/evtchn.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <asm/hypervisor.h>
++#include <linux/mc146818rtc.h> /* RTC_IRQ */
++
++/*
++ * This lock protects updates to the following mapping and reference-count
++ * arrays. The lock does not need to be acquired to read the mapping tables.
++ */
++static DEFINE_SPINLOCK(irq_mapping_update_lock);
++
++/* IRQ <-> event-channel mappings. */
++static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
++ [0 ... NR_EVENT_CHANNELS-1] = -1 };
++
++/* Packed IRQ information: binding type, sub-type index, and event channel. */
++static u32 irq_info[NR_IRQS];
++
++/* Binding types. */
++enum {
++ IRQT_UNBOUND,
++ IRQT_PIRQ,
++ IRQT_VIRQ,
++ IRQT_IPI,
++ IRQT_LOCAL_PORT,
++ IRQT_CALLER_PORT
++};
++
++/* Constructor for packed IRQ information. */
++static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
++{
++ return ((type << 24) | (index << 16) | evtchn);
++}
++
++/* Convenient shorthand for packed representation of an unbound IRQ. */
++#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
++
++/*
++ * Accessors for packed IRQ information.
++ */
++
++static inline unsigned int evtchn_from_irq(int irq)
++{
++ return (u16)(irq_info[irq]);
++}
++
++static inline unsigned int index_from_irq(int irq)
++{
++ return (u8)(irq_info[irq] >> 16);
++}
++
++static inline unsigned int type_from_irq(int irq)
++{
++ return (u8)(irq_info[irq] >> 24);
++}
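The packing above implies hard limits worth stating: the event channel must fit in 16 bits and the sub-type index in 8, which the tree's `NR_EVENT_CHANNELS` and VIRQ/IPI counts respect. A quick round-trip check of the layout (the constants in the test are invented):

```c
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* Round trip of the (type, index, evtchn) packing used for irq_info:
 * 8-bit type, 8-bit index, 16-bit event channel. */
static uint32_t mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{
	return (type << 24) | (index << 16) | evtchn;
}

int main(void)
{
	uint32_t info = mk_irq_info(2 /* IRQT_VIRQ */, 5, 1234);

	assert((uint16_t)info == 1234);      /* evtchn_from_irq() */
	assert((uint8_t)(info >> 16) == 5);  /* index_from_irq()  */
	assert((uint8_t)(info >> 24) == 2);  /* type_from_irq()   */
	printf("info=0x%x ok\n", (unsigned)info);
	return 0;
}
```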
++
++/* IRQ <-> VIRQ mapping. */
++DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
++
++/* IRQ <-> IPI mapping. */
++#ifndef NR_IPIS
++#define NR_IPIS 1
++#endif
++DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
++
++/* Reference counts for bindings to IRQs. */
++static int irq_bindcount[NR_IRQS];
++
++/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
++static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
++
++#ifdef CONFIG_SMP
++
++static u8 cpu_evtchn[NR_EVENT_CHANNELS];
++static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
++
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++ unsigned int idx)
++{
++ return (sh->evtchn_pending[idx] &
++ cpu_evtchn_mask[cpu][idx] &
++ ~sh->evtchn_mask[idx]);
++}
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ int irq = evtchn_to_irq[chn];
++
++ BUG_ON(!test_bit(chn, s->evtchn_mask));
++
++ if (irq != -1)
++ set_native_irq_info(irq, cpumask_of_cpu(cpu));
++
++ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
++ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
++ cpu_evtchn[chn] = cpu;
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++ int i;
++
++ /* By default all event channels notify CPU#0. */
++ for (i = 0; i < NR_IRQS; i++)
++ set_native_irq_info(i, cpumask_of_cpu(0));
++
++ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
++ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++ return cpu_evtchn[evtchn];
++}
++
++#else
++
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++ unsigned int idx)
++{
++ return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
++}
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++ return 0;
++}
++
++#endif
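Both variants of `active_evtchns()` compute the same thing word-at-a-time: an event is deliverable on this CPU iff it is pending, routed here, and not masked (the UP build simply drops the routing term). The whole selection in one expression, with made-up bit patterns:

```c
#include <stdio.h>

/* The per-CPU event selection above in one line: pending AND
 * bound-to-this-CPU AND not-masked, over illustrative 32-bit words. */
int main(void)
{
	unsigned pending  = 0x0000000f;  /* ports 0-3 pending    */
	unsigned this_cpu = 0x00000005;  /* ports 0,2 bound here */
	unsigned mask     = 0x00000001;  /* port 0 masked        */

	unsigned active = pending & this_cpu & ~mask;

	printf("active=%#x\n", active);  /* 0x4: only port 2 fires */
	return 0;
}
```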
++
++/* Upcall to generic IRQ layer. */
++#ifdef CONFIG_X86
++extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
++void __init xen_init_IRQ(void);
++void __init init_IRQ(void)
++{
++ irq_ctx_init(0);
++ xen_init_IRQ();
++}
++#if defined (__i386__)
++static inline void exit_idle(void) {}
++#define IRQ_REG orig_eax
++#elif defined (__x86_64__)
++#include <asm/idle.h>
++#define IRQ_REG orig_rax
++#endif
++#define do_IRQ(irq, regs) do { \
++ (regs)->IRQ_REG = ~(irq); \
++ do_IRQ((regs)); \
++} while (0)
++#endif
++
++/* Xen will never allocate port zero for any purpose. */
++#define VALID_EVTCHN(chn) ((chn) != 0)
++
++/*
++ * Force a proper event-channel callback from Xen after clearing the
++ * callback mask. We do this in a very simple manner, by making a call
++ * down into Xen. The pending flag will be checked by Xen on return.
++ */
++void force_evtchn_callback(void)
++{
++ VOID(HYPERVISOR_xen_version(0, NULL));
++}
++/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
++EXPORT_SYMBOL(force_evtchn_callback);
++
++static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
++static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
++static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };
++
++/* NB. Interrupts are disabled on entry. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
++{
++ unsigned long l1, l2;
++ unsigned long masked_l1, masked_l2;
++ unsigned int l1i, l2i, port, count;
++ int irq;
++ unsigned int cpu = smp_processor_id();
++ shared_info_t *s = HYPERVISOR_shared_info;
++ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++ do {
++ /* Avoid a callback storm when we reenable delivery. */
++ vcpu_info->evtchn_upcall_pending = 0;
++
++ /* Nested invocations bail immediately. */
++ if (unlikely(per_cpu(upcall_count, cpu)++))
++ return;
++
++#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
++ /* Clear master flag /before/ clearing selector flag. */
++ wmb();
++#endif
++ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
++
++ l1i = per_cpu(last_processed_l1i, cpu);
++ l2i = per_cpu(last_processed_l2i, cpu);
++
++ while (l1 != 0) {
++
++ l1i = (l1i + 1) % BITS_PER_LONG;
++ masked_l1 = l1 & ((~0UL) << l1i);
++
++			/* Masked out all events: wrap to the beginning. */
++			if (masked_l1 == 0) {
++ l1i = BITS_PER_LONG - 1;
++ l2i = BITS_PER_LONG - 1;
++ continue;
++ }
++ l1i = __ffs(masked_l1);
++
++ do {
++ l2 = active_evtchns(cpu, s, l1i);
++
++ l2i = (l2i + 1) % BITS_PER_LONG;
++ masked_l2 = l2 & ((~0UL) << l2i);
++
++				/* Masked out all events: move on. */
++				if (masked_l2 == 0) {
++ l2i = BITS_PER_LONG - 1;
++ break;
++ }
++
++ l2i = __ffs(masked_l2);
++
++ /* process port */
++ port = (l1i * BITS_PER_LONG) + l2i;
++ if ((irq = evtchn_to_irq[port]) != -1)
++ do_IRQ(irq, regs);
++ else {
++ exit_idle();
++ evtchn_device_upcall(port);
++ }
++
++				/* Record resume point: one past this port. */
++ per_cpu(last_processed_l1i, cpu) = l1i;
++ per_cpu(last_processed_l2i, cpu) = l2i;
++
++ } while (l2i != BITS_PER_LONG - 1);
++
++ l2 = active_evtchns(cpu, s, l1i);
++			/* All ports handled: clear the selector bit. */
++			if (l2 == 0)
++				l1 &= ~(1UL << l1i);
++
++ }
++
++ /* If there were nested callbacks then we have more to do. */
++ count = per_cpu(upcall_count, cpu);
++ per_cpu(upcall_count, cpu) = 0;
++ } while (unlikely(count != 1));
++}
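The `last_processed_l1i`/`last_processed_l2i` bookkeeping above makes the scan round-robin: each pass resumes one bit past the last port serviced, so a constantly firing low-numbered port cannot starve higher ones. A single-level sketch of the trick (GCC's `__builtin_ctz()` stands in for `__ffs()`; word width and port numbers are illustrative):

```c
#include <stdio.h>

#define BPL 32u                      /* bits per word in this sketch */

static unsigned last_bit = BPL - 1;  /* cf. last_processed_l2i */

static void service(unsigned pending)
{
	while (pending) {
		unsigned start = (last_bit + 1) % BPL;
		unsigned masked = pending & (~0u << start);

		if (masked == 0) {       /* nothing at/after start: wrap */
			last_bit = BPL - 1;
			continue;
		}
		last_bit = __builtin_ctz(masked);  /* lowest bit >= start */
		pending &= ~(1u << last_bit);
		printf("port %u\n", last_bit);
	}
}

int main(void)
{
	service(0x01);  /* services port 0; resume point is now 0   */
	service(0x09);  /* ports 0 and 3: services 3 first, then 0  */
	return 0;
}
```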
++
++static int find_unbound_irq(void)
++{
++ static int warned;
++ int dynirq, irq;
++
++ for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
++ irq = dynirq_to_irq(dynirq);
++ if (irq_bindcount[irq] == 0)
++ return irq;
++ }
++
++ if (!warned) {
++ warned = 1;
++ printk(KERN_WARNING "No available IRQ to bind to: "
++ "increase NR_DYNIRQS.\n");
++ }
++
++ return -ENOSPC;
++}
++
++static int bind_caller_port_to_irq(unsigned int caller_port)
++{
++ int irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = evtchn_to_irq[caller_port]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ evtchn_to_irq[caller_port] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_local_port_to_irq(unsigned int local_port)
++{
++ int irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ BUG_ON(evtchn_to_irq[local_port] != -1);
++
++ if ((irq = find_unbound_irq()) < 0) {
++ struct evtchn_close close = { .port = local_port };
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++ BUG();
++ goto out;
++ }
++
++ evtchn_to_irq[local_port] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_listening_port_to_irq(unsigned int remote_domain)
++{
++ struct evtchn_alloc_unbound alloc_unbound;
++ int err;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = remote_domain;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++
++ return err ? : bind_local_port_to_irq(alloc_unbound.port);
++}
++
++static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
++ unsigned int remote_port)
++{
++ struct evtchn_bind_interdomain bind_interdomain;
++ int err;
++
++ bind_interdomain.remote_dom = remote_domain;
++ bind_interdomain.remote_port = remote_port;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++ &bind_interdomain);
++
++ return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
++}
++
++static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
++{
++ struct evtchn_bind_virq bind_virq;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ bind_virq.virq = virq;
++ bind_virq.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq) != 0)
++ BUG();
++ evtchn = bind_virq.port;
++
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++
++ per_cpu(virq_to_irq, cpu)[virq] = irq;
++
++ bind_evtchn_to_cpu(evtchn, cpu);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
++{
++ struct evtchn_bind_ipi bind_ipi;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ bind_ipi.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++ &bind_ipi) != 0)
++ BUG();
++ evtchn = bind_ipi.port;
++
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++
++ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++
++ bind_evtchn_to_cpu(evtchn, cpu);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static void unbind_from_irq(unsigned int irq)
++{
++ struct evtchn_close close;
++ unsigned int cpu;
++ int evtchn = evtchn_from_irq(irq);
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
++ close.port = evtchn;
++ if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
++ HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++ BUG();
++
++ switch (type_from_irq(irq)) {
++ case IRQT_VIRQ:
++ per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
++ [index_from_irq(irq)] = -1;
++ break;
++ case IRQT_IPI:
++ per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
++ [index_from_irq(irq)] = -1;
++ break;
++ default:
++ break;
++ }
++
++ /* Closed ports are implicitly re-bound to VCPU0. */
++ bind_evtchn_to_cpu(evtchn, 0);
++
++ evtchn_to_irq[evtchn] = -1;
++ irq_info[irq] = IRQ_UNBOUND;
++
++ /* Zap stats across IRQ changes of use. */
++ for_each_possible_cpu(cpu)
++ kstat_cpu(cpu).irqs[irq] = 0;
++ }
++
++ spin_unlock(&irq_mapping_update_lock);
++}
++
++int bind_caller_port_to_irqhandler(
++ unsigned int caller_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_caller_port_to_irq(caller_port);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
++
++int bind_listening_port_to_irqhandler(
++ unsigned int remote_domain,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_listening_port_to_irq(remote_domain);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
++
++int bind_interdomain_evtchn_to_irqhandler(
++ unsigned int remote_domain,
++ unsigned int remote_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
++
++int bind_virq_to_irqhandler(
++ unsigned int virq,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_virq_to_irq(virq, cpu);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
++
++int bind_ipi_to_irqhandler(
++ unsigned int ipi,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_ipi_to_irq(ipi, cpu);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
++
++void unbind_from_irqhandler(unsigned int irq, void *dev_id)
++{
++ free_irq(irq, dev_id);
++ unbind_from_irq(irq);
++}
++EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
++
++#ifdef CONFIG_SMP
++void rebind_evtchn_to_cpu(int port, unsigned int cpu)
++{
++ struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
++ int masked;
++
++ masked = test_and_set_evtchn_mask(port);
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
++ bind_evtchn_to_cpu(port, cpu);
++ if (!masked)
++ unmask_evtchn(port);
++}
++
++static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ rebind_evtchn_to_cpu(evtchn, tcpu);
++}
++
++static void set_affinity_irq(unsigned int irq, cpumask_t dest)
++{
++ unsigned tcpu = first_cpu(dest);
++ rebind_irq_to_cpu(irq, tcpu);
++}
++#endif
++
++int resend_irq_on_evtchn(unsigned int irq)
++{
++ int masked, evtchn = evtchn_from_irq(irq);
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ if (!VALID_EVTCHN(evtchn))
++ return 1;
++
++ masked = test_and_set_evtchn_mask(evtchn);
++ synch_set_bit(evtchn, s->evtchn_pending);
++ if (!masked)
++ unmask_evtchn(evtchn);
++
++ return 1;
++}
++
++/*
++ * Interface to generic handling in irq.c
++ */
++
++static unsigned int startup_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ unmask_evtchn(evtchn);
++ return 0;
++}
++
++static void shutdown_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void enable_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ unmask_evtchn(evtchn);
++}
++
++static void disable_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void ack_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ move_native_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ mask_evtchn(evtchn);
++ clear_evtchn(evtchn);
++ }
++}
++
++static void end_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
++ unmask_evtchn(evtchn);
++}
++
++static struct hw_interrupt_type dynirq_type = {
++ .typename = "Dynamic-irq",
++ .startup = startup_dynirq,
++ .shutdown = shutdown_dynirq,
++ .enable = enable_dynirq,
++ .disable = disable_dynirq,
++ .ack = ack_dynirq,
++ .end = end_dynirq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_affinity_irq,
++#endif
++ .retrigger = resend_irq_on_evtchn,
++};
++
++static inline void pirq_unmask_notify(int pirq)
++{
++ struct physdev_eoi eoi = { .irq = pirq };
++ if (unlikely(test_bit(pirq, pirq_needs_eoi)))
++ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
++}
++
++static inline void pirq_query_unmask(int pirq)
++{
++ struct physdev_irq_status_query irq_status;
++ irq_status.irq = pirq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
++ irq_status.flags = 0;
++ clear_bit(pirq, pirq_needs_eoi);
++ if (irq_status.flags & XENIRQSTAT_needs_eoi)
++ set_bit(pirq, pirq_needs_eoi);
++}
++
++/*
++ * On startup, if there is no action associated with the IRQ then we are
++ * probing. In this case we should not share with others as it will confuse us.
++ */
++#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
++
++static unsigned int startup_pirq(unsigned int irq)
++{
++ struct evtchn_bind_pirq bind_pirq;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ goto out;
++
++ bind_pirq.pirq = irq;
++ /* NB. We are happy to share unless we are probing. */
++ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
++ if (!probing_irq(irq))
++ printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
++ irq);
++ return 0;
++ }
++ evtchn = bind_pirq.port;
++
++ pirq_query_unmask(irq_to_pirq(irq));
++
++ evtchn_to_irq[evtchn] = irq;
++ bind_evtchn_to_cpu(evtchn, 0);
++ irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
++
++ out:
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq_to_pirq(irq));
++
++ return 0;
++}
++
++static void shutdown_pirq(unsigned int irq)
++{
++ struct evtchn_close close;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (!VALID_EVTCHN(evtchn))
++ return;
++
++ mask_evtchn(evtchn);
++
++ close.port = evtchn;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++ BUG();
++
++ bind_evtchn_to_cpu(evtchn, 0);
++ evtchn_to_irq[evtchn] = -1;
++ irq_info[irq] = IRQ_UNBOUND;
++}
++
++static void enable_pirq(unsigned int irq)
++{
++ startup_pirq(irq);
++}
++
++static void disable_pirq(unsigned int irq)
++{
++}
++
++static void ack_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ move_native_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ mask_evtchn(evtchn);
++ clear_evtchn(evtchn);
++ }
++}
++
++static void end_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
++ (IRQ_DISABLED|IRQ_PENDING)) {
++ shutdown_pirq(irq);
++ } else if (VALID_EVTCHN(evtchn)) {
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq_to_pirq(irq));
++ }
++}
++
++static struct hw_interrupt_type pirq_type = {
++ .typename = "Phys-irq",
++ .startup = startup_pirq,
++ .shutdown = shutdown_pirq,
++ .enable = enable_pirq,
++ .disable = disable_pirq,
++ .ack = ack_pirq,
++ .end = end_pirq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_affinity_irq,
++#endif
++ .retrigger = resend_irq_on_evtchn,
++};
++
++int irq_ignore_unhandled(unsigned int irq)
++{
++ struct physdev_irq_status_query irq_status = { .irq = irq };
++
++ if (!is_running_on_xen())
++ return 0;
++
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
++ return 0;
++ return !!(irq_status.flags & XENIRQSTAT_shared);
++}
++
++void notify_remote_via_irq(int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ notify_remote_via_evtchn(evtchn);
++}
++EXPORT_SYMBOL_GPL(notify_remote_via_irq);
++
++int irq_to_evtchn_port(int irq)
++{
++ return evtchn_from_irq(irq);
++}
++EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
++
++void mask_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ synch_set_bit(port, s->evtchn_mask);
++}
++EXPORT_SYMBOL_GPL(mask_evtchn);
++
++void unmask_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned int cpu = smp_processor_id();
++ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++ BUG_ON(!irqs_disabled());
++
++ /* Slow path (hypercall) if this is a non-local port. */
++ if (unlikely(cpu != cpu_from_evtchn(port))) {
++ struct evtchn_unmask unmask = { .port = port };
++ VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
++ return;
++ }
++
++ synch_clear_bit(port, s->evtchn_mask);
++
++ /* Did we miss an interrupt 'edge'? Re-fire if so. */
++ if (synch_test_bit(port, s->evtchn_pending) &&
++ !synch_test_and_set_bit(port / BITS_PER_LONG,
++ &vcpu_info->evtchn_pending_sel))
++ vcpu_info->evtchn_upcall_pending = 1;
++}
++EXPORT_SYMBOL_GPL(unmask_evtchn);
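The tail of `unmask_evtchn()` handles the edge-triggered nature of event delivery: if the port latched pending while masked, clearing the mask bit alone would lose the event, so the selector and upcall flags are raised by hand. A toy model of that re-fire rule (names local to the sketch):

```c
#include <stdio.h>

/* Model of the re-fire check in unmask_evtchn(): delivery is
 * edge-triggered, so a port that became pending while masked must
 * have its upcall raised manually when the mask is cleared. */
struct port {
	int pending, masked;
};

static int upcall_pending;

static void unmask(struct port *p)
{
	p->masked = 0;
	if (p->pending)             /* missed edge while masked? */
		upcall_pending = 1; /* re-fire by hand */
}

int main(void)
{
	struct port p = { .pending = 1, .masked = 1 };

	unmask(&p);
	printf("upcall_pending=%d\n", upcall_pending);  /* 1 */
	return 0;
}
```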
++
++void disable_all_local_evtchn(void)
++{
++ unsigned i, cpu = smp_processor_id();
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ for (i = 0; i < NR_EVENT_CHANNELS; ++i)
++ if (cpu_from_evtchn(i) == cpu)
++ synch_set_bit(i, &s->evtchn_mask[0]);
++}
++
++static void restore_cpu_virqs(unsigned int cpu)
++{
++ struct evtchn_bind_virq bind_virq;
++ int virq, irq, evtchn;
++
++ for (virq = 0; virq < NR_VIRQS; virq++) {
++ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
++ continue;
++
++ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
++
++ /* Get a new binding from Xen. */
++ bind_virq.virq = virq;
++ bind_virq.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq) != 0)
++ BUG();
++ evtchn = bind_virq.port;
++
++ /* Record the new mapping. */
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++ bind_evtchn_to_cpu(evtchn, cpu);
++
++ /* Ready for use. */
++ unmask_evtchn(evtchn);
++ }
++}
++
++static void restore_cpu_ipis(unsigned int cpu)
++{
++ struct evtchn_bind_ipi bind_ipi;
++ int ipi, irq, evtchn;
++
++ for (ipi = 0; ipi < NR_IPIS; ipi++) {
++ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
++ continue;
++
++ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
++
++ /* Get a new binding from Xen. */
++ bind_ipi.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++ &bind_ipi) != 0)
++ BUG();
++ evtchn = bind_ipi.port;
++
++ /* Record the new mapping. */
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++ bind_evtchn_to_cpu(evtchn, cpu);
++
++ /* Ready for use. */
++ unmask_evtchn(evtchn);
++	}
++}
++
++void irq_resume(void)
++{
++ unsigned int cpu, pirq, irq, evtchn;
++
++ init_evtchn_cpu_bindings();
++
++ /* New event-channel space is not 'live' yet. */
++ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++ mask_evtchn(evtchn);
++
++ /* Check that no PIRQs are still bound. */
++ for (pirq = 0; pirq < NR_PIRQS; pirq++)
++ BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
++
++ /* No IRQ <-> event-channel mappings. */
++ for (irq = 0; irq < NR_IRQS; irq++)
++ irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
++ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++ evtchn_to_irq[evtchn] = -1;
++
++ for_each_possible_cpu(cpu) {
++ restore_cpu_virqs(cpu);
++ restore_cpu_ipis(cpu);
++ }
++}
++
++void __init xen_init_IRQ(void)
++{
++ unsigned int i;
++
++ init_evtchn_cpu_bindings();
++
++ /* No event channels are 'live' right now. */
++ for (i = 0; i < NR_EVENT_CHANNELS; i++)
++ mask_evtchn(i);
++
++ /* No IRQ -> event-channel mappings. */
++ for (i = 0; i < NR_IRQS; i++)
++ irq_info[i] = IRQ_UNBOUND;
++
++ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
++ for (i = 0; i < NR_DYNIRQS; i++) {
++ irq_bindcount[dynirq_to_irq(i)] = 0;
++
++ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
++ irq_desc[dynirq_to_irq(i)].action = NULL;
++ irq_desc[dynirq_to_irq(i)].depth = 1;
++ irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
++ }
++
++ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
++ for (i = 0; i < NR_PIRQS; i++) {
++ irq_bindcount[pirq_to_irq(i)] = 1;
++
++#ifdef RTC_IRQ
++ /* If not domain 0, force our RTC driver to fail its probe. */
++ if ((i == RTC_IRQ) && !is_initial_xendomain())
++ continue;
++#endif
++
++ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
++ irq_desc[pirq_to_irq(i)].action = NULL;
++ irq_desc[pirq_to_irq(i)].depth = 1;
++ irq_desc[pirq_to_irq(i)].chip = &pirq_type;
++ }
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/core/features.c linux-2.6.18-xen-3.3.0/drivers/xen/core/features.c
+--- linux-2.6.18.8/drivers/xen/core/features.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/features.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,34 @@
++/******************************************************************************
++ * features.c
++ *
++ * Xen feature flags.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
++ */
++#include <linux/types.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
++/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
++EXPORT_SYMBOL(xen_features);
++
++void setup_xen_features(void)
++{
++ xen_feature_info_t fi;
++ int i, j;
++
++ for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
++ fi.submap_idx = i;
++ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
++ break;
++		for (j = 0; j < 32; j++)
++			xen_features[i * 32 + j] = !!(fi.submap & (1U << j));
++ }
++}
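`setup_xen_features()` fans each 32-bit submap out into one byte per feature so later `xen_feature()` lookups are a plain array index. The unpacking step in isolation (the submap value here is made up, not a real `XENVER_get_features` result):

```c
#include <stdio.h>

int main(void)
{
	unsigned char features[32];
	unsigned submap = 0x00000005;    /* pretend hypercall result */
	int j;

	/* One byte per feature bit, exactly as in setup_xen_features(). */
	for (j = 0; j < 32; j++)
		features[j] = !!(submap & (1u << j));

	printf("feature0=%d feature1=%d feature2=%d\n",
	       features[0], features[1], features[2]);   /* 1 0 1 */
	return 0;
}
```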
+diff -rpuN linux-2.6.18.8/drivers/xen/core/firmware.c linux-2.6.18-xen-3.3.0/drivers/xen/core/firmware.c
+--- linux-2.6.18.8/drivers/xen/core/firmware.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/firmware.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,74 @@
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <video/edid.h>
++#include <xen/interface/platform.h>
++#include <asm/hypervisor.h>
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void __init copy_edd(void)
++{
++ int ret;
++ struct xen_platform_op op;
++
++ if (!is_initial_xendomain())
++ return;
++
++ op.cmd = XENPF_firmware_info;
++
++ op.u.firmware_info.type = XEN_FW_DISK_INFO;
++ for (op.u.firmware_info.index = 0;
++ edd.edd_info_nr < EDDMAXNR;
++ op.u.firmware_info.index++) {
++ struct edd_info *info = edd.edd_info + edd.edd_info_nr;
++
++ info->params.length = sizeof(info->params);
++ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
++ &info->params);
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ break;
++
++#define C(x) info->x = op.u.firmware_info.u.disk_info.x
++ C(device);
++ C(version);
++ C(interface_support);
++ C(legacy_max_cylinder);
++ C(legacy_max_head);
++ C(legacy_sectors_per_track);
++#undef C
++
++ edd.edd_info_nr++;
++ }
++
++ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
++ for (op.u.firmware_info.index = 0;
++ edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
++ op.u.firmware_info.index++) {
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ break;
++ edd.mbr_signature[edd.mbr_signature_nr++] =
++ op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
++ }
++}
++#endif
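The `C(x)` macro in `copy_edd()` is a small idiom worth noting: a throwaway field-copy macro for structs that share member names, `#undef`'d immediately so it cannot leak into the rest of the file. A stand-alone example (both structs invented):

```c
#include <stdio.h>

/* The C(x) idiom above: a local macro that copies identically named
 * fields between two structs, removed as soon as it has done its job. */
struct src { int device, version; };
struct dst { int device, version; };

int main(void)
{
	struct src s = { 0x80, 0x30 };
	struct dst d;

#define C(x) d.x = s.x
	C(device);
	C(version);
#undef C

	printf("device=%#x version=%#x\n", d.device, d.version);
	return 0;
}
```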
++
++void __init copy_edid(void)
++{
++#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
++ struct xen_platform_op op;
++
++ if (!is_initial_xendomain())
++ return;
++
++ op.cmd = XENPF_firmware_info;
++ op.u.firmware_info.index = 0;
++ op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
++ set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
++ edid_info.dummy);
++ if (HYPERVISOR_platform_op(&op) != 0)
++ memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
++#endif
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/core/gnttab.c linux-2.6.18-xen-3.3.0/drivers/xen/core/gnttab.c
+--- linux-2.6.18.8/drivers/xen/core/gnttab.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/gnttab.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,769 @@
++/******************************************************************************
++ * gnttab.c
++ *
++ * Granting foreign access to our memory reservation.
++ *
++ * Copyright (c) 2005-2006, Christopher Clark
++ * Copyright (c) 2004-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/seqlock.h>
++#include <xen/interface/xen.h>
++#include <xen/gnttab.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/synch_bitops.h>
++#include <asm/io.h>
++#include <xen/interface/memory.h>
++#include <xen/driver_util.h>
++#include <asm/gnttab_dma.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++/* External tools reserve first few grant table entries. */
++#define NR_RESERVED_ENTRIES 8
++#define GNTTAB_LIST_END 0xffffffff
++#define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
++
++static grant_ref_t **gnttab_list;
++static unsigned int nr_grant_frames;
++static unsigned int boot_max_nr_grant_frames;
++static int gnttab_free_count;
++static grant_ref_t gnttab_free_head;
++static DEFINE_SPINLOCK(gnttab_list_lock);
++
++static struct grant_entry *shared;
++
++static struct gnttab_free_callback *gnttab_free_callback_list;
++
++static int gnttab_expand(unsigned int req_entries);
++
++#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
++#define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
++
++#define nr_freelist_frames(grant_frames) \
++ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP)
++
++static int get_free_entries(int count)
++{
++ unsigned long flags;
++ int ref, rc;
++ grant_ref_t head;
++
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++
++ if ((gnttab_free_count < count) &&
++ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++ return rc;
++ }
++
++ ref = head = gnttab_free_head;
++ gnttab_free_count -= count;
++ while (count-- > 1)
++ head = gnttab_entry(head);
++ gnttab_free_head = gnttab_entry(head);
++ gnttab_entry(head) = GNTTAB_LIST_END;
++
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++
++ return ref;
++}
++
++#define get_free_entry() get_free_entries(1)
++
++static void do_free_callbacks(void)
++{
++ struct gnttab_free_callback *callback, *next;
++
++ callback = gnttab_free_callback_list;
++ gnttab_free_callback_list = NULL;
++
++ while (callback != NULL) {
++ next = callback->next;
++ if (gnttab_free_count >= callback->count) {
++ callback->next = NULL;
++ callback->fn(callback->arg);
++ } else {
++ callback->next = gnttab_free_callback_list;
++ gnttab_free_callback_list = callback;
++ }
++ callback = next;
++ }
++}
++
++static inline void check_free_callbacks(void)
++{
++ if (unlikely(gnttab_free_callback_list))
++ do_free_callbacks();
++}
++
++static void put_free_entry(grant_ref_t ref)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ gnttab_entry(ref) = gnttab_free_head;
++ gnttab_free_head = ref;
++ gnttab_free_count++;
++ check_free_callbacks();
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
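The grant free list needs no separate node storage: while a reference is free, `gnttab_entry(ref)` doubles as its "next" pointer, and `gnttab_free_head` threads through the entry pages themselves. A minimal single-array sketch of the `get_free_entries()`/`put_free_entry()` pair, without the locking, batching, or expansion logic:

```c
#include <stdio.h>

#define N 8
#define LIST_END 0xffffffffu

static unsigned entry[N];   /* entry[ref] = next free ref */
static unsigned free_head;

static void init(void)
{
	unsigned i;

	for (i = 0; i < N - 1; i++)
		entry[i] = i + 1;   /* thread the list through itself */
	entry[N - 1] = LIST_END;
	free_head = 0;
}

static unsigned get_free(void)
{
	unsigned ref = free_head;

	if (ref == LIST_END)
		return LIST_END;    /* exhausted */
	free_head = entry[ref];
	return ref;
}

static void put_free(unsigned ref)
{
	entry[ref] = free_head;     /* push back, LIFO */
	free_head = ref;
}

int main(void)
{
	unsigned a, b;

	init();
	a = get_free();
	b = get_free();
	printf("got %u %u\n", a, b);    /* 0 1 */
	put_free(a);
	printf("got %u\n", get_free()); /* 0 again (LIFO reuse) */
	return 0;
}
```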
++
++/*
++ * Public grant-issuing interface functions
++ */
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++ int flags)
++{
++ int ref;
++
++ if (unlikely((ref = get_free_entry()) < 0))
++ return -ENOSPC;
++
++ shared[ref].frame = frame;
++ shared[ref].domid = domid;
++ wmb();
++ BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
++ shared[ref].flags = GTF_permit_access | flags;
++
++ return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++ unsigned long frame, int flags)
++{
++ shared[ref].frame = frame;
++ shared[ref].domid = domid;
++ wmb();
++ BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
++ shared[ref].flags = GTF_permit_access | flags;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
++
++int gnttab_query_foreign_access(grant_ref_t ref)
++{
++ u16 nflags;
++
++ nflags = shared[ref].flags;
++
++ return (nflags & (GTF_reading|GTF_writing));
++}
++EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
++
++int gnttab_end_foreign_access_ref(grant_ref_t ref)
++{
++ u16 flags, nflags;
++
++ nflags = shared[ref].flags;
++ do {
++ if ((flags = nflags) & (GTF_reading|GTF_writing)) {
++ printk(KERN_DEBUG "WARNING: g.e. still in use!\n");
++ return 0;
++ }
++ } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
++ flags);
++
++ return 1;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
++
++void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
++{
++ if (gnttab_end_foreign_access_ref(ref)) {
++ put_free_entry(ref);
++ if (page != 0)
++ free_page(page);
++ } else {
++ /* XXX This needs to be fixed so that the ref and page are
++ placed on a list to be freed up later. */
++ printk(KERN_DEBUG
++ "WARNING: leaking g.e. and page still in use!\n");
++ }
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
++
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
++{
++ int ref;
++
++ if (unlikely((ref = get_free_entry()) < 0))
++ return -ENOSPC;
++ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
++
++ return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
++ unsigned long pfn)
++{
++ shared[ref].frame = pfn;
++ shared[ref].domid = domid;
++ wmb();
++ shared[ref].flags = GTF_accept_transfer;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
++{
++ unsigned long frame;
++ u16 flags;
++
++	/*
++	 * If the transfer has not even started yet, try to reclaim the
++	 * grant reference and return failure (== 0).
++	 */
++ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
++ if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags)
++ return 0;
++ cpu_relax();
++ }
++
++ /* If a transfer is in progress then wait until it is completed. */
++ while (!(flags & GTF_transfer_completed)) {
++ flags = shared[ref].flags;
++ cpu_relax();
++ }
++
++ /* Read the frame number /after/ reading completion status. */
++ rmb();
++ frame = shared[ref].frame;
++ BUG_ON(frame == 0);
++
++ return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
++
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
++{
++ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
++ put_free_entry(ref);
++ return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
++
++void gnttab_free_grant_reference(grant_ref_t ref)
++{
++ put_free_entry(ref);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
++
++void gnttab_free_grant_references(grant_ref_t head)
++{
++ grant_ref_t ref;
++ unsigned long flags;
++ int count = 1;
++ if (head == GNTTAB_LIST_END)
++ return;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ ref = head;
++ while (gnttab_entry(ref) != GNTTAB_LIST_END) {
++ ref = gnttab_entry(ref);
++ count++;
++ }
++ gnttab_entry(ref) = gnttab_free_head;
++ gnttab_free_head = head;
++ gnttab_free_count += count;
++ check_free_callbacks();
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
++
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
++{
++ int h = get_free_entries(count);
++
++ if (h < 0)
++ return -ENOSPC;
++
++ *head = h;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
++
++int gnttab_empty_grant_references(const grant_ref_t *private_head)
++{
++ return (*private_head == GNTTAB_LIST_END);
++}
++EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
++
++int gnttab_claim_grant_reference(grant_ref_t *private_head)
++{
++ grant_ref_t g = *private_head;
++ if (unlikely(g == GNTTAB_LIST_END))
++ return -ENOSPC;
++ *private_head = gnttab_entry(g);
++ return g;
++}
++EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++ grant_ref_t release)
++{
++ gnttab_entry(release) = *private_head;
++ *private_head = release;
++}
++EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++ void (*fn)(void *), void *arg, u16 count)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ if (callback->next)
++ goto out;
++ callback->fn = fn;
++ callback->arg = arg;
++ callback->count = count;
++ callback->next = gnttab_free_callback_list;
++ gnttab_free_callback_list = callback;
++ check_free_callbacks();
++out:
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
++
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
++{
++ struct gnttab_free_callback **pcb;
++ unsigned long flags;
++
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
++ if (*pcb == callback) {
++ *pcb = callback->next;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
++
++static int grow_gnttab_list(unsigned int more_frames)
++{
++ unsigned int new_nr_grant_frames, extra_entries, i;
++ unsigned int nr_glist_frames, new_nr_glist_frames;
++
++ new_nr_grant_frames = nr_grant_frames + more_frames;
++ extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME;
++
++ nr_glist_frames = nr_freelist_frames(nr_grant_frames);
++ new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames);
++ for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
++ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
++ if (!gnttab_list[i])
++ goto grow_nomem;
++ }
++
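++	/* Thread the new entries together and push them onto the free list. */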
++ for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
++ i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
++ gnttab_entry(i) = i + 1;
++
++ gnttab_entry(i) = gnttab_free_head;
++ gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
++ gnttab_free_count += extra_entries;
++
++ nr_grant_frames = new_nr_grant_frames;
++
++ check_free_callbacks();
++
++ return 0;
++
++grow_nomem:
++ for ( ; i >= nr_glist_frames; i--)
++ free_page((unsigned long) gnttab_list[i]);
++ return -ENOMEM;
++}
++
++static unsigned int __max_nr_grant_frames(void)
++{
++ struct gnttab_query_size query;
++ int rc;
++
++ query.dom = DOMID_SELF;
++
++ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
++ if ((rc < 0) || (query.status != GNTST_okay))
++ return 4; /* Legacy max supported number of frames */
++
++ return query.max_nr_frames;
++}
++
++static inline unsigned int max_nr_grant_frames(void)
++{
++ unsigned int xen_max = __max_nr_grant_frames();
++
++ if (xen_max > boot_max_nr_grant_frames)
++ return boot_max_nr_grant_frames;
++ return xen_max;
++}
++
++#ifdef CONFIG_XEN
++
++static DEFINE_SEQLOCK(gnttab_dma_lock);
++
++#ifdef CONFIG_X86
++static int map_pte_fn(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++ unsigned long **frames = (unsigned long **)data;
++
++ set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
++ (*frames)++;
++ return 0;
++}
++
++static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++ set_pte_at(&init_mm, addr, pte, __pte(0));
++ return 0;
++}
++
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++ struct vm_struct *area;
++ area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
++ BUG_ON(area == NULL);
++ return area->addr;
++}
++#endif /* CONFIG_X86 */
++
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++ struct gnttab_setup_table setup;
++ unsigned long *frames;
++ unsigned int nr_gframes = end_idx + 1;
++ int rc;
++
++ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
++ if (!frames)
++ return -ENOMEM;
++
++ setup.dom = DOMID_SELF;
++ setup.nr_frames = nr_gframes;
++ set_xen_guest_handle(setup.frame_list, frames);
++
++ rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
++ if (rc == -ENOSYS) {
++ kfree(frames);
++ return -ENOSYS;
++ }
++
++ BUG_ON(rc || setup.status);
++
++ if (shared == NULL)
++ shared = arch_gnttab_alloc_shared(frames);
++
++#ifdef CONFIG_X86
++ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
++ PAGE_SIZE * nr_gframes,
++ map_pte_fn, &frames);
++ BUG_ON(rc);
++ frames -= nr_gframes; /* adjust after map_pte_fn() */
++#endif /* CONFIG_X86 */
++
++ kfree(frames);
++
++ return 0;
++}
++
++static void gnttab_page_free(struct page *page)
++{
++ ClearPageForeign(page);
++ gnttab_reset_grant_page(page);
++ put_page(page);
++}
++
++/*
++ * Copy a foreign granted page to local memory.
++ *
++ * Must not be called with IRQs off; this should only be used on the
++ * slow path.
++ */
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
++{
++ struct gnttab_unmap_and_replace unmap;
++ mmu_update_t mmu;
++ struct page *page;
++ struct page *new_page;
++ void *new_addr;
++ void *addr;
++ paddr_t pfn;
++ maddr_t mfn;
++ maddr_t new_mfn;
++ int err;
++
++ page = *pagep;
++ if (!get_page_unless_zero(page))
++ return -ENOENT;
++
++ err = -ENOMEM;
++ new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (!new_page)
++ goto out;
++
++ new_addr = page_address(new_page);
++ addr = page_address(page);
++ memcpy(new_addr, addr, PAGE_SIZE);
++
++ pfn = page_to_pfn(page);
++ mfn = pfn_to_mfn(pfn);
++ new_mfn = virt_to_mfn(new_addr);
++
++ write_seqlock(&gnttab_dma_lock);
++
++ /* Make seq visible before checking page_mapped. */
++ smp_mb();
++
++ /* Has the page been DMA-mapped? */
++ if (unlikely(page_mapped(page))) {
++ write_sequnlock(&gnttab_dma_lock);
++ put_page(new_page);
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ set_phys_to_machine(pfn, new_mfn);
++
++ gnttab_set_replace_op(&unmap, (unsigned long)addr,
++ (unsigned long)new_addr, ref);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++ &unmap, 1);
++ BUG_ON(err);
++ BUG_ON(unmap.status);
++
++ write_sequnlock(&gnttab_dma_lock);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
++
++ mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++ mmu.val = pfn;
++ err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
++ BUG_ON(err);
++ }
++
++ new_page->mapping = page->mapping;
++ new_page->index = page->index;
++ set_bit(PG_foreign, &new_page->flags);
++ *pagep = new_page;
++
++ SetPageForeign(page, gnttab_page_free);
++ page->mapping = NULL;
++
++out:
++ put_page(page);
++ return err;
++}
++EXPORT_SYMBOL_GPL(gnttab_copy_grant_page);
++
++void gnttab_reset_grant_page(struct page *page)
++{
++ init_page_count(page);
++ reset_page_mapcount(page);
++}
++EXPORT_SYMBOL_GPL(gnttab_reset_grant_page);
++
++/*
++ * Keep track of foreign pages marked as PageForeign so that we don't
++ * return them to the remote domain prematurely.
++ *
++ * PageForeign pages are pinned down by increasing their mapcount.
++ *
++ * All other pages are simply returned as is.
++ */
++void __gnttab_dma_map_page(struct page *page)
++{
++ unsigned int seq;
++
++ if (!is_running_on_xen() || !PageForeign(page))
++ return;
++
++ do {
++ seq = read_seqbegin(&gnttab_dma_lock);
++
++ if (gnttab_dma_local_pfn(page))
++ break;
++
++ atomic_set(&page->_mapcount, 0);
++
++ /* Make _mapcount visible before read_seqretry. */
++ smp_mb();
++ } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
++}
++
++int gnttab_resume(void)
++{
++ if (max_nr_grant_frames() < nr_grant_frames)
++ return -ENOSYS;
++ return gnttab_map(0, nr_grant_frames - 1);
++}
++
++int gnttab_suspend(void)
++{
++#ifdef CONFIG_X86
++ apply_to_page_range(&init_mm, (unsigned long)shared,
++ PAGE_SIZE * nr_grant_frames,
++ unmap_pte_fn, NULL);
++#endif
++ return 0;
++}
++
++#else /* !CONFIG_XEN */
++
++#include <platform-pci.h>
++
++static unsigned long resume_frames;
++
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++ struct xen_add_to_physmap xatp;
++ unsigned int i = end_idx;
++
++ /* Loop backwards, so that the first hypercall has the largest index,
++ * ensuring that the table will grow only once.
++ */
++ do {
++ xatp.domid = DOMID_SELF;
++ xatp.idx = i;
++ xatp.space = XENMAPSPACE_grant_table;
++ xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
++ BUG();
++ } while (i-- > start_idx);
++
++ return 0;
++}
++
++int gnttab_resume(void)
++{
++ unsigned int max_nr_gframes, nr_gframes;
++
++ nr_gframes = nr_grant_frames;
++ max_nr_gframes = max_nr_grant_frames();
++ if (max_nr_gframes < nr_gframes)
++ return -ENOSYS;
++
++ if (!resume_frames) {
++ resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
++ shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
++ if (shared == NULL) {
++			printk(KERN_ERR "Failed to ioremap grant table shared frames\n");
++ return -1;
++ }
++ }
++
++ gnttab_map(0, nr_gframes - 1);
++
++ return 0;
++}
++
++#endif /* !CONFIG_XEN */
++
++static int gnttab_expand(unsigned int req_entries)
++{
++ int rc;
++ unsigned int cur, extra;
++
++ cur = nr_grant_frames;
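++	/* Round the request up to a whole number of extra grant frames. */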
++ extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) /
++ ENTRIES_PER_GRANT_FRAME);
++ if (cur + extra > max_nr_grant_frames())
++ return -ENOSPC;
++
++ if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
++ rc = grow_gnttab_list(extra);
++
++ return rc;
++}
++
++int __devinit gnttab_init(void)
++{
++ int i;
++ unsigned int max_nr_glist_frames, nr_glist_frames;
++ unsigned int nr_init_grefs;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ nr_grant_frames = 1;
++ boot_max_nr_grant_frames = __max_nr_grant_frames();
++
++ /* Determine the maximum number of frames required for the
++ * grant reference free list on the current hypervisor.
++ */
++ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames);
++
++ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
++ GFP_KERNEL);
++ if (gnttab_list == NULL)
++ return -ENOMEM;
++
++ nr_glist_frames = nr_freelist_frames(nr_grant_frames);
++ for (i = 0; i < nr_glist_frames; i++) {
++ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
++ if (gnttab_list[i] == NULL)
++ goto ini_nomem;
++ }
++
++ if (gnttab_resume() < 0)
++ return -ENODEV;
++
++ nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME;
++
++ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
++ gnttab_entry(i) = i + 1;
++
++ gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
++ gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
++ gnttab_free_head = NR_RESERVED_ENTRIES;
++
++ return 0;
++
++ ini_nomem:
++ for (i--; i >= 0; i--)
++ free_page((unsigned long)gnttab_list[i]);
++ kfree(gnttab_list);
++ return -ENOMEM;
++}
++
++#ifdef CONFIG_XEN
++core_initcall(gnttab_init);
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/core/hypervisor_sysfs.c linux-2.6.18-xen-3.3.0/drivers/xen/core/hypervisor_sysfs.c
+--- linux-2.6.18.8/drivers/xen/core/hypervisor_sysfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/hypervisor_sysfs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,57 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/kobject.h>
++#include <xen/hypervisor_sysfs.h>
++#include <asm/hypervisor.h>
++
++static ssize_t hyp_sysfs_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buffer)
++{
++ struct hyp_sysfs_attr *hyp_attr;
++ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++ if (hyp_attr->show)
++ return hyp_attr->show(hyp_attr, buffer);
++ return 0;
++}
++
++static ssize_t hyp_sysfs_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buffer,
++ size_t len)
++{
++ struct hyp_sysfs_attr *hyp_attr;
++ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++ if (hyp_attr->store)
++ return hyp_attr->store(hyp_attr, buffer, len);
++ return 0;
++}
++
++static struct sysfs_ops hyp_sysfs_ops = {
++ .show = hyp_sysfs_show,
++ .store = hyp_sysfs_store,
++};
++
++static struct kobj_type hyp_sysfs_kobj_type = {
++ .sysfs_ops = &hyp_sysfs_ops,
++};
++
++static int __init hypervisor_subsys_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
++ return 0;
++}
++
++device_initcall(hypervisor_subsys_init);
+diff -rpuN linux-2.6.18.8/drivers/xen/core/machine_kexec.c linux-2.6.18-xen-3.3.0/drivers/xen/core/machine_kexec.c
+--- linux-2.6.18.8/drivers/xen/core/machine_kexec.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/machine_kexec.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,207 @@
++/*
++ * drivers/xen/core/machine_kexec.c
++ * handle transition of Linux booting another kernel
++ */
++
++#include <linux/kexec.h>
++#include <xen/interface/kexec.h>
++#include <linux/mm.h>
++#include <linux/bootmem.h>
++
++extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki,
++ struct kimage *image);
++extern int machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus);
++extern void machine_kexec_register_resources(struct resource *res);
++
++static int __initdata xen_max_nr_phys_cpus;
++static struct resource xen_hypervisor_res;
++static struct resource *xen_phys_cpus;
++
++size_t vmcoreinfo_size_xen;
++unsigned long paddr_vmcoreinfo_xen;
++
++void __init xen_machine_kexec_setup_resources(void)
++{
++ xen_kexec_range_t range;
++ struct resource *res;
++ int k = 0;
++
++ if (!is_initial_xendomain())
++ return;
++
++ /* determine maximum number of physical cpus */
++
++ while (1) {
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CPU;
++ range.nr = k;
++
++ if(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ break;
++
++ k++;
++ }
++
++ if (k == 0)
++ return;
++
++ xen_max_nr_phys_cpus = k;
++
++ /* allocate xen_phys_cpus */
++
++ xen_phys_cpus = alloc_bootmem_low(k * sizeof(struct resource));
++ BUG_ON(xen_phys_cpus == NULL);
++
++ /* fill in xen_phys_cpus with per-cpu crash note information */
++
++ for (k = 0; k < xen_max_nr_phys_cpus; k++) {
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CPU;
++ range.nr = k;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ res = xen_phys_cpus + k;
++
++ memset(res, 0, sizeof(*res));
++ res->name = "Crash note";
++ res->start = range.start;
++ res->end = range.start + range.size - 1;
++ res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++ }
++
++ /* fill in xen_hypervisor_res with hypervisor machine address range */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_XEN;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ xen_hypervisor_res.name = "Hypervisor code and data";
++ xen_hypervisor_res.start = range.start;
++ xen_hypervisor_res.end = range.start + range.size - 1;
++ xen_hypervisor_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++
++ /* fill in crashk_res if range is reserved by hypervisor */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CRASH;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ if (range.size) {
++ crashk_res.start = range.start;
++ crashk_res.end = range.start + range.size - 1;
++ }
++
++ /* get physical address of vmcoreinfo */
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_VMCOREINFO;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ return;
++
++ if (range.size) {
++ paddr_vmcoreinfo_xen = range.start;
++ vmcoreinfo_size_xen = range.size;
++ }
++
++ if (machine_kexec_setup_resources(&xen_hypervisor_res, xen_phys_cpus,
++ xen_max_nr_phys_cpus))
++ goto err;
++
++ return;
++
++ err:
++ /*
++ * It isn't possible to free xen_phys_cpus this early in the
++	 * boot. Failure at this stage is unexpected, and the amount of
++	 * memory is small, so we tolerate the potential leak.
++ */
++ xen_max_nr_phys_cpus = 0;
++ return;
++}
++
++void __init xen_machine_kexec_register_resources(struct resource *res)
++{
++ request_resource(res, &xen_hypervisor_res);
++ machine_kexec_register_resources(res);
++}
++
++static void setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ machine_kexec_setup_load_arg(xki, image);
++
++ xki->indirection_page = image->head;
++ xki->start_address = image->start;
++}
++
++/*
++ * Load the image into xen so xen can kdump itself
++ * This might have been done in prepare, but prepare
++ * is currently called too early. It might make sense
++ * to move prepare, but for now, just add an extra hook.
++ */
++int xen_machine_kexec_load(struct kimage *image)
++{
++ xen_kexec_load_t xkl;
++
++ memset(&xkl, 0, sizeof(xkl));
++ xkl.type = image->type;
++ setup_load_arg(&xkl.image, image);
++ return HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
++}
++
++/*
++ * Unload the image that was stored by machine_kexec_load().
++ * This might have been done in machine_kexec_cleanup(), but that
++ * is called too late, and it's possible Xen could try to kdump
++ * using resources that have already been freed.
++ */
++void xen_machine_kexec_unload(struct kimage *image)
++{
++ xen_kexec_load_t xkl;
++
++ memset(&xkl, 0, sizeof(xkl));
++ xkl.type = image->type;
++ WARN_ON(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_unload, &xkl));
++}
++
++/*
++ * Do not allocate memory (or fail in any way) in machine_kexec().
++ * We are past the point of no return, committed to rebooting now.
++ *
++ * This has the hypervisor move to the preferred reboot CPU,
++ * stop all CPUs and kexec. That is, it combines machine_shutdown()
++ * and machine_kexec() in Linux kexec terms.
++ */
++NORET_TYPE void machine_kexec(struct kimage *image)
++{
++ xen_kexec_exec_t xke;
++
++ memset(&xke, 0, sizeof(xke));
++ xke.type = image->type;
++ VOID(HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &xke));
++ panic("KEXEC_CMD_kexec hypercall should not return\n");
++}
++
++void machine_shutdown(void)
++{
++ /* do nothing */
++}
++
++
++/*
++ * Local variables:
++ * c-file-style: "linux"
++ * indent-tabs-mode: t
++ * c-indent-level: 8
++ * c-basic-offset: 8
++ * tab-width: 8
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/drivers/xen/core/machine_reboot.c linux-2.6.18-xen-3.3.0/drivers/xen/core/machine_reboot.c
+--- linux-2.6.18.8/drivers/xen/core/machine_reboot.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/machine_reboot.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,247 @@
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <linux/stringify.h>
++#include <linux/stop_machine.h>
++#include <asm/irq.h>
++#include <asm/mmu_context.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <linux/cpu.h>
++#include <xen/gnttab.h>
++#include <xen/xencons.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/interface/vcpu.h>
++
++#if defined(__i386__) || defined(__x86_64__)
++
++/*
++ * Power off function, if any
++ */
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
++
++void machine_emergency_restart(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ HYPERVISOR_shutdown(SHUTDOWN_reboot);
++}
++
++void machine_restart(char * __unused)
++{
++ machine_emergency_restart();
++}
++
++void machine_halt(void)
++{
++ machine_power_off();
++}
++
++void machine_power_off(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ if (pm_power_off)
++ pm_power_off();
++ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
++
++int reboot_thru_bios = 0; /* for dmi_scan.c */
++EXPORT_SYMBOL(machine_restart);
++EXPORT_SYMBOL(machine_halt);
++EXPORT_SYMBOL(machine_power_off);
++
++static void pre_suspend(void)
++{
++ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++ WARN_ON(HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++ __pte_ma(0), 0));
++
++ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
++ xen_start_info->console.domU.mfn =
++ mfn_to_pfn(xen_start_info->console.domU.mfn);
++}
++
++static void post_suspend(int suspend_cancelled)
++{
++ int i, j, k, fpp;
++ unsigned long shinfo_mfn;
++ extern unsigned long max_pfn;
++ extern unsigned long *pfn_to_mfn_frame_list_list;
++ extern unsigned long *pfn_to_mfn_frame_list[];
++
++ if (suspend_cancelled) {
++ xen_start_info->store_mfn =
++ pfn_to_mfn(xen_start_info->store_mfn);
++ xen_start_info->console.domU.mfn =
++ pfn_to_mfn(xen_start_info->console.domU.mfn);
++ } else {
++#ifdef CONFIG_SMP
++ cpu_initialized_map = cpu_online_map;
++#endif
++ }
++
++ shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT;
++ if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++ pfn_pte_ma(shinfo_mfn, PAGE_KERNEL),
++ 0))
++ BUG();
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++
++ memset(empty_zero_page, 0, PAGE_SIZE);
++
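++	/*
++	 * Rebuild the two-level pfn-to-mfn frame list and re-register it
++	 * in the shared info page; the underlying MFNs may have changed
++	 * across the suspend/resume cycle.
++	 */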
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j = 0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++}
++
++#else /* !(defined(__i386__) || defined(__x86_64__)) */
++
++#ifndef HAVE_XEN_PRE_SUSPEND
++#define xen_pre_suspend() ((void)0)
++#endif
++
++#ifndef HAVE_XEN_POST_SUSPEND
++#define xen_post_suspend(x) ((void)0)
++#endif
++
++#define switch_idle_mm() ((void)0)
++#define mm_pin_all() ((void)0)
++#define pre_suspend() xen_pre_suspend()
++#define post_suspend(x) xen_post_suspend(x)
++
++#endif
++
++struct suspend {
++ int fast_suspend;
++ void (*resume_notifier)(int);
++};
++
++static int take_machine_down(void *_suspend)
++{
++ struct suspend *suspend = _suspend;
++ int suspend_cancelled, err;
++ extern void time_resume(void);
++
++ if (suspend->fast_suspend) {
++ BUG_ON(!irqs_disabled());
++ } else {
++ BUG_ON(irqs_disabled());
++
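++		/*
++		 * Slow path: take the other CPUs offline and quiesce
++		 * xenbus, retrying until this is the only online CPU
++		 * once preemption is disabled.
++		 */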
++ for (;;) {
++ err = smp_suspend();
++ if (err)
++ return err;
++
++ xenbus_suspend();
++ preempt_disable();
++
++ if (num_online_cpus() == 1)
++ break;
++
++ preempt_enable();
++ xenbus_suspend_cancel();
++ }
++
++ local_irq_disable();
++ }
++
++ mm_pin_all();
++ gnttab_suspend();
++ pre_suspend();
++
++ /*
++ * This hypercall returns 1 if suspend was cancelled or the domain was
++ * merely checkpointed, and 0 if it is resuming in a new domain.
++ */
++ suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
++
++ suspend->resume_notifier(suspend_cancelled);
++ post_suspend(suspend_cancelled);
++ gnttab_resume();
++ if (!suspend_cancelled) {
++ irq_resume();
++#ifdef __x86_64__
++ /*
++ * Older versions of Xen do not save/restore the user %cr3.
++ * We do it here just in case, but there's no need if we are
++ * in fast-suspend mode as that implies a new enough Xen.
++ */
++ if (!suspend->fast_suspend)
++ xen_new_user_pt(__pa(__user_pgd(
++ current->active_mm->pgd)));
++#endif
++ }
++ time_resume();
++
++ if (!suspend->fast_suspend)
++ local_irq_enable();
++
++ return suspend_cancelled;
++}
++
++int __xen_suspend(int fast_suspend, void (*resume_notifier)(int))
++{
++ int err, suspend_cancelled;
++ struct suspend suspend;
++
++ BUG_ON(smp_processor_id() != 0);
++ BUG_ON(in_interrupt());
++
++#if defined(__i386__) || defined(__x86_64__)
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ printk(KERN_WARNING "Cannot suspend in "
++ "auto_translated_physmap mode.\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
++ /* If we are definitely UP then 'slow mode' is actually faster. */
++ if (num_possible_cpus() == 1)
++ fast_suspend = 0;
++
++ suspend.fast_suspend = fast_suspend;
++ suspend.resume_notifier = resume_notifier;
++
++ if (fast_suspend) {
++ xenbus_suspend();
++ err = stop_machine_run(take_machine_down, &suspend, 0);
++ if (err < 0)
++ xenbus_suspend_cancel();
++ } else {
++ err = take_machine_down(&suspend);
++ }
++
++ if (err < 0)
++ return err;
++
++ suspend_cancelled = err;
++ if (!suspend_cancelled) {
++ xencons_resume();
++ xenbus_resume();
++ } else {
++ xenbus_suspend_cancel();
++ }
++
++ if (!fast_suspend)
++ smp_resume();
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/core/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/core/Makefile
+--- linux-2.6.18.8/drivers/xen/core/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,14 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
++
++obj-$(CONFIG_PCI) += pci.o
++obj-$(CONFIG_PROC_FS) += xen_proc.o
++obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o
++obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
++obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o
++obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o
++obj-$(CONFIG_KEXEC) += machine_kexec.o
++obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
+diff -rpuN linux-2.6.18.8/drivers/xen/core/pci.c linux-2.6.18-xen-3.3.0/drivers/xen/core/pci.c
+--- linux-2.6.18.8/drivers/xen/core/pci.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/pci.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,67 @@
++/*
++ * vim:shiftwidth=8:noexpandtab
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <xen/interface/physdev.h>
++
++static int (*pci_bus_probe)(struct device *dev);
++static int (*pci_bus_remove)(struct device *dev);
++
++static int pci_bus_probe_wrapper(struct device *dev)
++{
++ int r;
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct physdev_manage_pci manage_pci;
++ manage_pci.bus = pci_dev->bus->number;
++ manage_pci.devfn = pci_dev->devfn;
++
++ r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add, &manage_pci);
++ if (r && r != -ENOSYS)
++ return r;
++
++ r = pci_bus_probe(dev);
++ if (r) {
++ int ret;
++
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
++ &manage_pci);
++ WARN_ON(ret && ret != -ENOSYS);
++ }
++
++ return r;
++}
++
++static int pci_bus_remove_wrapper(struct device *dev)
++{
++ int r;
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct physdev_manage_pci manage_pci;
++ manage_pci.bus = pci_dev->bus->number;
++ manage_pci.devfn = pci_dev->devfn;
++
++ r = pci_bus_remove(dev);
++ /* dev and pci_dev are no longer valid!! */
++
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
++ &manage_pci));
++ return r;
++}
++
++static int __init hook_pci_bus(void)
++{
++ if (!is_running_on_xen() || !is_initial_xendomain())
++ return 0;
++
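++	/*
++	 * Interpose on the stock PCI bus probe/remove methods so that
++	 * dom0 notifies the hypervisor as devices come and go.
++	 */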
++ pci_bus_probe = pci_bus_type.probe;
++ pci_bus_type.probe = pci_bus_probe_wrapper;
++
++ pci_bus_remove = pci_bus_type.remove;
++ pci_bus_type.remove = pci_bus_remove_wrapper;
++
++ return 0;
++}
++
++core_initcall(hook_pci_bus);
+diff -rpuN linux-2.6.18.8/drivers/xen/core/reboot.c linux-2.6.18-xen-3.3.0/drivers/xen/core/reboot.c
+--- linux-2.6.18.8/drivers/xen/core/reboot.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/reboot.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,335 @@
++#define __KERNEL_SYSCALLS__
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include <linux/kmod.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++#define SHUTDOWN_INVALID -1
++#define SHUTDOWN_POWEROFF 0
++#define SHUTDOWN_SUSPEND 2
++#define SHUTDOWN_RESUMING 3
++#define SHUTDOWN_HALT 4
++
++/* Ignore multiple shutdown requests. */
++static int shutting_down = SHUTDOWN_INVALID;
++
++/* Was last suspend request cancelled? */
++static int suspend_cancelled;
++
++/* Can we leave APs online when we suspend? */
++static int fast_suspend;
++
++static void __shutdown_handler(void *unused);
++static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++
++static int setup_suspend_evtchn(void);
++
++int __xen_suspend(int fast_suspend, void (*resume_notifier)(int));
++
++static int shutdown_process(void *__unused)
++{
++ static char *envp[] = { "HOME=/", "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
++ static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
++
++ extern asmlinkage long sys_reboot(int magic1, int magic2,
++ unsigned int cmd, void *arg);
++
++ if ((shutting_down == SHUTDOWN_POWEROFF) ||
++ (shutting_down == SHUTDOWN_HALT)) {
++ if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
++ envp, 0) < 0) {
++#ifdef CONFIG_XEN
++ sys_reboot(LINUX_REBOOT_MAGIC1,
++ LINUX_REBOOT_MAGIC2,
++ LINUX_REBOOT_CMD_POWER_OFF,
++ NULL);
++#endif /* CONFIG_XEN */
++ }
++ }
++
++ shutting_down = SHUTDOWN_INVALID; /* could try again */
++
++ return 0;
++}
++
++static void xen_resume_notifier(int _suspend_cancelled)
++{
++ int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING);
++ BUG_ON(old_state != SHUTDOWN_SUSPEND);
++ suspend_cancelled = _suspend_cancelled;
++}
++
++static int xen_suspend(void *__unused)
++{
++ int err, old_state;
++
++ daemonize("suspend");
++ err = set_cpus_allowed(current, cpumask_of_cpu(0));
++ if (err) {
++ printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err);
++ goto fail;
++ }
++
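++	/*
++	 * Keep suspending for as long as fresh suspend requests arrive
++	 * while we are still resuming (shutting_down has then been moved
++	 * from SHUTDOWN_RESUMING back to SHUTDOWN_SUSPEND).
++	 */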
++ do {
++ err = __xen_suspend(fast_suspend, xen_resume_notifier);
++ if (err) {
++ printk(KERN_ERR "Xen suspend failed (%d)\n", err);
++ goto fail;
++ }
++ if (!suspend_cancelled)
++ setup_suspend_evtchn();
++ old_state = cmpxchg(
++ &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID);
++ } while (old_state == SHUTDOWN_SUSPEND);
++
++ switch (old_state) {
++ case SHUTDOWN_INVALID:
++ case SHUTDOWN_SUSPEND:
++ BUG();
++ case SHUTDOWN_RESUMING:
++ break;
++ default:
++ schedule_work(&shutdown_work);
++ break;
++ }
++
++ return 0;
++
++ fail:
++ old_state = xchg(&shutting_down, SHUTDOWN_INVALID);
++ BUG_ON(old_state != SHUTDOWN_SUSPEND);
++ return 0;
++}
++
++static void switch_shutdown_state(int new_state)
++{
++ int prev_state, old_state = SHUTDOWN_INVALID;
++
++ /* We only drive shutdown_state into an active state. */
++ if (new_state == SHUTDOWN_INVALID)
++ return;
++
++ do {
++ /* We drop this transition if already in an active state. */
++ if ((old_state != SHUTDOWN_INVALID) &&
++ (old_state != SHUTDOWN_RESUMING))
++ return;
++ /* Attempt to transition. */
++ prev_state = old_state;
++ old_state = cmpxchg(&shutting_down, old_state, new_state);
++ } while (old_state != prev_state);
++
++ /* Either we kick off the work, or we leave it to xen_suspend(). */
++ if (old_state == SHUTDOWN_INVALID)
++ schedule_work(&shutdown_work);
++ else
++ BUG_ON(old_state != SHUTDOWN_RESUMING);
++}
++
++static void __shutdown_handler(void *unused)
++{
++ int err;
++
++ err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ?
++ xen_suspend : shutdown_process,
++ NULL, CLONE_FS | CLONE_FILES);
++
++ if (err < 0) {
++ printk(KERN_WARNING "Error creating shutdown process (%d): "
++ "retrying...\n", -err);
++ schedule_delayed_work(&shutdown_work, HZ/2);
++ }
++}
++
++static void shutdown_handler(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ extern void ctrl_alt_del(void);
++ char *str;
++ struct xenbus_transaction xbt;
++ int err, new_state = SHUTDOWN_INVALID;
++
++ if ((shutting_down != SHUTDOWN_INVALID) &&
++ (shutting_down != SHUTDOWN_RESUMING))
++ return;
++
++ again:
++ err = xenbus_transaction_start(&xbt);
++ if (err)
++ return;
++
++ str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
++ /* Ignore read errors and empty reads. */
++ if (XENBUS_IS_ERR_READ(str)) {
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++
++ xenbus_write(xbt, "control", "shutdown", "");
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN) {
++ kfree(str);
++ goto again;
++ }
++
++ if (strcmp(str, "poweroff") == 0)
++ new_state = SHUTDOWN_POWEROFF;
++ else if (strcmp(str, "reboot") == 0)
++ ctrl_alt_del();
++ else if (strcmp(str, "suspend") == 0)
++ new_state = SHUTDOWN_SUSPEND;
++ else if (strcmp(str, "halt") == 0)
++ new_state = SHUTDOWN_HALT;
++ else
++		printk(KERN_WARNING "Ignoring shutdown request: %s\n", str);
++
++ switch_shutdown_state(new_state);
++
++ kfree(str);
++}
++
++static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
++ unsigned int len)
++{
++ char sysrq_key = '\0';
++ struct xenbus_transaction xbt;
++ int err;
++
++ again:
++ err = xenbus_transaction_start(&xbt);
++ if (err)
++ return;
++ if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
++ printk(KERN_ERR "Unable to read sysrq code in "
++ "control/sysrq\n");
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++
++ if (sysrq_key != '\0')
++ xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++
++#ifdef CONFIG_MAGIC_SYSRQ
++ if (sysrq_key != '\0')
++ handle_sysrq(sysrq_key, NULL, NULL);
++#endif
++}
++
++static struct xenbus_watch shutdown_watch = {
++ .node = "control/shutdown",
++ .callback = shutdown_handler
++};
++
++static struct xenbus_watch sysrq_watch = {
++ .node = "control/sysrq",
++ .callback = sysrq_handler
++};
++
++static irqreturn_t suspend_int(int irq, void* dev_id, struct pt_regs *ptregs)
++{
++ switch_shutdown_state(SHUTDOWN_SUSPEND);
++ return IRQ_HANDLED;
++}
++
++static int setup_suspend_evtchn(void)
++{
++ static int irq;
++ int port;
++ char portstr[16];
++
++ if (irq > 0)
++ unbind_from_irqhandler(irq, NULL);
++
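++	/*
++	 * Tear down any binding left over from a previous suspend and
++	 * bind a fresh event channel, advertising the new port below.
++	 */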
++ irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend",
++ NULL);
++ if (irq <= 0)
++ return -1;
++
++ port = irq_to_evtchn_port(irq);
++ printk(KERN_INFO "suspend: event channel %d\n", port);
++ sprintf(portstr, "%d", port);
++ xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr);
++
++ return 0;
++}
++
++static int setup_shutdown_watcher(void)
++{
++ int err;
++
++ xenbus_scanf(XBT_NIL, "control",
++ "platform-feature-multiprocessor-suspend",
++ "%d", &fast_suspend);
++
++ err = register_xenbus_watch(&shutdown_watch);
++ if (err) {
++ printk(KERN_ERR "Failed to set shutdown watcher\n");
++ return err;
++ }
++
++ err = register_xenbus_watch(&sysrq_watch);
++ if (err) {
++ printk(KERN_ERR "Failed to set sysrq watcher\n");
++ return err;
++ }
++
++ /* suspend event channel */
++ err = setup_suspend_evtchn();
++ if (err) {
++ printk(KERN_ERR "Failed to register suspend event channel\n");
++ return err;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_XEN
++
++static int shutdown_event(struct notifier_block *notifier,
++ unsigned long event,
++ void *data)
++{
++ setup_shutdown_watcher();
++ return NOTIFY_DONE;
++}
++
++static int __init setup_shutdown_event(void)
++{
++ static struct notifier_block xenstore_notifier = {
++ .notifier_call = shutdown_event
++ };
++ register_xenstore_notifier(&xenstore_notifier);
++
++ return 0;
++}
++
++subsys_initcall(setup_shutdown_event);
++
++#else /* !defined(CONFIG_XEN) */
++
++int xen_reboot_init(void)
++{
++ return setup_shutdown_watcher();
++}
++
++#endif /* !defined(CONFIG_XEN) */
+diff -rpuN linux-2.6.18.8/drivers/xen/core/smpboot.c linux-2.6.18-xen-3.3.0/drivers/xen/core/smpboot.c
+--- linux-2.6.18.8/drivers/xen/core/smpboot.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/smpboot.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,464 @@
++/*
++ * Xen SMP booting functions
++ *
++ * See arch/i386/kernel/smpboot.c for copyright and credits for derived
++ * portions of this file.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/smp_lock.h>
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++
++extern int local_setup_timer(unsigned int cpu);
++extern void local_teardown_timer(unsigned int cpu);
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void system_call(void);
++extern void smp_trap_init(trap_info_t *);
++
++/* Number of siblings per CPU package */
++int smp_num_siblings = 1;
++
++cpumask_t cpu_online_map;
++EXPORT_SYMBOL(cpu_online_map);
++cpumask_t cpu_possible_map;
++EXPORT_SYMBOL(cpu_possible_map);
++cpumask_t cpu_initialized_map;
++
++struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_data);
++
++#ifdef CONFIG_HOTPLUG_CPU
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++#endif
++
++static DEFINE_PER_CPU(int, resched_irq);
++static DEFINE_PER_CPU(int, callfunc_irq);
++static char resched_name[NR_CPUS][15];
++static char callfunc_name[NR_CPUS][15];
++
++u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
++cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_core_map);
++
++#if defined(__i386__)
++u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++#elif !defined(CONFIG_X86_IO_APIC)
++unsigned int maxcpus = NR_CPUS;
++#endif
++
++void __init prefill_possible_map(void)
++{
++ int i, rc;
++
++ for_each_possible_cpu(i)
++ if (i != smp_processor_id())
++ return;
++
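++	/*
++	 * Probe each candidate VCPU; a non-negative return from
++	 * VCPUOP_is_up means the hypervisor knows about the VCPU,
++	 * whether or not it is currently up.
++	 */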
++ for (i = 0; i < NR_CPUS; i++) {
++ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
++ if (rc >= 0)
++ cpu_set(i, cpu_possible_map);
++ }
++}
++
++void __init smp_alloc_memory(void)
++{
++}
++
++static inline void
++set_cpu_sibling_map(unsigned int cpu)
++{
++ cpu_data[cpu].phys_proc_id = cpu;
++ cpu_data[cpu].cpu_core_id = 0;
++
++ cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
++ cpu_core_map[cpu] = cpumask_of_cpu(cpu);
++
++ cpu_data[cpu].booted_cores = 1;
++}
++
++static void
++remove_siblinginfo(unsigned int cpu)
++{
++ cpu_data[cpu].phys_proc_id = BAD_APICID;
++ cpu_data[cpu].cpu_core_id = BAD_APICID;
++
++ cpus_clear(cpu_sibling_map[cpu]);
++ cpus_clear(cpu_core_map[cpu]);
++
++ cpu_data[cpu].booted_cores = 0;
++}
++
++static int __cpuinit xen_smp_intr_init(unsigned int cpu)
++{
++ int rc;
++
++ per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
++
++ sprintf(resched_name[cpu], "resched%u", cpu);
++ rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
++ cpu,
++ smp_reschedule_interrupt,
++ SA_INTERRUPT,
++ resched_name[cpu],
++ NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(resched_irq, cpu) = rc;
++
++ sprintf(callfunc_name[cpu], "callfunc%u", cpu);
++ rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
++ cpu,
++ smp_call_function_interrupt,
++ SA_INTERRUPT,
++ callfunc_name[cpu],
++ NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(callfunc_irq, cpu) = rc;
++
++ if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
++ goto fail;
++
++ return 0;
++
++ fail:
++ if (per_cpu(resched_irq, cpu) >= 0)
++ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++ if (per_cpu(callfunc_irq, cpu) >= 0)
++ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++ return rc;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void xen_smp_intr_exit(unsigned int cpu)
++{
++ if (cpu != 0)
++ local_teardown_timer(cpu);
++
++ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++}
++#endif
++
++void __cpuinit cpu_bringup(void)
++{
++ cpu_init();
++ identify_cpu(cpu_data + smp_processor_id());
++ touch_softlockup_watchdog();
++ preempt_disable();
++ local_irq_enable();
++}
++
++static void __cpuinit cpu_bringup_and_idle(void)
++{
++ cpu_bringup();
++ cpu_idle();
++}
++
++static void __cpuinit cpu_initialize_context(unsigned int cpu)
++{
++ /* vcpu_guest_context_t is too large to allocate on the stack.
++ * Hence we allocate statically and protect it with a lock */
++ static vcpu_guest_context_t ctxt;
++ static DEFINE_SPINLOCK(ctxt_lock);
++
++ struct task_struct *idle = idle_task(cpu);
++#ifdef __x86_64__
++ struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
++#else
++ struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
++
++ if (cpu_test_and_set(cpu, cpu_initialized_map))
++ return;
++
++ spin_lock(&ctxt_lock);
++
++ memset(&ctxt, 0, sizeof(ctxt));
++
++ ctxt.flags = VGCF_IN_KERNEL;
++ ctxt.user_regs.ds = __USER_DS;
++ ctxt.user_regs.es = __USER_DS;
++ ctxt.user_regs.fs = 0;
++ ctxt.user_regs.gs = 0;
++ ctxt.user_regs.ss = __KERNEL_DS;
++ ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
++ ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
++
++ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
++
++ smp_trap_init(ctxt.trap_ctxt);
++
++ ctxt.ldt_ents = 0;
++
++ ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
++ ctxt.gdt_ents = gdt_descr->size / 8;
++
++#ifdef __i386__
++ ctxt.user_regs.cs = __KERNEL_CS;
++ ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
++
++ ctxt.kernel_ss = __KERNEL_DS;
++ ctxt.kernel_sp = idle->thread.esp0;
++
++ ctxt.event_callback_cs = __KERNEL_CS;
++ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
++ ctxt.failsafe_callback_cs = __KERNEL_CS;
++ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++
++ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
++#else /* __x86_64__ */
++ ctxt.user_regs.cs = __KERNEL_CS;
++ ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
++
++ ctxt.kernel_ss = __KERNEL_DS;
++ ctxt.kernel_sp = idle->thread.rsp0;
++
++ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
++ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++ ctxt.syscall_callback_eip = (unsigned long)system_call;
++
++ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
++
++ ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
++#endif
++
++ if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt))
++ BUG();
++
++ spin_unlock(&ctxt_lock);
++}
++
++void __init smp_prepare_cpus(unsigned int max_cpus)
++{
++ unsigned int cpu;
++ struct task_struct *idle;
++ int apicid, acpiid;
++ struct vcpu_get_physid cpu_id;
++#ifdef __x86_64__
++ struct desc_ptr *gdt_descr;
++#else
++ struct Xgt_desc_struct *gdt_descr;
++#endif
++
++ apicid = 0;
++ if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0) {
++ apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
++ acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
++#ifdef CONFIG_ACPI
++ if (acpiid != 0xff)
++ x86_acpiid_to_apicid[acpiid] = apicid;
++#endif
++ }
++ boot_cpu_data.apicid = apicid;
++ cpu_data[0] = boot_cpu_data;
++
++ cpu_2_logical_apicid[0] = apicid;
++ x86_cpu_to_apicid[0] = apicid;
++
++ current_thread_info()->cpu = 0;
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ cpus_clear(cpu_sibling_map[cpu]);
++ cpus_clear(cpu_core_map[cpu]);
++ }
++
++ set_cpu_sibling_map(0);
++
++ if (xen_smp_intr_init(0))
++ BUG();
++
++ cpu_initialized_map = cpumask_of_cpu(0);
++
++ /* Restrict the possible_map according to max_cpus. */
++ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
++ for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
++ continue;
++ cpu_clear(cpu, cpu_possible_map);
++ }
++
++ for_each_possible_cpu (cpu) {
++ if (cpu == 0)
++ continue;
++
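++		/*
++		 * Give each secondary CPU its own GDT page, copied from
++		 * the boot GDT and made read-only unless Xen offers
++		 * writable descriptor tables.
++		 */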
++#ifdef __x86_64__
++ gdt_descr = &cpu_gdt_descr[cpu];
++#else
++ gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
++ gdt_descr->address = get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt_descr->address)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
++ cpu);
++ continue;
++ }
++ gdt_descr->size = GDT_SIZE;
++ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++ make_page_readonly(
++ (void *)gdt_descr->address,
++ XENFEAT_writable_descriptor_tables);
++
++ apicid = cpu;
++ if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) {
++ apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
++ acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
++#ifdef CONFIG_ACPI
++ if (acpiid != 0xff)
++ x86_acpiid_to_apicid[acpiid] = apicid;
++#endif
++ }
++ cpu_data[cpu] = boot_cpu_data;
++ cpu_data[cpu].apicid = apicid;
++
++ cpu_2_logical_apicid[cpu] = apicid;
++ x86_cpu_to_apicid[cpu] = apicid;
++
++ idle = fork_idle(cpu);
++ if (IS_ERR(idle))
++ panic("failed fork for CPU %d", cpu);
++
++#ifdef __x86_64__
++ cpu_pda(cpu)->pcurrent = idle;
++ cpu_pda(cpu)->cpunumber = cpu;
++ clear_ti_thread_flag(idle->thread_info, TIF_FORK);
++#endif
++
++ irq_ctx_init(cpu);
++
++#ifdef CONFIG_HOTPLUG_CPU
++ if (is_initial_xendomain())
++ cpu_set(cpu, cpu_present_map);
++#else
++ cpu_set(cpu, cpu_present_map);
++#endif
++ }
++
++ init_xenbus_allowed_cpumask();
++
++#ifdef CONFIG_X86_IO_APIC
++ /*
++ * Here we can be sure that there is an IO-APIC in the system. Let's
++ * go and set it up:
++ */
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++}
++
++void __devinit smp_prepare_boot_cpu(void)
++{
++ prefill_possible_map();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
++ * But do it early enough to catch critical for_each_present_cpu() loops
++ * in i386-specific code.
++ */
++static int __init initialize_cpu_present_map(void)
++{
++ cpu_present_map = cpu_possible_map;
++ return 0;
++}
++core_initcall(initialize_cpu_present_map);
++
++int __cpu_disable(void)
++{
++ cpumask_t map = cpu_online_map;
++ unsigned int cpu = smp_processor_id();
++
++ if (cpu == 0)
++ return -EBUSY;
++
++ remove_siblinginfo(cpu);
++
++ cpu_clear(cpu, map);
++ fixup_irqs(map);
++ cpu_clear(cpu, cpu_online_map);
++
++ return 0;
++}
++
++void __cpu_die(unsigned int cpu)
++{
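++	/* Poll until the hypervisor reports the VCPU as down. */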
++ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
++ current->state = TASK_UNINTERRUPTIBLE;
++ schedule_timeout(HZ/10);
++ }
++
++ xen_smp_intr_exit(cpu);
++
++ if (num_online_cpus() == 1)
++ alternatives_smp_switch(0);
++}
++
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __cpuinit __cpu_up(unsigned int cpu)
++{
++ int rc;
++
++ rc = cpu_up_check(cpu);
++ if (rc)
++ return rc;
++
++ cpu_initialize_context(cpu);
++
++ if (num_online_cpus() == 1)
++ alternatives_smp_switch(1);
++
++ /* This must be done before setting cpu_online_map */
++ set_cpu_sibling_map(cpu);
++ wmb();
++
++ rc = xen_smp_intr_init(cpu);
++ if (rc) {
++ remove_siblinginfo(cpu);
++ return rc;
++ }
++
++ cpu_set(cpu, cpu_online_map);
++
++ rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
++ BUG_ON(rc);
++
++ return 0;
++}
++
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++}
++
++#ifndef CONFIG_X86_LOCAL_APIC
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/core/xencomm.c linux-2.6.18-xen-3.3.0/drivers/xen/core/xencomm.c
+--- linux-2.6.18.8/drivers/xen/core/xencomm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/xencomm.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,229 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <xen/xencomm.h>
++#include <xen/interface/xen.h>
++#ifdef __ia64__
++#include <asm/xen/xencomm.h> /* for is_kernel_addr() */
++#endif
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xencomm_init(struct xencomm_desc *desc,
++ void *buffer, unsigned long bytes)
++{
++ unsigned long recorded = 0;
++ int i = 0;
++
++ while ((recorded < bytes) && (i < desc->nr_addrs)) {
++ unsigned long vaddr = (unsigned long)buffer + recorded;
++ unsigned long paddr;
++ int offset;
++ int chunksz;
++
++ offset = vaddr % PAGE_SIZE; /* handle partial pages */
++ chunksz = min(PAGE_SIZE - offset, bytes - recorded);
++
++ paddr = xencomm_vtop(vaddr);
++ if (paddr == ~0UL) {
++ printk("%s: couldn't translate vaddr %lx\n",
++ __func__, vaddr);
++ return -EINVAL;
++ }
++
++ desc->address[i++] = paddr;
++ recorded += chunksz;
++ }
++
++ if (recorded < bytes) {
++ printk("%s: could only translate %ld of %ld bytes\n",
++ __func__, recorded, bytes);
++ return -ENOSPC;
++ }
++
++ /* mark remaining addresses invalid (just for safety) */
++ while (i < desc->nr_addrs)
++ desc->address[i++] = XENCOMM_INVALID;
++
++ desc->magic = XENCOMM_MAGIC;
++
++ return 0;
++}
++
++static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
++ void *buffer, unsigned long bytes)
++{
++ struct xencomm_desc *desc;
++ unsigned long buffer_ulong = (unsigned long)buffer;
++ unsigned long start = buffer_ulong & PAGE_MASK;
++ unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
++ unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
++ unsigned long size = sizeof(*desc) +
++ sizeof(desc->address[0]) * nr_addrs;
++
++ /*
++ * slab allocator returns at least sizeof(void*) aligned pointer.
++ * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
++ * cross page boundary.
++ */
++ if (sizeof(*desc) > sizeof(void*)) {
++ unsigned long order = get_order(size);
++ desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
++ order);
++ if (desc == NULL)
++ return NULL;
++
++ desc->nr_addrs =
++ ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
++ sizeof(*desc->address);
++ } else {
++ desc = kmalloc(size, gfp_mask);
++ if (desc == NULL)
++ return NULL;
++
++ desc->nr_addrs = nr_addrs;
++ }
++ return desc;
++}
++
++void xencomm_free(struct xencomm_handle *desc)
++{
++ if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
++ struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
++ if (sizeof(*desc__) > sizeof(void*)) {
++ unsigned long size = sizeof(*desc__) +
++ sizeof(desc__->address[0]) * desc__->nr_addrs;
++ unsigned long order = get_order(size);
++ free_pages((unsigned long)__va(desc), order);
++ } else
++ kfree(__va(desc));
++ }
++}
++
++static int xencomm_create(void *buffer, unsigned long bytes,
++			  struct xencomm_desc **ret, gfp_t gfp_mask)
++{
++ struct xencomm_desc *desc;
++ int rc;
++
++ pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
++
++ if (bytes == 0) {
++ /* don't create a descriptor; Xen recognizes NULL. */
++ BUG_ON(buffer != NULL);
++ *ret = NULL;
++ return 0;
++ }
++
++ BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
++
++ desc = xencomm_alloc(gfp_mask, buffer, bytes);
++ if (!desc) {
++ printk("%s failure\n", "xencomm_alloc");
++ return -ENOMEM;
++ }
++
++ rc = xencomm_init(desc, buffer, bytes);
++ if (rc) {
++ printk("%s failure: %d\n", "xencomm_init", rc);
++ xencomm_free((struct xencomm_handle *)__pa(desc));
++ return rc;
++ }
++
++ *ret = desc;
++ return 0;
++}
++
++/* Check whether an address is physically contiguous, i.e. a kernel
++ * address outside the VMALLOC region. */
++static int is_phys_contiguous(unsigned long addr)
++{
++ if (!is_kernel_addr(addr))
++ return 0;
++
++ return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
++}
++
++static struct xencomm_handle *xencomm_create_inline(void *ptr)
++{
++ unsigned long paddr;
++
++ BUG_ON(!is_phys_contiguous((unsigned long)ptr));
++
++ paddr = (unsigned long)xencomm_pa(ptr);
++ BUG_ON(paddr & XENCOMM_INLINE_FLAG);
++ return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
++}
++
++/* "mini" routine, for stack-based communications: */
++static int xencomm_create_mini(void *buffer,
++ unsigned long bytes, struct xencomm_mini *xc_desc,
++ struct xencomm_desc **ret)
++{
++ int rc = 0;
++ struct xencomm_desc *desc;
++ BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
++
++ desc = (void *)xc_desc;
++
++ desc->nr_addrs = XENCOMM_MINI_ADDRS;
++
++ if (!(rc = xencomm_init(desc, buffer, bytes)))
++ *ret = desc;
++
++ return rc;
++}
++
++struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
++{
++ int rc;
++ struct xencomm_desc *desc;
++
++ if (is_phys_contiguous((unsigned long)ptr))
++ return xencomm_create_inline(ptr);
++
++ rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
++
++ if (rc || desc == NULL)
++ return NULL;
++
++ return xencomm_pa(desc);
++}
++
++struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
++ struct xencomm_mini *xc_desc)
++{
++ int rc;
++ struct xencomm_desc *desc = NULL;
++
++ if (is_phys_contiguous((unsigned long)ptr))
++ return xencomm_create_inline(ptr);
++
++ rc = xencomm_create_mini(ptr, bytes, xc_desc,
++ &desc);
++
++ if (rc)
++ return NULL;
++
++ return xencomm_pa(desc);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/core/xen_proc.c linux-2.6.18-xen-3.3.0/drivers/xen/core/xen_proc.c
+--- linux-2.6.18.8/drivers/xen/core/xen_proc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/xen_proc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,23 @@
++
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <xen/xen_proc.h>
++
++static struct proc_dir_entry *xen_base;
++
++struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
++{
++ if ( xen_base == NULL )
++ if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
++ panic("Couldn't create /proc/xen");
++ return create_proc_entry(name, mode, xen_base);
++}
++
++EXPORT_SYMBOL_GPL(create_xen_proc_entry);
++
++void remove_xen_proc_entry(const char *name)
++{
++ remove_proc_entry(name, xen_base);
++}
++
++EXPORT_SYMBOL_GPL(remove_xen_proc_entry);
+diff -rpuN linux-2.6.18.8/drivers/xen/core/xen_sysfs.c linux-2.6.18-xen-3.3.0/drivers/xen/core/xen_sysfs.c
+--- linux-2.6.18.8/drivers/xen/core/xen_sysfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/core/xen_sysfs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,420 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++#include <xen/hypervisor_sysfs.h>
++#include <xen/xenbus.h>
++#include <xen/interface/kexec.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Mike D. Day <ncmike@us.ibm.com>");
++
++static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ return sprintf(buffer, "xen\n");
++}
++
++HYPERVISOR_ATTR_RO(type);
++
++static int __init xen_sysfs_type_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++}
++
++static void xen_sysfs_type_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++}
++
++/* xen version attributes */
++static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++ if (version)
++ return sprintf(buffer, "%d\n", version >> 16);
++ return -ENODEV;
++}
++
++HYPERVISOR_ATTR_RO(major);
++
++static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++ if (version)
++ return sprintf(buffer, "%d\n", version & 0xff);
++ return -ENODEV;
++}
++
++HYPERVISOR_ATTR_RO(minor);
++
++static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *extra;
++
++ extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
++ if (extra) {
++ ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", extra);
++ kfree(extra);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(extra);
++
++static struct attribute *version_attrs[] = {
++ &major_attr.attr,
++ &minor_attr.attr,
++ &extra_attr.attr,
++ NULL
++};
++
++static struct attribute_group version_group = {
++ .name = "version",
++ .attrs = version_attrs,
++};
++
++static int __init xen_sysfs_version_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &version_group);
++}
++
++static void xen_sysfs_version_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj, &version_group);
++}
++
++/* UUID */
++
++static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ char *vm, *val;
++ int ret;
++ extern int xenstored_ready;
++
++ if (!xenstored_ready)
++ return -EBUSY;
++
++ vm = xenbus_read(XBT_NIL, "vm", "", NULL);
++ if (IS_ERR(vm))
++ return PTR_ERR(vm);
++ val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
++ kfree(vm);
++ if (IS_ERR(val))
++ return PTR_ERR(val);
++ ret = sprintf(buffer, "%s\n", val);
++ kfree(val);
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(uuid);
++
++static int __init xen_sysfs_uuid_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
++
++static void xen_sysfs_uuid_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
++
++/* xen compilation attributes */
++
++static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compiler);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compiler);
++
++static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compile_by);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compiled_by);
++
++static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compile_date);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compile_date);
++
++static struct attribute *xen_compile_attrs[] = {
++ &compiler_attr.attr,
++ &compiled_by_attr.attr,
++ &compile_date_attr.attr,
++ NULL
++};
++
++static struct attribute_group xen_compilation_group = {
++ .name = "compilation",
++ .attrs = xen_compile_attrs,
++};
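
compiler_show(), compiled_by_show() and compile_date_show() above are identical apart from which field of struct xen_compile_info they print. Since those fields are fixed-size character arrays embedded in the struct (an assumption about the Xen interface header, which is not reproduced here), the trio could collapse into one offsetof()-based helper. A standalone illustration, not part of the patch:

/*
 * Standalone demonstration of folding three near-identical show
 * routines into one helper keyed by field offset.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct compile_info {			/* stand-in for xen_compile_info */
	char compiler[64];
	char compile_by[16];
	char compile_date[32];
};

/* one helper instead of three near-identical functions */
static int show_field(const struct compile_info *info, size_t offset,
		      char *buffer)
{
	return sprintf(buffer, "%s\n", (const char *)info + offset);
}

int main(void)
{
	struct compile_info info;
	char buf[64];

	strcpy(info.compiler, "gcc-4.1");
	show_field(&info, offsetof(struct compile_info, compiler), buf);
	fputs(buf, stdout);
	return 0;
}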
++
++static int __init xen_compilation_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &xen_compilation_group);
++}
++
++static void xen_compilation_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ &xen_compilation_group);
++}
++
++/* xen properties info */
++
++static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *caps;
++
++ caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
++ if (caps) {
++ ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", caps);
++ kfree(caps);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(capabilities);
++
++static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *cset;
++
++ cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
++ if (cset) {
++ ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", cset);
++ kfree(cset);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(changeset);
++
++static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_platform_parameters *parms;
++
++ parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
++ if (parms) {
++ ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
++ parms);
++ if (!ret)
++ ret = sprintf(buffer, "%lx\n", parms->virt_start);
++ kfree(parms);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(virtual_start);
++
++static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret;
++
++ ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
++ if (ret > 0)
++ ret = sprintf(buffer, "%x\n", ret);
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(pagesize);
++
++/* eventually there will be several more features to export */
++static ssize_t xen_feature_show(int index, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_feature_info *info;
++
++ info = kmalloc(sizeof(struct xen_feature_info), GFP_KERNEL);
++ if (info) {
++ info->submap_idx = index;
++ ret = HYPERVISOR_xen_version(XENVER_get_features, info);
++ if (!ret)
++ ret = sprintf(buffer, "%d\n", info->submap);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++static ssize_t writable_pt_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ return xen_feature_show(XENFEAT_writable_page_tables, buffer);
++}
++
++HYPERVISOR_ATTR_RO(writable_pt);
++
++static struct attribute *xen_properties_attrs[] = {
++ &capabilities_attr.attr,
++ &changeset_attr.attr,
++ &virtual_start_attr.attr,
++ &pagesize_attr.attr,
++ &writable_pt_attr.attr,
++ NULL
++};
++
++static struct attribute_group xen_properties_group = {
++ .name = "properties",
++ .attrs = xen_properties_attrs,
++};
++
++static int __init xen_properties_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &xen_properties_group);
++}
++
++static void xen_properties_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ &xen_properties_group);
++}
++
++#ifdef CONFIG_KEXEC
++
++static ssize_t vmcoreinfo_show(struct hyp_sysfs_attr *attr, char *page)
++{
++ extern size_t vmcoreinfo_size_xen;
++ extern unsigned long paddr_vmcoreinfo_xen;
++
++ return sprintf(page, "%lx %zx\n",
++ paddr_vmcoreinfo_xen, vmcoreinfo_size_xen);
++}
++
++HYPERVISOR_ATTR_RO(vmcoreinfo);
++
++static int __init xen_sysfs_vmcoreinfo_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj,
++ &vmcoreinfo_attr.attr);
++}
++
++static void xen_sysfs_vmcoreinfo_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &vmcoreinfo_attr.attr);
++}
++
++#else
++
++#define xen_sysfs_vmcoreinfo_init() 0
++#define xen_sysfs_vmcoreinfo_destroy() ((void)0)
++
++#endif
++
++static int __init hyper_sysfs_init(void)
++{
++ int ret;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ ret = xen_sysfs_type_init();
++ if (ret)
++ goto out;
++ ret = xen_sysfs_version_init();
++ if (ret)
++ goto version_out;
++ ret = xen_compilation_init();
++ if (ret)
++ goto comp_out;
++ ret = xen_sysfs_uuid_init();
++ if (ret)
++ goto uuid_out;
++ ret = xen_properties_init();
++ if (ret)
++ goto prop_out;
++ ret = xen_sysfs_vmcoreinfo_init();
++ if (!ret)
++ goto out;
++
++ xen_properties_destroy();
++prop_out:
++ xen_sysfs_uuid_destroy();
++uuid_out:
++ xen_compilation_destroy();
++comp_out:
++ xen_sysfs_version_destroy();
++version_out:
++ xen_sysfs_type_destroy();
++out:
++ return ret;
++}
++
++static void __exit hyper_sysfs_exit(void)
++{
++ xen_sysfs_vmcoreinfo_destroy();
++ xen_properties_destroy();
++ xen_compilation_destroy();
++ xen_sysfs_uuid_destroy();
++ xen_sysfs_version_destroy();
++ xen_sysfs_type_destroy();
++
++}
++
++module_init(hyper_sysfs_init);
++module_exit(hyper_sysfs_exit);
+diff -rpuN linux-2.6.18.8/drivers/xen/evtchn/evtchn.c linux-2.6.18-xen-3.3.0/drivers/xen/evtchn/evtchn.c
+--- linux-2.6.18.8/drivers/xen/evtchn/evtchn.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/evtchn/evtchn.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,559 @@
++/******************************************************************************
++ * evtchn.c
++ *
++ * Driver for receiving and demuxing event-channel signals.
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ * Multi-process extensions Copyright (c) 2004, Steven Smith
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/major.h>
++#include <linux/proc_fs.h>
++#include <linux/stat.h>
++#include <linux/poll.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/gfp.h>
++#include <linux/mutex.h>
++#include <linux/cpu.h>
++#include <xen/evtchn.h>
++#include <xen/public/evtchn.h>
++
++struct per_user_data {
++ /* Notification ring, accessed via /dev/xen/evtchn. */
++#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
++#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
++ evtchn_port_t *ring;
++ unsigned int ring_cons, ring_prod, ring_overflow;
++ struct mutex ring_cons_mutex; /* protect against concurrent readers */
++
++ /* Processes wait on this queue when ring is empty. */
++ wait_queue_head_t evtchn_wait;
++ struct fasync_struct *evtchn_async_queue;
++
++ int bind_cpu;
++ int nr_event_wrong_delivery;
++};
++
++/* Who's bound to each port? */
++static struct per_user_data *port_user[NR_EVENT_CHANNELS];
++static spinlock_t port_user_lock;
++
++void evtchn_device_upcall(int port)
++{
++ struct per_user_data *u;
++
++ spin_lock(&port_user_lock);
++
++ mask_evtchn(port);
++ clear_evtchn(port);
++
++ if ((u = port_user[port]) != NULL) {
++ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
++ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
++ wmb(); /* Ensure ring contents visible */
++ if (u->ring_cons == u->ring_prod++) {
++ wake_up_interruptible(&u->evtchn_wait);
++ kill_fasync(&u->evtchn_async_queue,
++ SIGIO, POLL_IN);
++ }
++ } else {
++ u->ring_overflow = 1;
++ }
++ }
++
++ spin_unlock(&port_user_lock);
++}
++
++static void evtchn_check_wrong_delivery(struct per_user_data *u)
++{
++ evtchn_port_t port;
++ unsigned int current_cpu = smp_processor_id();
++
++ /* Delivered to correct CPU? All is good. */
++ if (u->bind_cpu == current_cpu) {
++ u->nr_event_wrong_delivery = 0;
++ return;
++ }
++
++ /* Tolerate up to 100 consecutive misdeliveries. */
++ if (++u->nr_event_wrong_delivery < 100)
++ return;
++
++ spin_lock_irq(&port_user_lock);
++
++ for (port = 0; port < NR_EVENT_CHANNELS; port++)
++ if (port_user[port] == u)
++ rebind_evtchn_to_cpu(port, current_cpu);
++
++ u->bind_cpu = current_cpu;
++ u->nr_event_wrong_delivery = 0;
++
++ spin_unlock_irq(&port_user_lock);
++}
++
++static ssize_t evtchn_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int rc;
++ unsigned int c, p, bytes1 = 0, bytes2 = 0;
++ struct per_user_data *u = file->private_data;
++
++ /* Whole number of ports. */
++ count &= ~(sizeof(evtchn_port_t)-1);
++
++ if (count == 0)
++ return 0;
++
++ if (count > PAGE_SIZE)
++ count = PAGE_SIZE;
++
++ for (;;) {
++ mutex_lock(&u->ring_cons_mutex);
++
++ rc = -EFBIG;
++ if (u->ring_overflow)
++ goto unlock_out;
++
++ if ((c = u->ring_cons) != (p = u->ring_prod))
++ break;
++
++ mutex_unlock(&u->ring_cons_mutex);
++
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ rc = wait_event_interruptible(
++ u->evtchn_wait, u->ring_cons != u->ring_prod);
++ if (rc)
++ return rc;
++ }
++
++ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
++ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
++ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
++ sizeof(evtchn_port_t);
++ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
++ } else {
++ bytes1 = (p - c) * sizeof(evtchn_port_t);
++ bytes2 = 0;
++ }
++
++ /* Truncate chunks according to caller's maximum byte count. */
++ if (bytes1 > count) {
++ bytes1 = count;
++ bytes2 = 0;
++ } else if ((bytes1 + bytes2) > count) {
++ bytes2 = count - bytes1;
++ }
++
++ rc = -EFAULT;
++ rmb(); /* Ensure that we see the port before we copy it. */
++ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
++ ((bytes2 != 0) &&
++ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
++ goto unlock_out;
++
++ evtchn_check_wrong_delivery(u);
++
++ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
++ rc = bytes1 + bytes2;
++
++ unlock_out:
++ mutex_unlock(&u->ring_cons_mutex);
++ return rc;
++}
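
The chunk computation above relies on the ring size being a power of two and on prod/cons being free-running counters: only the low bits index a slot, and (c ^ p) & EVTCHN_RING_SIZE is non-zero exactly when the unread region crosses the end of the ring and must be copied in two pieces. A standalone check of that arithmetic (userspace, not part of the patch; RING_SIZE stands in for EVTCHN_RING_SIZE):

/*
 * Verifies the two-chunk split used by evtchn_read() for every
 * consumer/producer pair within one ring length.
 */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8			/* power of two, like EVTCHN_RING_SIZE */
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static void split(unsigned int c, unsigned int p,
		  unsigned int *chunk1, unsigned int *chunk2)
{
	if (((c ^ p) & RING_SIZE) != 0) {	/* region wraps past the end */
		*chunk1 = RING_SIZE - RING_MASK(c);
		*chunk2 = RING_MASK(p);
	} else {				/* one contiguous run */
		*chunk1 = p - c;
		*chunk2 = 0;
	}
}

int main(void)
{
	unsigned int c, p, n1, n2;

	for (c = 0; c < 4 * RING_SIZE; c++) {
		for (p = c; p - c <= RING_SIZE; p++) {
			split(c, p, &n1, &n2);
			assert(n1 + n2 == p - c);	/* nothing lost */
			assert(n1 <= RING_SIZE - RING_MASK(c));
		}
	}
	split(6, 11, &n1, &n2);
	printf("cons=6 prod=11 -> chunks of %u and %u slots\n", n1, n2);
	return 0;
}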
++
++static ssize_t evtchn_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int rc, i;
++ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++ struct per_user_data *u = file->private_data;
++
++ if (kbuf == NULL)
++ return -ENOMEM;
++
++ /* Whole number of ports. */
++ count &= ~(sizeof(evtchn_port_t)-1);
++
++ rc = 0;
++ if (count == 0)
++ goto out;
++
++ if (count > PAGE_SIZE)
++ count = PAGE_SIZE;
++
++ rc = -EFAULT;
++ if (copy_from_user(kbuf, buf, count) != 0)
++ goto out;
++
++ spin_lock_irq(&port_user_lock);
++ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
++ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
++ unmask_evtchn(kbuf[i]);
++ spin_unlock_irq(&port_user_lock);
++
++ rc = count;
++
++ out:
++ free_page((unsigned long)kbuf);
++ return rc;
++}
++
++static unsigned int next_bind_cpu(cpumask_t map)
++{
++ static unsigned int bind_cpu;
++ bind_cpu = next_cpu(bind_cpu, map);
++ if (bind_cpu >= NR_CPUS)
++ bind_cpu = first_cpu(map);
++ return bind_cpu;
++}
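
next_bind_cpu() spreads fresh bindings round-robin over the online CPUs, remembering its previous pick in a function-local static. A userspace analog (not part of the patch; the online[] array is a hypothetical stand-in for cpumask_t):

/*
 * Round-robin CPU selection with wraparound, mirroring the intent of
 * next_bind_cpu() above.
 */
#include <stdio.h>

#define NCPUS 8

static unsigned int pick_cpu(const int online[NCPUS])
{
	static unsigned int last;
	unsigned int i, c;

	/* advance to the next online CPU, wrapping at the end of the map */
	for (i = 1; i <= NCPUS; i++) {
		c = (last + i) % NCPUS;
		if (online[c]) {
			last = c;
			return c;
		}
	}
	return 0;	/* empty map; the kernel code never sees this */
}

int main(void)
{
	const int online[NCPUS] = { 1, 1, 0, 1, 0, 0, 1, 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("bind to cpu %u\n", pick_cpu(online));
	return 0;
}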
++
++static void evtchn_bind_to_user(struct per_user_data *u, int port)
++{
++ spin_lock_irq(&port_user_lock);
++
++ BUG_ON(port_user[port] != NULL);
++ port_user[port] = u;
++
++ if (u->bind_cpu == -1)
++ u->bind_cpu = next_bind_cpu(cpu_online_map);
++
++ rebind_evtchn_to_cpu(port, u->bind_cpu);
++
++ unmask_evtchn(port);
++
++ spin_unlock_irq(&port_user_lock);
++}
++
++static long evtchn_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int rc;
++ struct per_user_data *u = file->private_data;
++ void __user *uarg = (void __user *) arg;
++
++ switch (cmd) {
++ case IOCTL_EVTCHN_BIND_VIRQ: {
++ struct ioctl_evtchn_bind_virq bind;
++ struct evtchn_bind_virq bind_virq;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ bind_virq.virq = bind.virq;
++ bind_virq.vcpu = 0;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq);
++ if (rc != 0)
++ break;
++
++ rc = bind_virq.port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
++ struct ioctl_evtchn_bind_interdomain bind;
++ struct evtchn_bind_interdomain bind_interdomain;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ bind_interdomain.remote_dom = bind.remote_domain;
++ bind_interdomain.remote_port = bind.remote_port;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++ &bind_interdomain);
++ if (rc != 0)
++ break;
++
++ rc = bind_interdomain.local_port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
++ struct ioctl_evtchn_bind_unbound_port bind;
++ struct evtchn_alloc_unbound alloc_unbound;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = bind.remote_domain;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (rc != 0)
++ break;
++
++ rc = alloc_unbound.port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_UNBIND: {
++ struct ioctl_evtchn_unbind unbind;
++ struct evtchn_close close;
++ int ret;
++
++ rc = -EFAULT;
++ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
++ break;
++
++ rc = -EINVAL;
++ if (unbind.port >= NR_EVENT_CHANNELS)
++ break;
++
++ spin_lock_irq(&port_user_lock);
++
++ rc = -ENOTCONN;
++ if (port_user[unbind.port] != u) {
++ spin_unlock_irq(&port_user_lock);
++ break;
++ }
++
++ port_user[unbind.port] = NULL;
++ mask_evtchn(unbind.port);
++ rebind_evtchn_to_cpu(unbind.port, 0);
++
++ spin_unlock_irq(&port_user_lock);
++
++ close.port = unbind.port;
++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ BUG_ON(ret);
++
++ rc = 0;
++ break;
++ }
++
++ case IOCTL_EVTCHN_NOTIFY: {
++ struct ioctl_evtchn_notify notify;
++
++ rc = -EFAULT;
++ if (copy_from_user(&notify, uarg, sizeof(notify)))
++ break;
++
++ if (notify.port >= NR_EVENT_CHANNELS) {
++ rc = -EINVAL;
++ } else if (port_user[notify.port] != u) {
++ rc = -ENOTCONN;
++ } else {
++ notify_remote_via_evtchn(notify.port);
++ rc = 0;
++ }
++ break;
++ }
++
++ case IOCTL_EVTCHN_RESET: {
++ /* Initialise the ring to empty. Clear errors. */
++ mutex_lock(&u->ring_cons_mutex);
++ spin_lock_irq(&port_user_lock);
++ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
++ spin_unlock_irq(&port_user_lock);
++ mutex_unlock(&u->ring_cons_mutex);
++ rc = 0;
++ break;
++ }
++
++ default:
++ rc = -ENOSYS;
++ break;
++ }
++
++ return rc;
++}
++
++static unsigned int evtchn_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = POLLOUT | POLLWRNORM;
++ struct per_user_data *u = file->private_data;
++
++ poll_wait(file, &u->evtchn_wait, wait);
++ if (u->ring_cons != u->ring_prod)
++ mask |= POLLIN | POLLRDNORM;
++ if (u->ring_overflow)
++ mask = POLLERR;
++ return mask;
++}
++
++static int evtchn_fasync(int fd, struct file *filp, int on)
++{
++ struct per_user_data *u = filp->private_data;
++ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
++}
++
++static int evtchn_open(struct inode *inode, struct file *filp)
++{
++ struct per_user_data *u;
++
++ if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
++ return -ENOMEM;
++
++ memset(u, 0, sizeof(*u));
++ init_waitqueue_head(&u->evtchn_wait);
++
++ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++ if (u->ring == NULL) {
++ kfree(u);
++ return -ENOMEM;
++ }
++
++ mutex_init(&u->ring_cons_mutex);
++
++ filp->private_data = u;
++
++ u->bind_cpu = -1;
++
++ return 0;
++}
++
++static int evtchn_release(struct inode *inode, struct file *filp)
++{
++ int i;
++ struct per_user_data *u = filp->private_data;
++ struct evtchn_close close;
++
++ spin_lock_irq(&port_user_lock);
++
++ free_page((unsigned long)u->ring);
++
++ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++ int ret;
++ if (port_user[i] != u)
++ continue;
++
++ port_user[i] = NULL;
++ mask_evtchn(i);
++ rebind_evtchn_to_cpu(i, 0);
++
++ close.port = i;
++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ BUG_ON(ret);
++ }
++
++ spin_unlock_irq(&port_user_lock);
++
++ kfree(u);
++
++ return 0;
++}
++
++static const struct file_operations evtchn_fops = {
++ .owner = THIS_MODULE,
++ .read = evtchn_read,
++ .write = evtchn_write,
++ .unlocked_ioctl = evtchn_ioctl,
++ .poll = evtchn_poll,
++ .fasync = evtchn_fasync,
++ .open = evtchn_open,
++ .release = evtchn_release,
++};
++
++static struct miscdevice evtchn_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "evtchn",
++ .fops = &evtchn_fops,
++};
++
++static int __cpuinit evtchn_cpu_notify(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int hotcpu = (unsigned long)hcpu;
++ cpumask_t map = cpu_online_map;
++ int port, newcpu;
++ struct per_user_data *u;
++
++ switch (action) {
++ case CPU_DOWN_PREPARE:
++ cpu_clear(hotcpu, map);
++ spin_lock_irq(&port_user_lock);
++ for (port = 0; port < NR_EVENT_CHANNELS; port++) {
++ if ((u = port_user[port]) != NULL &&
++ u->bind_cpu == hotcpu &&
++ (newcpu = next_bind_cpu(map)) < NR_CPUS) {
++ rebind_evtchn_to_cpu(port, newcpu);
++ u->bind_cpu = newcpu;
++ }
++ }
++ spin_unlock_irq(&port_user_lock);
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block __cpuinitdata evtchn_cpu_nfb = {
++ .notifier_call = evtchn_cpu_notify
++};
++
++static int __init evtchn_init(void)
++{
++ int err;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ spin_lock_init(&port_user_lock);
++ memset(port_user, 0, sizeof(port_user));
++
++ /* Create '/dev/misc/evtchn'. */
++ err = misc_register(&evtchn_miscdev);
++ if (err != 0) {
++ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
++ return err;
++ }
++
++ register_cpu_notifier(&evtchn_cpu_nfb);
++
++ printk("Event-channel device installed.\n");
++
++ return 0;
++}
++
++static void __exit evtchn_cleanup(void)
++{
++ misc_deregister(&evtchn_miscdev);
++ unregister_cpu_notifier(&evtchn_cpu_nfb);
++}
++
++module_init(evtchn_init);
++module_exit(evtchn_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/evtchn/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/evtchn/Makefile
+--- linux-2.6.18.8/drivers/xen/evtchn/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/evtchn/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++
++obj-y := evtchn.o
+diff -rpuN linux-2.6.18.8/drivers/xen/fbfront/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/fbfront/Makefile
+--- linux-2.6.18.8/drivers/xen/fbfront/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/fbfront/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_XEN_FRAMEBUFFER) := xenfb.o
++obj-$(CONFIG_XEN_KEYBOARD) += xenkbd.o
+diff -rpuN linux-2.6.18.8/drivers/xen/fbfront/xenfb.c linux-2.6.18-xen-3.3.0/drivers/xen/fbfront/xenfb.c
+--- linux-2.6.18.8/drivers/xen/fbfront/xenfb.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/fbfront/xenfb.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,887 @@
++/*
++ * linux/drivers/xen/fbfront/xenfb.c -- Xen para-virtual frame buffer device
++ *
++ * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ *
++ * Based on linux/drivers/video/q40fb.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables when they become capable of dealing with the
++ * frame buffer.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/xenbus.h>
++#include <linux/kthread.h>
++
++struct xenfb_mapping
++{
++ struct list_head link;
++ struct vm_area_struct *vma;
++ atomic_t map_refs;
++ int faults;
++ struct xenfb_info *info;
++};
++
++struct xenfb_info
++{
++ struct task_struct *kthread;
++ wait_queue_head_t wq;
++
++ unsigned char *fb;
++ struct fb_info *fb_info;
++ struct timer_list refresh;
++ int dirty;
++ int x1, y1, x2, y2; /* dirty rectangle,
++ protected by dirty_lock */
++ spinlock_t dirty_lock;
++ struct mutex mm_lock;
++ int nr_pages;
++ struct page **pages;
++ struct list_head mappings; /* protected by mm_lock */
++
++ int irq;
++ struct xenfb_page *page;
++ unsigned long *mfns;
++ int update_wanted; /* XENFB_TYPE_UPDATE wanted */
++ int feature_resize; /* Backend has resize feature */
++ struct xenfb_resize resize;
++ int resize_dpy;
++ spinlock_t resize_lock;
++
++ struct xenbus_device *xbdev;
++};
++
++/*
++ * There are three locks:
++ * spinlock resize_lock protecting resize_dpy and resize
++ * spinlock dirty_lock protecting the dirty rectangle
++ * mutex mm_lock protecting mappings.
++ *
++ * How the dirty and mapping locks work together
++ *
++ * The problem is that dirty rectangle and mappings aren't
++ * independent: the dirty rectangle must cover all faulted pages in
++ * mappings. We need to prove that our locking maintains this
++ * invariant.
++ *
++ * There are several kinds of critical regions:
++ *
++ * 1. Holding only dirty_lock: xenfb_refresh(). May run in
++ * interrupts. Extends the dirty rectangle. Trivially preserves
++ * invariant.
++ *
++ * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
++ * only mappings. The former creates unfaulted pages. Preserves
++ * invariant. The latter removes pages. Preserves invariant.
++ *
++ * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
++ * rectangle and updates mappings consistently. Preserves
++ * invariant.
++ *
++ * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
++ * rectangle and update mappings consistently.
++ *
++ * We can't simply hold both locks, because zap_page_range() cannot
++ * be called with a spinlock held.
++ *
++ * Therefore, we first clear the dirty rectangle with both locks
++ * held. Then we unlock dirty_lock and update the mappings.
++ * Critical regions that hold only dirty_lock may interfere with
++ * that. This can only be region 1: xenfb_refresh(). But that
++ * just extends the dirty rectangle, which can't harm the
++ * invariant.
++ *
++ * But FIXME: the invariant is too weak. It misses that the fault
++ * record in mappings must be consistent with the mapping of pages in
++ * the associated address space! do_no_page() updates the PTE after
++ * xenfb_vm_nopage() returns, i.e. outside the critical region. This
++ * allows the following race:
++ *
++ * X writes to some address in the Xen frame buffer
++ * Fault - call do_no_page()
++ * call xenfb_vm_nopage()
++ * grab mm_lock
++ * map->faults++;
++ * release mm_lock
++ * return back to do_no_page()
++ * (preempted, or SMP)
++ * Xen worker thread runs.
++ * grab mm_lock
++ * look at mappings
++ * find this mapping, zaps its pages (but page not in pte yet)
++ * clear map->faults
++ * releases mm_lock
++ * (back to X process)
++ * put page in X's pte
++ *
++ * Oh well, we won't be sending updates for writes to this page anytime soon.
++ */
++#define MB_ (1024*1024)
++#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
++
++enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
++static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
++module_param_array(video, int, NULL, 0);
++MODULE_PARM_DESC(video,
++ "Size of video memory in MB and width,height in pixels, default = (2,800,600)");
++
++static int xenfb_fps = 20;
++
++static int xenfb_remove(struct xenbus_device *);
++static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
++static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
++static void xenfb_disconnect_backend(struct xenfb_info *);
++
++static void xenfb_send_event(struct xenfb_info *info,
++ union xenfb_out_event *event)
++{
++ __u32 prod;
++
++ prod = info->page->out_prod;
++ /* caller ensures !xenfb_queue_full() */
++ mb(); /* ensure ring space available */
++ XENFB_OUT_RING_REF(info->page, prod) = *event;
++ wmb(); /* ensure ring contents visible */
++ info->page->out_prod = prod + 1;
++
++ notify_remote_via_irq(info->irq);
++}
++
++static void xenfb_do_update(struct xenfb_info *info,
++ int x, int y, int w, int h)
++{
++ union xenfb_out_event event;
++
++ memset(&event, 0, sizeof(event));
++ event.type = XENFB_TYPE_UPDATE;
++ event.update.x = x;
++ event.update.y = y;
++ event.update.width = w;
++ event.update.height = h;
++
++ /* caller ensures !xenfb_queue_full() */
++ xenfb_send_event(info, &event);
++}
++
++static void xenfb_do_resize(struct xenfb_info *info)
++{
++ union xenfb_out_event event;
++
++ memset(&event, 0, sizeof(event));
++ event.resize = info->resize;
++
++ /* caller ensures !xenfb_queue_full() */
++ xenfb_send_event(info, &event);
++}
++
++static int xenfb_queue_full(struct xenfb_info *info)
++{
++ __u32 cons, prod;
++
++ prod = info->page->out_prod;
++ cons = info->page->out_cons;
++ return prod - cons == XENFB_OUT_RING_LEN;
++}
++
++static void xenfb_update_screen(struct xenfb_info *info)
++{
++ unsigned long flags;
++ int y1, y2, x1, x2;
++ struct xenfb_mapping *map;
++
++ if (!info->update_wanted)
++ return;
++ if (xenfb_queue_full(info))
++ return;
++
++ mutex_lock(&info->mm_lock);
++
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ y1 = info->y1;
++ y2 = info->y2;
++ x1 = info->x1;
++ x2 = info->x2;
++ info->x1 = info->y1 = INT_MAX;
++ info->x2 = info->y2 = 0;
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++
++ list_for_each_entry(map, &info->mappings, link) {
++ if (!map->faults)
++ continue;
++ zap_page_range(map->vma, map->vma->vm_start,
++ map->vma->vm_end - map->vma->vm_start, NULL);
++ map->faults = 0;
++ }
++
++ mutex_unlock(&info->mm_lock);
++
++ if (x2 < x1 || y2 < y1) {
++ printk("xenfb_update_screen bogus rect %d %d %d %d\n",
++ x1, x2, y1, y2);
++ WARN_ON(1);
++ }
++ xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
++}
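
xenfb_update_screen() is the protocol promised in the locking comment at the top of the file: take mm_lock, hold dirty_lock only long enough to snapshot and reset the rectangle, then do the expensive page zapping under mm_lock alone. A runnable userspace analog using a pthread mutex and spinlock (not part of the patch; all names hypothetical):

/*
 * Analog of the xenfb two-lock protocol: the spinlock guards only the
 * snapshot-and-clear of the dirty rectangle, the mutex covers the slow
 * flush. Build with: cc demo.c -lpthread
 */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t dirty_lock;
static int dx1 = INT_MAX, dy1 = INT_MAX, dx2, dy2;

/* Analog of __xenfb_refresh(): grows the rectangle, spinlock only. */
static void mark_dirty(int x1, int y1, int x2, int y2)
{
	pthread_spin_lock(&dirty_lock);
	if (x1 < dx1) dx1 = x1;
	if (y1 < dy1) dy1 = y1;
	if (x2 > dx2) dx2 = x2;
	if (y2 > dy2) dy2 = y2;
	pthread_spin_unlock(&dirty_lock);
}

/* Analog of xenfb_update_screen(): mutex first, spinlock only to
 * snapshot and reset; the slow flush runs with the spinlock dropped. */
static void flush_rect(void)
{
	int x1, y1, x2, y2;

	pthread_mutex_lock(&mm_lock);
	pthread_spin_lock(&dirty_lock);
	x1 = dx1; y1 = dy1; x2 = dx2; y2 = dy2;
	dx1 = dy1 = INT_MAX;
	dx2 = dy2 = 0;
	pthread_spin_unlock(&dirty_lock);
	/* the zap_page_range() analog would run here, under mm_lock alone */
	pthread_mutex_unlock(&mm_lock);

	printf("flush (%d,%d)-(%d,%d)\n", x1, y1, x2, y2);
}

int main(void)
{
	pthread_spin_init(&dirty_lock, PTHREAD_PROCESS_PRIVATE);
	mark_dirty(10, 10, 20, 20);
	mark_dirty(5, 15, 12, 40);	/* rectangles merge */
	flush_rect();			/* prints (5,10)-(20,40) */
	return 0;
}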
++
++static void xenfb_handle_resize_dpy(struct xenfb_info *info)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->resize_lock, flags);
++ if (info->resize_dpy) {
++ if (!xenfb_queue_full(info)) {
++ info->resize_dpy = 0;
++ xenfb_do_resize(info);
++ }
++ }
++ spin_unlock_irqrestore(&info->resize_lock, flags);
++}
++
++static int xenfb_thread(void *data)
++{
++ struct xenfb_info *info = data;
++
++ while (!kthread_should_stop()) {
++ xenfb_handle_resize_dpy(info);
++ if (info->dirty) {
++ info->dirty = 0;
++ xenfb_update_screen(info);
++ }
++ wait_event_interruptible(info->wq,
++ kthread_should_stop() || info->dirty);
++ try_to_freeze();
++ }
++ return 0;
++}
++
++static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++ u32 v;
++
++ if (regno > info->cmap.len)
++ return 1;
++
++ red >>= (16 - info->var.red.length);
++ green >>= (16 - info->var.green.length);
++ blue >>= (16 - info->var.blue.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset);
++
++ /* FIXME is this sane? check against xxxfb_setcolreg()! */
++ switch (info->var.bits_per_pixel) {
++ case 16:
++ case 24:
++ case 32:
++ ((u32 *)info->pseudo_palette)[regno] = v;
++ break;
++ }
++
++ return 0;
++}
++
++static void xenfb_timer(unsigned long data)
++{
++ struct xenfb_info *info = (struct xenfb_info *)data;
++ wake_up(&info->wq);
++}
++
++static void __xenfb_refresh(struct xenfb_info *info,
++ int x1, int y1, int w, int h)
++{
++ int y2, x2;
++
++ y2 = y1 + h;
++ x2 = x1 + w;
++
++ if (info->y1 > y1)
++ info->y1 = y1;
++ if (info->y2 < y2)
++ info->y2 = y2;
++ if (info->x1 > x1)
++ info->x1 = x1;
++ if (info->x2 < x2)
++ info->x2 = x2;
++ info->dirty = 1;
++
++ if (timer_pending(&info->refresh))
++ return;
++
++ mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
++}
++
++static void xenfb_refresh(struct xenfb_info *info,
++ int x1, int y1, int w, int h)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ __xenfb_refresh(info, x1, y1, w, h);
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++}
++
++static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_fillrect(p, rect);
++ xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
++}
++
++static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_imageblit(p, image);
++ xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
++}
++
++static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_copyarea(p, area);
++ xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
++}
++
++static void xenfb_vm_open(struct vm_area_struct *vma)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ atomic_inc(&map->map_refs);
++}
++
++static void xenfb_vm_close(struct vm_area_struct *vma)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ struct xenfb_info *info = map->info;
++
++ mutex_lock(&info->mm_lock);
++ if (atomic_dec_and_test(&map->map_refs)) {
++ list_del(&map->link);
++ kfree(map);
++ }
++ mutex_unlock(&info->mm_lock);
++}
++
++static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
++ unsigned long vaddr, int *type)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ struct xenfb_info *info = map->info;
++ int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long flags;
++ struct page *page;
++ int y1, y2;
++
++ if (pgnr >= info->nr_pages)
++ return NOPAGE_SIGBUS;
++
++ mutex_lock(&info->mm_lock);
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ page = info->pages[pgnr];
++ get_page(page);
++ map->faults++;
++
++ y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
++ y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
++ if (y2 > info->fb_info->var.yres)
++ y2 = info->fb_info->var.yres;
++ __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++ mutex_unlock(&info->mm_lock);
++
++ if (type)
++ *type = VM_FAULT_MINOR;
++
++ return page;
++}
++
++static struct vm_operations_struct xenfb_vm_ops = {
++ .open = xenfb_vm_open,
++ .close = xenfb_vm_close,
++ .nopage = xenfb_vm_nopage,
++};
++
++static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
++{
++ struct xenfb_info *info = fb_info->par;
++ struct xenfb_mapping *map;
++ int map_pages;
++
++ if (!(vma->vm_flags & VM_WRITE))
++ return -EINVAL;
++ if (!(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++
++ map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
++ if (map_pages > info->nr_pages)
++ return -EINVAL;
++
++ map = kzalloc(sizeof(*map), GFP_KERNEL);
++ if (map == NULL)
++ return -ENOMEM;
++
++ map->vma = vma;
++ map->faults = 0;
++ map->info = info;
++ atomic_set(&map->map_refs, 1);
++
++ mutex_lock(&info->mm_lock);
++ list_add(&map->link, &info->mappings);
++ mutex_unlock(&info->mm_lock);
++
++ vma->vm_ops = &xenfb_vm_ops;
++ vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
++ vma->vm_private_data = map;
++
++ return 0;
++}
++
++static int
++xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct xenfb_info *xenfb_info;
++ int required_mem_len;
++
++ xenfb_info = info->par;
++
++ if (!xenfb_info->feature_resize) {
++ if (var->xres == video[KPARAM_WIDTH] &&
++ var->yres == video[KPARAM_HEIGHT] &&
++ var->bits_per_pixel == xenfb_info->page->depth) {
++ return 0;
++ }
++ return -EINVAL;
++ }
++
++ /* Can't resize past initial width and height */
++ if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
++ return -EINVAL;
++
++ required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
++ if (var->bits_per_pixel == xenfb_info->page->depth &&
++ var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
++ required_mem_len <= info->fix.smem_len) {
++ var->xres_virtual = var->xres;
++ var->yres_virtual = var->yres;
++ return 0;
++ }
++ return -EINVAL;
++}
++
++static int xenfb_set_par(struct fb_info *info)
++{
++ struct xenfb_info *xenfb_info;
++ unsigned long flags;
++
++ xenfb_info = info->par;
++
++ spin_lock_irqsave(&xenfb_info->resize_lock, flags);
++ xenfb_info->resize.type = XENFB_TYPE_RESIZE;
++ xenfb_info->resize.width = info->var.xres;
++ xenfb_info->resize.height = info->var.yres;
++ xenfb_info->resize.stride = info->fix.line_length;
++ xenfb_info->resize.depth = info->var.bits_per_pixel;
++ xenfb_info->resize.offset = 0;
++ xenfb_info->resize_dpy = 1;
++ spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
++ return 0;
++}
++
++static struct fb_ops xenfb_fb_ops = {
++ .owner = THIS_MODULE,
++ .fb_setcolreg = xenfb_setcolreg,
++ .fb_fillrect = xenfb_fillrect,
++ .fb_copyarea = xenfb_copyarea,
++ .fb_imageblit = xenfb_imageblit,
++ .fb_mmap = xenfb_mmap,
++ .fb_check_var = xenfb_check_var,
++ .fb_set_par = xenfb_set_par,
++};
++
++static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
++ struct pt_regs *regs)
++{
++ /*
++	 * No incoming events are recognized; simply ignore them all.
++	 * If you need to recognize some, see xenkbd's input_handler()
++ * for how to do that.
++ */
++ struct xenfb_info *info = dev_id;
++ struct xenfb_page *page = info->page;
++
++ if (page->in_cons != page->in_prod) {
++ info->page->in_cons = info->page->in_prod;
++ notify_remote_via_irq(info->irq);
++ }
++ return IRQ_HANDLED;
++}
++
++static unsigned long vmalloc_to_mfn(void *address)
++{
++ return pfn_to_mfn(vmalloc_to_pfn(address));
++}
++
++static int __devinit xenfb_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ struct xenfb_info *info;
++ struct fb_info *fb_info;
++ int fb_size;
++ int val;
++ int ret;
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++
++ /* Limit kernel param videoram amount to what is in xenstore */
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
++ if (val < video[KPARAM_MEM])
++ video[KPARAM_MEM] = val;
++ }
++
++ /* If requested res does not fit in available memory, use default */
++ fb_size = video[KPARAM_MEM] * MB_;
++ if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
++ video[KPARAM_WIDTH] = XENFB_WIDTH;
++ video[KPARAM_HEIGHT] = XENFB_HEIGHT;
++ fb_size = XENFB_DEFAULT_FB_LEN;
++ }
++
++ dev->dev.driver_data = info;
++ info->xbdev = dev;
++ info->irq = -1;
++ info->x1 = info->y1 = INT_MAX;
++ spin_lock_init(&info->dirty_lock);
++ spin_lock_init(&info->resize_lock);
++ mutex_init(&info->mm_lock);
++ init_waitqueue_head(&info->wq);
++ init_timer(&info->refresh);
++ info->refresh.function = xenfb_timer;
++ info->refresh.data = (unsigned long)info;
++ INIT_LIST_HEAD(&info->mappings);
++
++ info->fb = vmalloc(fb_size);
++ if (info->fb == NULL)
++ goto error_nomem;
++ memset(info->fb, 0, fb_size);
++
++ info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
++ GFP_KERNEL);
++ if (info->pages == NULL)
++ goto error_nomem;
++
++ info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
++ if (!info->mfns)
++ goto error_nomem;
++
++ /* set up shared page */
++ info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
++ if (!info->page)
++ goto error_nomem;
++
++ fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
++ /* see fishy hackery below */
++ if (fb_info == NULL)
++ goto error_nomem;
++
++ /* FIXME fishy hackery */
++ fb_info->pseudo_palette = fb_info->par;
++ fb_info->par = info;
++ /* /FIXME */
++ fb_info->screen_base = info->fb;
++
++ fb_info->fbops = &xenfb_fb_ops;
++ fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
++ fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
++ fb_info->var.bits_per_pixel = XENFB_DEPTH;
++
++ fb_info->var.red = (struct fb_bitfield){16, 8, 0};
++ fb_info->var.green = (struct fb_bitfield){8, 8, 0};
++ fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
++
++ fb_info->var.activate = FB_ACTIVATE_NOW;
++ fb_info->var.height = -1;
++ fb_info->var.width = -1;
++ fb_info->var.vmode = FB_VMODE_NONINTERLACED;
++
++ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
++ fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
++ fb_info->fix.smem_start = 0;
++ fb_info->fix.smem_len = fb_size;
++ strcpy(fb_info->fix.id, "xen");
++ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
++ fb_info->fix.accel = FB_ACCEL_NONE;
++
++ fb_info->flags = FBINFO_FLAG_DEFAULT;
++
++ ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
++ if (ret < 0) {
++ framebuffer_release(fb_info);
++ xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
++ goto error;
++ }
++
++ xenfb_init_shared_page(info, fb_info);
++
++ ret = register_framebuffer(fb_info);
++ if (ret) {
++ fb_dealloc_cmap(&info->fb_info->cmap);
++ framebuffer_release(fb_info);
++ xenbus_dev_fatal(dev, ret, "register_framebuffer");
++ goto error;
++ }
++ info->fb_info = fb_info;
++
++ /* FIXME should this be delayed until backend XenbusStateConnected? */
++ info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
++ if (IS_ERR(info->kthread)) {
++ ret = PTR_ERR(info->kthread);
++ info->kthread = NULL;
++ xenbus_dev_fatal(dev, ret, "register_framebuffer");
++ goto error;
++ }
++
++ ret = xenfb_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++ error_nomem:
++ ret = -ENOMEM;
++ xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++ xenfb_remove(dev);
++ return ret;
++}
++
++static int xenfb_resume(struct xenbus_device *dev)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++
++ xenfb_disconnect_backend(info);
++ xenfb_init_shared_page(info, info->fb_info);
++ return xenfb_connect_backend(dev, info);
++}
++
++static int xenfb_remove(struct xenbus_device *dev)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++
++ del_timer(&info->refresh);
++ if (info->kthread)
++ kthread_stop(info->kthread);
++ xenfb_disconnect_backend(info);
++ if (info->fb_info) {
++ unregister_framebuffer(info->fb_info);
++ fb_dealloc_cmap(&info->fb_info->cmap);
++ framebuffer_release(info->fb_info);
++ }
++ free_page((unsigned long)info->page);
++ vfree(info->mfns);
++ kfree(info->pages);
++ vfree(info->fb);
++ kfree(info);
++
++ return 0;
++}
++
++static void xenfb_init_shared_page(struct xenfb_info *info,
++ struct fb_info * fb_info)
++{
++ int i;
++ int epd = PAGE_SIZE / sizeof(info->mfns[0]);
++
++ for (i = 0; i < info->nr_pages; i++)
++ info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
++
++ for (i = 0; i < info->nr_pages; i++)
++ info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
++
++ for (i = 0; i * epd < info->nr_pages; i++)
++ info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
++
++ info->page->width = fb_info->var.xres;
++ info->page->height = fb_info->var.yres;
++ info->page->depth = fb_info->var.bits_per_pixel;
++ info->page->line_length = fb_info->fix.line_length;
++ info->page->mem_length = fb_info->fix.smem_len;
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++}
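
The shared page describes the framebuffer through two levels: info->mfns[] lists the machine frame of every framebuffer page, and page->pd[] lists the machine frames of the pages holding that list, epd entries per directory slot. A standalone check of the sizing arithmetic (not part of the patch; 4 KB pages and the 2 MB default from the video= parameter are assumed):

/* Sizing check for the two-level mfn directory built above. */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;
	const unsigned long mfn_size = sizeof(unsigned long);
	unsigned long fb_bytes = 2UL << 20;		/* 2 MB default */
	unsigned long nr_pages = (fb_bytes + page_size - 1) / page_size;
	unsigned long epd = page_size / mfn_size;	/* entries per pd page */
	unsigned long pd_pages = (nr_pages + epd - 1) / epd;

	printf("%lu fb pages, %lu mfns per directory page -> %lu pd entries\n",
	       nr_pages, epd, pd_pages);
	return 0;
}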
++
++static int xenfb_connect_backend(struct xenbus_device *dev,
++ struct xenfb_info *info)
++{
++ int ret;
++ struct xenbus_transaction xbt;
++
++ ret = bind_listening_port_to_irqhandler(
++ dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
++ if (ret < 0) {
++ xenbus_dev_fatal(dev, ret,
++ "bind_listening_port_to_irqhandler");
++ return ret;
++ }
++ info->irq = ret;
++
++ again:
++ ret = xenbus_transaction_start(&xbt);
++ if (ret) {
++ xenbus_dev_fatal(dev, ret, "starting transaction");
++ return ret;
++ }
++ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++ virt_to_mfn(info->page));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++ XEN_IO_PROTO_ABI_NATIVE);
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_transaction_end(xbt, 0);
++ if (ret) {
++ if (ret == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, ret, "completing transaction");
++ return ret;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++ return 0;
++
++ error_xenbus:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, ret, "writing xenstore");
++ return ret;
++}
++
++static void xenfb_disconnect_backend(struct xenfb_info *info)
++{
++ if (info->irq >= 0)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = -1;
++}
++
++static void xenfb_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++ int val;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ InitWait:
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
++ /*
++ * Work around xenbus race condition: If backend goes
++ * through InitWait to Connected fast enough, we can
++ * get Connected twice here.
++ */
++ if (dev->state != XenbusStateConnected)
++ goto InitWait; /* no InitWait seen yet, fudge it */
++
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "request-update", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ info->update_wanted = 1;
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend,
++ "feature-resize", "%d", &val) < 0)
++ val = 0;
++ info->feature_resize = val;
++ break;
++
++ case XenbusStateClosing:
++		/* FIXME is this safe in any dev->state? */
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static const struct xenbus_device_id xenfb_ids[] = {
++ { "vfb" },
++ { "" }
++};
++MODULE_ALIAS("xen:vfb");
++
++static struct xenbus_driver xenfb_driver = {
++ .name = "vfb",
++ .owner = THIS_MODULE,
++ .ids = xenfb_ids,
++ .probe = xenfb_probe,
++ .remove = xenfb_remove,
++ .resume = xenfb_resume,
++ .otherend_changed = xenfb_backend_changed,
++};
++
++static int __init xenfb_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Nothing to do if running in dom0. */
++ if (is_initial_xendomain())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenfb_driver);
++}
++
++static void __exit xenfb_cleanup(void)
++{
++ return xenbus_unregister_driver(&xenfb_driver);
++}
++
++module_init(xenfb_init);
++module_exit(xenfb_cleanup);
++
++MODULE_LICENSE("GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/fbfront/xenkbd.c linux-2.6.18-xen-3.3.0/drivers/xen/fbfront/xenkbd.c
+--- linux-2.6.18.8/drivers/xen/fbfront/xenkbd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/fbfront/xenkbd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,354 @@
++/*
++ * linux/drivers/xen/fbfront/xenkbd.c -- Xen para-virtual input device
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ *
++ * Based on linux/drivers/input/mouse/sermouse.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables together with xenfb.c.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/kbdif.h>
++#include <xen/xenbus.h>
++
++struct xenkbd_info
++{
++ struct input_dev *kbd;
++ struct input_dev *ptr;
++ struct xenkbd_page *page;
++ int irq;
++ struct xenbus_device *xbdev;
++ char phys[32];
++};
++
++static int xenkbd_remove(struct xenbus_device *);
++static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
++static void xenkbd_disconnect_backend(struct xenkbd_info *);
++
++/*
++ * Note: if you need to send out events, see xenfb_do_update() for how
++ * to do that.
++ */
++
++static irqreturn_t input_handler(int rq, void *dev_id, struct pt_regs *regs)
++{
++ struct xenkbd_info *info = dev_id;
++ struct xenkbd_page *page = info->page;
++ __u32 cons, prod;
++
++ prod = page->in_prod;
++ if (prod == page->in_cons)
++ return IRQ_HANDLED;
++ rmb(); /* ensure we see ring contents up to prod */
++ for (cons = page->in_cons; cons != prod; cons++) {
++ union xenkbd_in_event *event;
++ struct input_dev *dev;
++ event = &XENKBD_IN_RING_REF(page, cons);
++
++ dev = info->ptr;
++ switch (event->type) {
++ case XENKBD_TYPE_MOTION:
++ if (event->motion.rel_z)
++ input_report_rel(dev, REL_WHEEL,
++ -event->motion.rel_z);
++ input_report_rel(dev, REL_X, event->motion.rel_x);
++ input_report_rel(dev, REL_Y, event->motion.rel_y);
++ break;
++ case XENKBD_TYPE_KEY:
++ dev = NULL;
++ if (test_bit(event->key.keycode, info->kbd->keybit))
++ dev = info->kbd;
++ if (test_bit(event->key.keycode, info->ptr->keybit))
++ dev = info->ptr;
++ if (dev)
++ input_report_key(dev, event->key.keycode,
++ event->key.pressed);
++ else
++ printk("xenkbd: unhandled keycode 0x%x\n",
++ event->key.keycode);
++ break;
++ case XENKBD_TYPE_POS:
++ if (event->pos.rel_z)
++ input_report_rel(dev, REL_WHEEL,
++ -event->pos.rel_z);
++ input_report_abs(dev, ABS_X, event->pos.abs_x);
++ input_report_abs(dev, ABS_Y, event->pos.abs_y);
++ break;
++ }
++ if (dev)
++ input_sync(dev);
++ }
++ mb(); /* ensure we got ring contents */
++ page->in_cons = cons;
++ notify_remote_via_irq(info->irq);
++
++ return IRQ_HANDLED;
++}
++
++int __devinit xenkbd_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int ret, i;
++ struct xenkbd_info *info;
++ struct input_dev *kbd, *ptr;
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++ dev->dev.driver_data = info;
++ info->xbdev = dev;
++ snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
++
++ info->page = (void *)__get_free_page(GFP_KERNEL);
++ if (!info->page)
++ goto error_nomem;
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++
++ /* keyboard */
++ kbd = input_allocate_device();
++ if (!kbd)
++ goto error_nomem;
++ kbd->name = "Xen Virtual Keyboard";
++ kbd->phys = info->phys;
++ kbd->id.bustype = BUS_PCI;
++ kbd->id.vendor = 0x5853;
++ kbd->id.product = 0xffff;
++ kbd->evbit[0] = BIT(EV_KEY);
++ for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
++ set_bit(i, kbd->keybit);
++ for (i = KEY_OK; i < KEY_MAX; i++)
++ set_bit(i, kbd->keybit);
++
++ ret = input_register_device(kbd);
++ if (ret) {
++ input_free_device(kbd);
++ xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
++ goto error;
++ }
++ info->kbd = kbd;
++
++ /* pointing device */
++ ptr = input_allocate_device();
++ if (!ptr)
++ goto error_nomem;
++ ptr->name = "Xen Virtual Pointer";
++ ptr->phys = info->phys;
++ ptr->id.bustype = BUS_PCI;
++ ptr->id.vendor = 0x5853;
++ ptr->id.product = 0xfffe;
++ ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
++ for (i = BTN_LEFT; i <= BTN_TASK; i++)
++ set_bit(i, ptr->keybit);
++ ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
++ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
++ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
++
++ ret = input_register_device(ptr);
++ if (ret) {
++ input_free_device(ptr);
++ xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
++ goto error;
++ }
++ info->ptr = ptr;
++
++ ret = xenkbd_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++ error_nomem:
++ ret = -ENOMEM;
++ xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++ xenkbd_remove(dev);
++ return ret;
++}
++
++static int xenkbd_resume(struct xenbus_device *dev)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++
++ xenkbd_disconnect_backend(info);
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++ return xenkbd_connect_backend(dev, info);
++}
++
++static int xenkbd_remove(struct xenbus_device *dev)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++
++ xenkbd_disconnect_backend(info);
++ input_unregister_device(info->kbd);
++ input_unregister_device(info->ptr);
++ free_page((unsigned long)info->page);
++ kfree(info);
++ return 0;
++}
++
++static int xenkbd_connect_backend(struct xenbus_device *dev,
++ struct xenkbd_info *info)
++{
++ int ret;
++ struct xenbus_transaction xbt;
++
++ ret = bind_listening_port_to_irqhandler(
++ dev->otherend_id, input_handler, 0, "xenkbd", info);
++ if (ret < 0) {
++ xenbus_dev_fatal(dev, ret,
++ "bind_listening_port_to_irqhandler");
++ return ret;
++ }
++ info->irq = ret;
++
++ again:
++ ret = xenbus_transaction_start(&xbt);
++ if (ret) {
++ xenbus_dev_fatal(dev, ret, "starting transaction");
++ return ret;
++ }
++ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++ virt_to_mfn(info->page));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_transaction_end(xbt, 0);
++ if (ret) {
++ if (ret == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, ret, "completing transaction");
++ return ret;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++ return 0;
++
++ error_xenbus:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, ret, "writing xenstore");
++ return ret;
++}
++
++static void xenkbd_disconnect_backend(struct xenkbd_info *info)
++{
++ if (info->irq >= 0)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = -1;
++}
++
++static void xenkbd_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++ int ret, val;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ InitWait:
++ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "feature-abs-pointer", "%d", &val);
++ if (ret < 0)
++ val = 0;
++ if (val) {
++ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
++ "request-abs-pointer", "1");
++ if (ret)
++ ; /* FIXME */
++ }
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
++ /*
++ * Work around xenbus race condition: If backend goes
++ * through InitWait to Connected fast enough, we can
++ * get Connected twice here.
++ */
++ if (dev->state != XenbusStateConnected)
++ goto InitWait; /* no InitWait seen yet, fudge it */
++
++ /* Set input abs params to match backend screen res */
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "width", "%d", &val) > 0 )
++ input_set_abs_params(info->ptr, ABS_X, 0, val, 0, 0);
++
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "height", "%d", &val) > 0 )
++ input_set_abs_params(info->ptr, ABS_Y, 0, val, 0, 0);
++
++ break;
++
++ case XenbusStateClosing:
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static const struct xenbus_device_id xenkbd_ids[] = {
++ { "vkbd" },
++ { "" }
++};
++MODULE_ALIAS("xen:vkbd");
++
++static struct xenbus_driver xenkbd_driver = {
++ .name = "vkbd",
++ .owner = THIS_MODULE,
++ .ids = xenkbd_ids,
++ .probe = xenkbd_probe,
++ .remove = xenkbd_remove,
++ .resume = xenkbd_resume,
++ .otherend_changed = xenkbd_backend_changed,
++};
++
++static int __init xenkbd_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Nothing to do if running in dom0. */
++ if (is_initial_xendomain())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenkbd_driver);
++}
++
++static void __exit xenkbd_cleanup(void)
++{
++ return xenbus_unregister_driver(&xenkbd_driver);
++}
++
++module_init(xenkbd_init);
++module_exit(xenkbd_cleanup);
++
++MODULE_LICENSE("GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/gntdev/gntdev.c linux-2.6.18-xen-3.3.0/drivers/xen/gntdev/gntdev.c
+--- linux-2.6.18.8/drivers/xen/gntdev/gntdev.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/gntdev/gntdev.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1074 @@
++/******************************************************************************
++ * gntdev.c
++ *
++ * Device for accessing (in user-space) pages that have been granted by other
++ * domains.
++ *
++ * Copyright (c) 2006-2007, D G Murray.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <asm/atomic.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++
++#include <linux/types.h>
++#include <xen/public/gntdev.h>
++
++
++#define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray@cl.cam.ac.uk>"
++#define DRIVER_DESC "User-space granted page access driver"
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++
++#define MAX_GRANTS_LIMIT 1024
++#define DEFAULT_MAX_GRANTS 128
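++
++/* Typical usage from user space, as a rough sketch (error handling
++ * omitted; the device node path and the page size are assumptions,
++ * not defined by this file):
++ *
++ * int fd = open("/dev/gntdev", O_RDWR);
++ * struct ioctl_gntdev_map_grant_ref op = {
++ * .count = 1,
++ * .refs[0] = { .domid = remote_domid, .ref = gref },
++ * };
++ * ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
++ * void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
++ * MAP_SHARED, fd, op.index);
++ *
++ * The offset returned in op.index identifies the slot chosen by
++ * add_grant_reference() below; IOCTL_GNTDEV_UNMAP_GRANT_REF releases
++ * the slot again after munmap().
++ */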
++
++/* A slot can be in one of three states:
++ *
++ * 0. GNTDEV_SLOT_INVALID:
++ * This slot is not associated with a grant reference, and is therefore free
++ * to be overwritten by a new grant reference.
++ *
++ * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
++ * This slot is associated with a grant reference (via the
++ * IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
++ *
++ * 2. GNTDEV_SLOT_MAPPED:
++ * This slot is associated with a grant reference, and has been mmap()-ed.
++ */
++typedef enum gntdev_slot_state {
++ GNTDEV_SLOT_INVALID = 0,
++ GNTDEV_SLOT_NOT_YET_MAPPED,
++ GNTDEV_SLOT_MAPPED
++} gntdev_slot_state_t;
++
++#define GNTDEV_INVALID_HANDLE -1
++#define GNTDEV_FREE_LIST_INVALID -1
++/* Each opened instance of gntdev is associated with a list of grants,
++ * represented by an array of elements of the following type,
++ * gntdev_grant_info_t.
++ */
++typedef struct gntdev_grant_info {
++ gntdev_slot_state_t state;
++ union {
++ uint32_t free_list_index;
++ struct {
++ domid_t domid;
++ grant_ref_t ref;
++ grant_handle_t kernel_handle;
++ grant_handle_t user_handle;
++ uint64_t dev_bus_addr;
++ } valid;
++ } u;
++} gntdev_grant_info_t;
++
++/* Private data structure, which is stored in the file pointer for files
++ * associated with this device.
++ */
++typedef struct gntdev_file_private_data {
++
++ /* Array of grant information. */
++ gntdev_grant_info_t *grants;
++ uint32_t grants_size;
++
++ /* Read/write semaphore used to protect the grants array. */
++ struct rw_semaphore grants_sem;
++
++ /* An array of indices of free slots in the grants array.
++ * N.B. An entry in this list may temporarily have the value
++ * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
++ * from the list by the contiguous allocator, but the list has not yet
++ * been compressed. However, this is not visible across invocations of
++ * the device.
++ */
++ int32_t *free_list;
++
++ /* The number of free slots in the grants array. */
++ uint32_t free_list_size;
++
++ /* Read/write semaphore used to protect the free list. */
++ struct rw_semaphore free_list_sem;
++
++ /* Index of the next slot after the most recent contiguous allocation,
++ * for use in a next-fit allocator.
++ */
++ uint32_t next_fit_index;
++
++ /* Used to map grants into the kernel, before mapping them into user
++ * space.
++ */
++ struct page **foreign_pages;
++
++} gntdev_file_private_data_t;
++
++/* Module lifecycle operations. */
++static int __init gntdev_init(void);
++static void __exit gntdev_exit(void);
++
++module_init(gntdev_init);
++module_exit(gntdev_exit);
++
++/* File operations. */
++static int gntdev_open(struct inode *inode, struct file *flip);
++static int gntdev_release(struct inode *inode, struct file *flip);
++static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
++static long gntdev_ioctl(struct file *flip,
++ unsigned int cmd, unsigned long arg);
++
++static const struct file_operations gntdev_fops = {
++ .owner = THIS_MODULE,
++ .open = gntdev_open,
++ .release = gntdev_release,
++ .mmap = gntdev_mmap,
++ .unlocked_ioctl = gntdev_ioctl
++};
++
++/* VM operations. */
++static void gntdev_vma_close(struct vm_area_struct *vma);
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep, int is_fullmm);
++
++static struct vm_operations_struct gntdev_vmops = {
++ .close = gntdev_vma_close,
++ .zap_pte = gntdev_clear_pte
++};
++
++/* Global variables. */
++
++/* The driver major number, for use when unregistering the driver. */
++static int gntdev_major;
++
++#define GNTDEV_NAME "gntdev"
++
++/* Memory mapping functions
++ * ------------------------
++ *
++ * Every granted page is mapped into both kernel and user space, and the two
++ * following functions return the respective virtual addresses of these pages.
++ *
++ * When shadow paging is disabled, the granted page is mapped directly into
++ * user space; when it is enabled, it is mapped into the kernel and remapped
++ * into user space using vm_insert_page() (see gntdev_mmap(), below).
++ */
++
++/* Returns the virtual address (in user space) of the @page_index'th page
++ * in the given VM area.
++ */
++static inline unsigned long get_user_vaddr (struct vm_area_struct *vma,
++ int page_index)
++{
++ return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
++}
++
++/* Returns the virtual address (in kernel space) of the @slot_index'th page
++ * mapped by the gntdev instance that owns the given private data struct.
++ */
++static inline unsigned long get_kernel_vaddr (gntdev_file_private_data_t *priv,
++ int slot_index)
++{
++ unsigned long pfn;
++ void *kaddr;
++ pfn = page_to_pfn(priv->foreign_pages[slot_index]);
++ kaddr = pfn_to_kaddr(pfn);
++ return (unsigned long) kaddr;
++}
++
++/* Helper functions. */
++
++/* Adds information about a grant reference to the list of grants in the file's
++ * private data structure. Returns non-zero on failure. On success, sets the
++ * value of *offset to the offset that should be mmap()-ed in order to map the
++ * grant reference.
++ */
++static int add_grant_reference(struct file *flip,
++ struct ioctl_gntdev_grant_ref *op,
++ uint64_t *offset)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++
++ uint32_t slot_index;
++
++ if (unlikely(private_data->free_list_size == 0)) {
++ return -ENOMEM;
++ }
++
++ slot_index = private_data->free_list[--private_data->free_list_size];
++ private_data->free_list[private_data->free_list_size]
++ = GNTDEV_FREE_LIST_INVALID;
++
++ /* Copy the grant information into file's private data. */
++ private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
++ private_data->grants[slot_index].u.valid.domid = op->domid;
++ private_data->grants[slot_index].u.valid.ref = op->ref;
++
++ /* The offset is calculated as the index of the chosen entry in the
++ * file's private data's array of grant information. This is then
++ * shifted to give an offset into the virtual "file address space".
++ */
++ *offset = slot_index << PAGE_SHIFT;
++
++ return 0;
++}
++
++/* Adds the @count grant references to the contiguous range in the slot array
++ * beginning at @first_slot. It is assumed that @first_slot was returned by a
++ * previous invocation of find_contiguous_free_range(), during the same
++ * invocation of the driver.
++ */
++static int add_grant_references(struct file *flip,
++ int count,
++ struct ioctl_gntdev_grant_ref *ops,
++ uint32_t first_slot)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++ int i;
++
++ for (i = 0; i < count; ++i) {
++
++ /* First, mark the slot's entry in the free list as invalid. */
++ int free_list_index =
++ private_data->grants[first_slot+i].u.free_list_index;
++ private_data->free_list[free_list_index] =
++ GNTDEV_FREE_LIST_INVALID;
++
++ /* Now, update the slot. */
++ private_data->grants[first_slot+i].state =
++ GNTDEV_SLOT_NOT_YET_MAPPED;
++ private_data->grants[first_slot+i].u.valid.domid =
++ ops[i].domid;
++ private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
++ }
++
++ return 0;
++}
++
++/* Scans through the free list for @flip, removing entries that are marked as
++ * GNTDEV_FREE_LIST_INVALID. This reduces the recorded size of the free list
++ * to the number of valid entries.
++ */
++static void compress_free_list(struct file *flip)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++ int i, j = 0, old_size, slot_index;
++
++ old_size = private_data->free_list_size;
++ for (i = 0; i < old_size; ++i) {
++ if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
++ if (i > j) {
++ slot_index = private_data->free_list[i];
++ private_data->free_list[j] = slot_index;
++ private_data->grants[slot_index].u
++ .free_list_index = j;
++ private_data->free_list[i]
++ = GNTDEV_FREE_LIST_INVALID;
++ }
++ ++j;
++ } else {
++ --private_data->free_list_size;
++ }
++ }
++}
++
++/* Searches the grant array in the private data of @flip for a range of
++ * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
++ *
++ * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
++ */
++static int find_contiguous_free_range(struct file *flip,
++ uint32_t num_slots)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++
++ int i;
++ int start_index = private_data->next_fit_index;
++ int range_start = 0, range_length;
++
++ if (private_data->free_list_size < num_slots) {
++ return -ENOMEM;
++ }
++
++ /* First search from the start_index to the end of the array. */
++ range_length = 0;
++ for (i = start_index; i < private_data->grants_size; ++i) {
++ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++ if (range_length == 0) {
++ range_start = i;
++ }
++ ++range_length;
++ if (range_length == num_slots) {
++ return range_start;
++ }
++ }
++ }
++
++ /* Now search from the start of the array to the start_index. */
++ range_length = 0;
++ for (i = 0; i < start_index; ++i) {
++ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++ if (range_length == 0) {
++ range_start = i;
++ }
++ ++range_length;
++ if (range_length == num_slots) {
++ return range_start;
++ }
++ }
++ }
++
++ return -ENOMEM;
++}
++
++static int init_private_data(gntdev_file_private_data_t *priv,
++ uint32_t max_grants)
++{
++ int i;
++
++ /* Allocate space for the kernel-mapping of granted pages. */
++ priv->foreign_pages =
++ alloc_empty_pages_and_pagevec(max_grants);
++ if (!priv->foreign_pages)
++ goto nomem_out;
++
++ /* Allocate the grant list and free-list. */
++ priv->grants = kmalloc(max_grants * sizeof(gntdev_grant_info_t),
++ GFP_KERNEL);
++ if (!priv->grants)
++ goto nomem_out2;
++ priv->free_list = kmalloc(max_grants * sizeof(int32_t), GFP_KERNEL);
++ if (!priv->free_list)
++ goto nomem_out3;
++
++ /* Initialise the free-list, which contains all slots at first. */
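++ /* (Stored in reverse, so that slot 0 is popped and used first.) */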
++ for (i = 0; i < max_grants; ++i) {
++ priv->free_list[max_grants - i - 1] = i;
++ priv->grants[i].state = GNTDEV_SLOT_INVALID;
++ priv->grants[i].u.free_list_index = max_grants - i - 1;
++ }
++ priv->grants_size = max_grants;
++ priv->free_list_size = max_grants;
++ priv->next_fit_index = 0;
++
++ return 0;
++
++nomem_out3:
++ kfree(priv->grants);
++nomem_out2:
++ free_empty_pages_and_pagevec(priv->foreign_pages, max_grants);
++nomem_out:
++ return -ENOMEM;
++
++}
++
++/* Interface functions. */
++
++/* Initialises the driver. Called when the module is loaded. */
++static int __init gntdev_init(void)
++{
++ struct class *class;
++ struct class_device *device;
++
++ if (!is_running_on_xen()) {
++ printk(KERN_ERR "You must be running Xen to use gntdev\n");
++ return -ENODEV;
++ }
++
++ gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops);
++ if (gntdev_major < 0)
++ {
++ printk(KERN_ERR "Could not register gntdev device\n");
++ return -ENOMEM;
++ }
++
++ /* Note that if the sysfs code fails, we will still initialise the
++ * device, and output the major number so that the device can be
++ * created manually using mknod.
++ */
++ if ((class = get_xen_class()) == NULL) {
++ printk(KERN_ERR "Error setting up xen_class\n");
++ printk(KERN_ERR "gntdev created with major number = %d\n",
++ gntdev_major);
++ return 0;
++ }
++
++ device = class_device_create(class, NULL, MKDEV(gntdev_major, 0),
++ NULL, GNTDEV_NAME);
++ if (IS_ERR(device)) {
++ printk(KERN_ERR "Error creating gntdev device in xen_class\n");
++ printk(KERN_ERR "gntdev created with major number = %d\n",
++ gntdev_major);
++ return 0;
++ }
++
++ return 0;
++}
++
++/* Cleans up and unregisters the driver. Called when the driver is unloaded.
++ */
++static void __exit gntdev_exit(void)
++{
++ struct class *class;
++ if ((class = get_xen_class()) != NULL)
++ class_device_destroy(class, MKDEV(gntdev_major, 0));
++ unregister_chrdev(gntdev_major, GNTDEV_NAME);
++}
++
++/* Called when the device is opened. */
++static int gntdev_open(struct inode *inode, struct file *flip)
++{
++ gntdev_file_private_data_t *private_data;
++
++ try_module_get(THIS_MODULE);
++
++ /* Allocate space for the per-instance private data. */
++ private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
++ if (!private_data)
++ goto nomem_out;
++
++ /* These will be lazily initialised by init_private_data. */
++ private_data->grants = NULL;
++ private_data->free_list = NULL;
++ private_data->foreign_pages = NULL;
++
++ init_rwsem(&private_data->grants_sem);
++ init_rwsem(&private_data->free_list_sem);
++
++ flip->private_data = private_data;
++
++ return 0;
++
++nomem_out:
++ return -ENOMEM;
++}
++
++/* Called when the device is closed.
++ */
++static int gntdev_release(struct inode *inode, struct file *flip)
++{
++ if (flip->private_data) {
++ gntdev_file_private_data_t *private_data =
++ (gntdev_file_private_data_t *) flip->private_data;
++ if (private_data->foreign_pages)
++ free_empty_pages_and_pagevec
++ (private_data->foreign_pages,
++ private_data->grants_size);
++ if (private_data->grants)
++ kfree(private_data->grants);
++ if (private_data->free_list)
++ kfree(private_data->free_list);
++ kfree(private_data);
++ }
++ module_put(THIS_MODULE);
++ return 0;
++}
++
++/* Called when an attempt is made to mmap() the device. The private data from
++ * @flip contains the list of grant references that can be mapped. The vm_pgoff
++ * field of @vma contains the index into that list that refers to the grant
++ * reference that will be mapped. Only mappings that are a multiple of
++ * PAGE_SIZE are handled.
++ */
++static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma)
++{
++ struct gnttab_map_grant_ref op;
++ unsigned long slot_index = vma->vm_pgoff;
++ unsigned long kernel_vaddr, user_vaddr;
++ uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ uint64_t ptep;
++ int ret;
++ int flags;
++ int i;
++ struct page *page;
++ gntdev_file_private_data_t *private_data = flip->private_data;
++
++ if (unlikely(!private_data)) {
++ printk(KERN_ERR "File's private data is NULL.\n");
++ return -EINVAL;
++ }
++
++ /* Test to make sure that the grants array has been initialised. */
++ down_read(&private_data->grants_sem);
++ if (unlikely(!private_data->grants)) {
++ up_read(&private_data->grants_sem);
++ printk(KERN_ERR "Attempted to mmap before ioctl.\n");
++ return -EINVAL;
++ }
++ up_read(&private_data->grants_sem);
++
++ if (unlikely((size == 0) ||
++ (size + slot_index) > private_data->grants_size)) {
++ printk(KERN_ERR "Invalid number of pages or offset"
++ "(num_pages = %d, first_slot = %ld).\n",
++ size, slot_index);
++ return -ENXIO;
++ }
++
++ if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
++ printk(KERN_ERR "Writable mappings must be shared.\n");
++ return -EINVAL;
++ }
++
++ /* Slots must be in the NOT_YET_MAPPED state. */
++ down_write(&private_data->grants_sem);
++ for (i = 0; i < size; ++i) {
++ if (private_data->grants[slot_index + i].state !=
++ GNTDEV_SLOT_NOT_YET_MAPPED) {
++ printk(KERN_ERR "Slot (index = %ld) is in the wrong "
++ "state (%d).\n", slot_index + i,
++ private_data->grants[slot_index + i].state);
++ up_write(&private_data->grants_sem);
++ return -EINVAL;
++ }
++ }
++
++ /* Install the hook for unmapping. */
++ vma->vm_ops = &gntdev_vmops;
++
++ /* The VM area contains pages from another VM. */
++ vma->vm_flags |= VM_FOREIGN;
++ vma->vm_private_data = kzalloc(size * sizeof(struct page *),
++ GFP_KERNEL);
++ if (vma->vm_private_data == NULL) {
++ printk(KERN_ERR "Couldn't allocate mapping structure for VM "
++ "area.\n");
++ return -ENOMEM;
++ }
++
++ /* This flag prevents Bad PTE errors when the memory is unmapped. */
++ vma->vm_flags |= VM_RESERVED;
++
++ /* This flag prevents this VM area being copied on a fork(). A better
++ * behaviour might be to explicitly carry out the appropriate mappings
++ * on fork(), but I don't know if there's a hook for this.
++ */
++ vma->vm_flags |= VM_DONTCOPY;
++
++#ifdef CONFIG_X86
++ /* This flag ensures that the page tables are not unpinned before the
++ * VM area is unmapped. Therefore Xen still recognises the PTE as
++ * belonging to an L1 pagetable, and the grant unmap operation will
++ * succeed, even if the process does not exit cleanly.
++ */
++ vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
++
++ for (i = 0; i < size; ++i) {
++
++ flags = GNTMAP_host_map;
++ if (!(vma->vm_flags & VM_WRITE))
++ flags |= GNTMAP_readonly;
++
++ kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
++ user_vaddr = get_user_vaddr(vma, i);
++ page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
++
++ gnttab_set_map_op(&op, kernel_vaddr, flags,
++ private_data->grants[slot_index+i]
++ .u.valid.ref,
++ private_data->grants[slot_index+i]
++ .u.valid.domid);
++
++ /* Carry out the mapping of the grant reference. */
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status) {
++ printk(KERN_ERR "Error mapping the grant reference "
++ "into the kernel (%d). domid = %d; ref = %d\n",
++ op.status,
++ private_data->grants[slot_index+i]
++ .u.valid.domid,
++ private_data->grants[slot_index+i]
++ .u.valid.ref);
++ goto undo_map_out;
++ }
++
++ /* Store a reference to the page that will be mapped into user
++ * space.
++ */
++ ((struct page **) vma->vm_private_data)[i] = page;
++
++ /* Mark mapped page as reserved. */
++ SetPageReserved(page);
++
++ /* Record the grant handle, for use in the unmap operation. */
++ private_data->grants[slot_index+i].u.valid.kernel_handle =
++ op.handle;
++ private_data->grants[slot_index+i].u.valid.dev_bus_addr =
++ op.dev_bus_addr;
++
++ private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
++ private_data->grants[slot_index+i].u.valid.user_handle =
++ GNTDEV_INVALID_HANDLE;
++
++ /* Now perform the mapping to user space. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++
++ /* NOT USING SHADOW PAGE TABLES. */
++ /* In this case, we map the grant(s) straight into user
++ * space.
++ */
++
++ /* Get the machine address of the PTE for the user
++ * page.
++ */
++ if ((ret = create_lookup_pte_addr(vma->vm_mm,
++ vma->vm_start
++ + (i << PAGE_SHIFT),
++ &ptep)))
++ {
++ printk(KERN_ERR "Error obtaining PTE pointer "
++ "(%d).\n", ret);
++ goto undo_map_out;
++ }
++
++ /* Configure the map operation. */
++
++ /* The reference is to be used by host CPUs. */
++ flags = GNTMAP_host_map;
++
++ /* Specifies a user space mapping. */
++ flags |= GNTMAP_application_map;
++
++ /* The map request contains the machine address of the
++ * PTE to update.
++ */
++ flags |= GNTMAP_contains_pte;
++
++ if (!(vma->vm_flags & VM_WRITE))
++ flags |= GNTMAP_readonly;
++
++ gnttab_set_map_op(&op, ptep, flags,
++ private_data->grants[slot_index+i]
++ .u.valid.ref,
++ private_data->grants[slot_index+i]
++ .u.valid.domid);
++
++ /* Carry out the mapping of the grant reference. */
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status) {
++ printk(KERN_ERR "Error mapping the grant "
++ "reference into user space (%d). domid "
++ "= %d; ref = %d\n", op.status,
++ private_data->grants[slot_index+i].u
++ .valid.domid,
++ private_data->grants[slot_index+i].u
++ .valid.ref);
++ goto undo_map_out;
++ }
++
++ /* Record the grant handle, for use in the unmap
++ * operation.
++ */
++ private_data->grants[slot_index+i].u.
++ valid.user_handle = op.handle;
++
++ /* Update p2m structure with the new mapping. */
++ set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
++ FOREIGN_FRAME(private_data->
++ grants[slot_index+i]
++ .u.valid.dev_bus_addr
++ >> PAGE_SHIFT));
++ } else {
++ /* USING SHADOW PAGE TABLES. */
++ /* In this case, we simply insert the page into the VM
++ * area. */
++ ret = vm_insert_page(vma, user_vaddr, page);
++ }
++
++ }
++
++ up_write(&private_data->grants_sem);
++ return 0;
++
++undo_map_out:
++ /* If we have a mapping failure, the unmapping will be taken care of
++ * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
++ * All we need to do here is free vma->vm_private_data.
++ */
++ kfree(vma->vm_private_data);
++
++ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++ * to NULL on failure. However, we need this in gntdev_clear_pte() to
++ * unmap the grants. Therefore, we smuggle a reference to the file's
++ * private data in the VM area's private data pointer.
++ */
++ vma->vm_private_data = private_data;
++
++ up_write(&private_data->grants_sem);
++
++ return -ENOMEM;
++}
++
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep, int is_fullmm)
++{
++ int slot_index, ret;
++ pte_t copy;
++ struct gnttab_unmap_grant_ref op;
++ gntdev_file_private_data_t *private_data;
++
++ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++ * to NULL on failure. However, we need this in gntdev_clear_pte() to
++ * unmap the grants. Therefore, we smuggle a reference to the file's
++ * private data in the VM area's private data pointer.
++ */
++ if (vma->vm_file) {
++ private_data = (gntdev_file_private_data_t *)
++ vma->vm_file->private_data;
++ } else if (vma->vm_private_data) {
++ private_data = (gntdev_file_private_data_t *)
++ vma->vm_private_data;
++ } else {
++ private_data = NULL; /* silence gcc uninitialised warning */
++ BUG();
++ }
++
++ /* Copy the existing value of the PTE for returning. */
++ copy = *ptep;
++
++ /* Calculate the grant relating to this PTE. */
++ slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
++
++ /* Only unmap grants if the slot has been mapped. This could be being
++ * called from a failing mmap().
++ */
++ if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
++
++ /* First, we clear the user space mapping, if it has been made.
++ */
++ if (private_data->grants[slot_index].u.valid.user_handle !=
++ GNTDEV_INVALID_HANDLE &&
++ !xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* NOT USING SHADOW PAGE TABLES. */
++ gnttab_set_unmap_op(&op, virt_to_machine(ptep),
++ GNTMAP_contains_pte,
++ private_data->grants[slot_index]
++ .u.valid.user_handle);
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, &op, 1);
++ BUG_ON(ret);
++ if (op.status)
++ printk("User unmap grant status = %d\n",
++ op.status);
++ } else {
++ /* USING SHADOW PAGE TABLES. */
++ pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ }
++
++ /* Finally, we unmap the grant from kernel space. */
++ gnttab_set_unmap_op(&op,
++ get_kernel_vaddr(private_data, slot_index),
++ GNTMAP_host_map,
++ private_data->grants[slot_index].u.valid
++ .kernel_handle);
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status)
++ printk("Kernel unmap grant status = %d\n", op.status);
++
++
++ /* Return slot to the not-yet-mapped state, so that it may be
++ * mapped again, or removed by a subsequent ioctl.
++ */
++ private_data->grants[slot_index].state =
++ GNTDEV_SLOT_NOT_YET_MAPPED;
++
++ /* Invalidate the physical to machine mapping for this page. */
++ set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
++ slot_index))
++ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++
++ } else {
++ pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ }
++
++ return copy;
++}
++
++/* "Destructor" for a VM area.
++ */
++static void gntdev_vma_close(struct vm_area_struct *vma) {
++ if (vma->vm_private_data) {
++ kfree(vma->vm_private_data);
++ }
++}
++
++/* Called when an ioctl is made on the device.
++ */
++static long gntdev_ioctl(struct file *flip,
++ unsigned int cmd, unsigned long arg)
++{
++ int rc = 0;
++ gntdev_file_private_data_t *private_data =
++ (gntdev_file_private_data_t *) flip->private_data;
++
++ /* On the first invocation, we will lazily initialise the grant array
++ * and free-list.
++ */
++ if (unlikely(!private_data->grants)
++ && likely(cmd != IOCTL_GNTDEV_SET_MAX_GRANTS)) {
++ down_write(&private_data->grants_sem);
++
++ if (unlikely(private_data->grants)) {
++ up_write(&private_data->grants_sem);
++ goto private_data_initialised;
++ }
++
++ /* Just use the default. Setting to a non-default is handled
++ * in the ioctl switch.
++ */
++ rc = init_private_data(private_data, DEFAULT_MAX_GRANTS);
++
++ up_write(&private_data->grants_sem);
++
++ if (rc) {
++ printk (KERN_ERR "Initialising gntdev private data "
++ "failed.\n");
++ return rc;
++ }
++ }
++
++private_data_initialised:
++ switch (cmd) {
++ case IOCTL_GNTDEV_MAP_GRANT_REF:
++ {
++ struct ioctl_gntdev_map_grant_ref op;
++ down_write(&private_data->grants_sem);
++ down_write(&private_data->free_list_sem);
++
++ if ((rc = copy_from_user(&op, (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto map_out;
++ }
++ if (unlikely(op.count <= 0)) {
++ rc = -EINVAL;
++ goto map_out;
++ }
++
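++ /* A single reference can take any free slot; a batch needs a
++ * contiguous run of slots so that the mmap() offsets derived
++ * from op.index are contiguous as well.
++ */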
++ if (op.count == 1) {
++ if ((rc = add_grant_reference(flip, &op.refs[0],
++ &op.index)) < 0) {
++ printk(KERN_ERR "Adding grant reference "
++ "failed (%d).\n", rc);
++ goto map_out;
++ }
++ } else {
++ struct ioctl_gntdev_grant_ref *refs, *u;
++ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
++ if (!refs) {
++ rc = -ENOMEM;
++ goto map_out;
++ }
++ u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
++ if ((rc = copy_from_user(refs,
++ (void __user *)u,
++ sizeof(*refs) * op.count))) {
++ printk(KERN_ERR "Copying refs from user failed"
++ " (%d).\n", rc);
++ rc = -EINVAL;
++ goto map_out;
++ }
++ if ((rc = find_contiguous_free_range(flip, op.count))
++ < 0) {
++ printk(KERN_ERR "Finding contiguous range "
++ "failed (%d).\n", rc);
++ kfree(refs);
++ goto map_out;
++ }
++ op.index = rc << PAGE_SHIFT;
++ if ((rc = add_grant_references(flip, op.count,
++ refs, rc))) {
++ printk(KERN_ERR "Adding grant references "
++ "failed (%d).\n", rc);
++ kfree(refs);
++ goto map_out;
++ }
++ compress_free_list(flip);
++ kfree(refs);
++ }
++ if ((rc = copy_to_user((void __user *) arg,
++ &op,
++ sizeof(op)))) {
++ printk(KERN_ERR "Copying result back to user failed "
++ "(%d)\n", rc);
++ rc = -EFAULT;
++ goto map_out;
++ }
++ map_out:
++ up_write(&private_data->grants_sem);
++ up_write(&private_data->free_list_sem);
++ return rc;
++ }
++ case IOCTL_GNTDEV_UNMAP_GRANT_REF:
++ {
++ struct ioctl_gntdev_unmap_grant_ref op;
++ int i, start_index;
++
++ down_write(&private_data->grants_sem);
++ down_write(&private_data->free_list_sem);
++
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto unmap_out;
++ }
++
++ start_index = op.index >> PAGE_SHIFT;
++
++ /* First, check that all pages are in the NOT_YET_MAPPED
++ * state.
++ */
++ for (i = 0; i < op.count; ++i) {
++ if (unlikely
++ (private_data->grants[start_index + i].state
++ != GNTDEV_SLOT_NOT_YET_MAPPED)) {
++ if (private_data->grants[start_index + i].state
++ == GNTDEV_SLOT_INVALID) {
++ printk(KERN_ERR
++ "Tried to remove an invalid "
++ "grant at offset 0x%x.",
++ (start_index + i)
++ << PAGE_SHIFT);
++ rc = -EINVAL;
++ } else {
++ printk(KERN_ERR
++ "Tried to remove a grant which "
++ "is currently mmap()-ed at "
++ "offset 0x%x.",
++ (start_index + i)
++ << PAGE_SHIFT);
++ rc = -EBUSY;
++ }
++ goto unmap_out;
++ }
++ }
++
++ /* Unmap pages and add them to the free list.
++ */
++ for (i = 0; i < op.count; ++i) {
++ private_data->grants[start_index+i].state =
++ GNTDEV_SLOT_INVALID;
++ private_data->grants[start_index+i].u.free_list_index =
++ private_data->free_list_size;
++ private_data->free_list[private_data->free_list_size] =
++ start_index + i;
++ ++private_data->free_list_size;
++ }
++
++ unmap_out:
++ up_write(&private_data->grants_sem);
++ up_write(&private_data->free_list_sem);
++ return rc;
++ }
++ case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
++ {
++ struct ioctl_gntdev_get_offset_for_vaddr op;
++ struct vm_area_struct *vma;
++ unsigned long vaddr;
++
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto get_offset_out;
++ }
++ vaddr = (unsigned long)op.vaddr;
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(current->mm, vaddr);
++ if (vma == NULL) {
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
++ printk(KERN_ERR "The vaddr specified does not belong "
++ "to a gntdev instance: %#lx\n", vaddr);
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ if (vma->vm_start != vaddr) {
++ printk(KERN_ERR "The vaddr specified in an "
++ "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
++ "the start of the VM area. vma->vm_start = "
++ "%#lx; vaddr = %#lx\n",
++ vma->vm_start, vaddr);
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ op.offset = vma->vm_pgoff << PAGE_SHIFT;
++ op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ up_read(&current->mm->mmap_sem);
++ if ((rc = copy_to_user((void __user *) arg,
++ &op,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto get_offset_out;
++ }
++ goto get_offset_out;
++ get_offset_unlock_out:
++ up_read(&current->mm->mmap_sem);
++ get_offset_out:
++ return rc;
++ }
++ case IOCTL_GNTDEV_SET_MAX_GRANTS:
++ {
++ struct ioctl_gntdev_set_max_grants op;
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto set_max_out;
++ }
++ down_write(&private_data->grants_sem);
++ if (private_data->grants) {
++ rc = -EBUSY;
++ goto set_max_unlock_out;
++ }
++ if (op.count > MAX_GRANTS_LIMIT) {
++ rc = -EINVAL;
++ goto set_max_unlock_out;
++ }
++ rc = init_private_data(private_data, op.count);
++ set_max_unlock_out:
++ up_write(&private_data->grants_sem);
++ set_max_out:
++ return rc;
++ }
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/gntdev/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/gntdev/Makefile
+--- linux-2.6.18.8/drivers/xen/gntdev/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/gntdev/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1 @@
++obj-$(CONFIG_XEN_GRANT_DEV) := gntdev.o
+diff -rpuN linux-2.6.18.8/drivers/xen/Kconfig linux-2.6.18-xen-3.3.0/drivers/xen/Kconfig
+--- linux-2.6.18.8/drivers/xen/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,314 @@
++#
++# This Kconfig describes Xen options
++#
++
++mainmenu "Xen Configuration"
++
++config XEN
++ bool
++
++if XEN
++config XEN_INTERFACE_VERSION
++ hex
++ default 0x00030207
++
++menu "XEN"
++
++config XEN_PRIVILEGED_GUEST
++ bool "Privileged Guest (domain 0)"
++ help
++ Support for privileged operation (domain 0)
++
++config XEN_UNPRIVILEGED_GUEST
++ def_bool !XEN_PRIVILEGED_GUEST
++
++config XEN_PRIVCMD
++ def_bool y
++ depends on PROC_FS
++
++config XEN_XENBUS_DEV
++ def_bool y
++ depends on PROC_FS
++
++config XEN_NETDEV_ACCEL_SFC_UTIL
++ depends on X86
++ tristate
++
++config XEN_BACKEND
++ tristate "Backend driver support"
++ default XEN_PRIVILEGED_GUEST
++ help
++ Support for backend device drivers that provide I/O services
++ to other virtual machines.
++
++config XEN_BLKDEV_BACKEND
++ tristate "Block-device backend driver"
++ depends on XEN_BACKEND
++ default XEN_BACKEND
++ help
++ The block-device backend driver allows the kernel to export its
++ block devices to other guests via a high-performance shared-memory
++ interface.
++
++config XEN_BLKDEV_TAP
++ tristate "Block-device tap backend driver"
++ depends on XEN_BACKEND
++ default XEN_BACKEND
++ help
++ The block tap driver is an alternative to the block back driver
++ and allows VM block requests to be redirected to userspace through
++ a device interface. The tap allows user-space development of
++ high-performance block backends, where disk images may be implemented
++ as files, in memory, or on other hosts across the network. This
++ driver can safely coexist with the existing blockback driver.
++
++config XEN_NETDEV_BACKEND
++ tristate "Network-device backend driver"
++ depends on XEN_BACKEND && NET
++ default XEN_BACKEND
++ help
++ The network-device backend driver allows the kernel to export its
++ network devices to other guests via a high-performance shared-memory
++ interface.
++
++config XEN_NETDEV_PIPELINED_TRANSMITTER
++ bool "Pipelined transmitter (DANGEROUS)"
++ depends on XEN_NETDEV_BACKEND
++ help
++ If the net backend is a dumb domain, such as a transparent Ethernet
++ bridge with no local IP interface, it is safe to say Y here to get
++ slightly lower network overhead.
++ If the backend has a local IP interface; or may be doing smart things
++ like reassembling packets to perform firewall filtering; or if you
++ are unsure; or if you experience network hangs when this option is
++ enabled; then you must say N here.
++
++config XEN_NETDEV_ACCEL_SFC_BACKEND
++ tristate "Network-device backend driver acceleration for Solarflare NICs"
++ depends on XEN_NETDEV_BACKEND && SFC && SFC_RESOURCE && X86
++ select XEN_NETDEV_ACCEL_SFC_UTIL
++ default m
++
++config XEN_NETDEV_LOOPBACK
++ tristate "Network-device loopback driver"
++ depends on XEN_NETDEV_BACKEND
++ help
++ A two-interface loopback device to emulate a local netfront-netback
++ connection. If unsure, it is probably safe to say N here.
++
++config XEN_PCIDEV_BACKEND
++ tristate "PCI-device backend driver"
++ depends on PCI && XEN_BACKEND
++ default XEN_BACKEND
++ help
++ The PCI device backend driver allows the kernel to export arbitrary
++ PCI devices to other guests. If you select this to be a module, you
++ will need to make sure no other driver has bound to the device(s)
++ you want to make visible to other guests.
++
++choice
++ prompt "PCI Backend Mode"
++ depends on XEN_PCIDEV_BACKEND
++ default XEN_PCIDEV_BACKEND_VPCI if !IA64
++ default XEN_PCIDEV_BACKEND_CONTROLLER if IA64
++
++config XEN_PCIDEV_BACKEND_VPCI
++ bool "Virtual PCI"
++ ---help---
++ This PCI Backend hides the true PCI topology and makes the frontend
++ think there is a single PCI bus with only the exported devices on it.
++ For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
++ second device at 02:1a.1 will be re-assigned to 00:01.1.
++
++config XEN_PCIDEV_BACKEND_PASS
++ bool "Passthrough"
++ ---help---
++ This PCI Backend provides a real view of the PCI topology to the
++ frontend (for example, a device at 06:01.b will still appear at
++ 06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
++ PCI devices to its driver domains. This may be required for drivers
++ which depend on finding their hardware in certain bus/slot
++ locations.
++
++config XEN_PCIDEV_BACKEND_SLOT
++ bool "Slot"
++ ---help---
++ This PCI Backend hides the true PCI topology and makes the frontend
++ think there is a single PCI bus with only the exported devices on it.
++ Unlike the virtual PCI backend, each function becomes a new slot.
++ For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
++ second device at 02:1a.1 will be re-assigned to 00:01.0.
++
++config XEN_PCIDEV_BACKEND_CONTROLLER
++ bool "Controller"
++ depends on IA64
++ ---help---
++ This PCI backend virtualizes the PCI bus topology by providing a
++ virtual bus per PCI root device. Devices which are physically under
++ the same root bus will appear on the same virtual bus. For systems
++ with complex I/O addressing, this is the only backend which supports
++ extended I/O port spaces and MMIO translation offsets. This backend
++ also supports slot virtualization. For example, a device at
++ 0000:01:02.1 will be re-assigned to 0000:00:00.0. A second device
++ at 0000:02:05.0 (behind a P2P bridge on bus 0000:01) will be
++ re-assigned to 0000:00:01.0. A third device at 0000:16:05.0 (under
++ a different PCI root bus) will be re-assigned to 0000:01:00.0.
++
++endchoice
++
++config XEN_PCIDEV_BE_DEBUG
++ bool "PCI Backend Debugging"
++ depends on XEN_PCIDEV_BACKEND
++
++config XEN_TPMDEV_BACKEND
++ tristate "TPM-device backend driver"
++ depends on XEN_BACKEND
++ help
++ The TPM-device backend driver services virtual TPM frontends
++ running in other guest domains.
++
++config XEN_SCSI_BACKEND
++ tristate "SCSI backend driver"
++ depends on SCSI && XEN_BACKEND
++ default m
++ help
++ The SCSI backend driver allows the kernel to export its SCSI devices
++ to other guests via a high-performance shared-memory interface.
++
++config XEN_BLKDEV_FRONTEND
++ tristate "Block-device frontend driver"
++ default y
++ help
++ The block-device frontend driver allows the kernel to access block
++ devices exported by another guest OS. Unless you are building a
++ dedicated device-driver domain or your master control domain
++ (domain 0), you almost certainly want to say Y here.
++
++config XEN_NETDEV_FRONTEND
++ tristate "Network-device frontend driver"
++ depends on NET
++ default y
++ help
++ The network-device frontend driver allows the kernel to access
++ network interfaces exported by another guest OS. Unless you are
++ building a dedicated device-driver domain or your master control
++ domain (domain 0), you almost certainly want to say Y here.
++
++config XEN_NETDEV_ACCEL_SFC_FRONTEND
++ tristate "Network-device frontend driver acceleration for Solarflare NICs"
++ depends on XEN_NETDEV_FRONTEND && X86
++ select XEN_NETDEV_ACCEL_SFC_UTIL
++ default m
++
++config XEN_SCSI_FRONTEND
++ tristate "SCSI frontend driver"
++ depends on SCSI
++ default m
++ help
++ The SCSI frontend driver allows the kernel to access SCSI devices
++ exported by another guest OS.
++
++config XEN_GRANT_DEV
++ tristate "User-space granted page access driver"
++ default XEN_PRIVILEGED_GUEST
++ help
++ Device for accessing (in user-space) pages that have been granted
++ by other domains.
++
++config XEN_FRAMEBUFFER
++ tristate "Framebuffer-device frontend driver"
++ depends on FB
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ default y
++ help
++ The framebuffer-device frontend driver allows the kernel to create a
++ virtual framebuffer. This framebuffer can be viewed in another
++ domain. Unless this domain has access to a real video card, you
++ probably want to say Y here.
++
++config XEN_KEYBOARD
++ tristate "Keyboard-device frontend driver"
++ depends on XEN_FRAMEBUFFER && INPUT
++ default y
++ help
++ The keyboard-device frontend driver allows the kernel to create a
++ virtual keyboard. This keyboard can then be driven by another
++ domain. If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
++ want to say Y here.
++
++config XEN_SCRUB_PAGES
++ bool "Scrub memory before freeing it to Xen"
++ default y
++ help
++ Erase memory contents before freeing it back to Xen's global
++ pool. This ensures that any secrets contained within that
++ memory (e.g., private keys) cannot be found by other guests that
++ may be running on the machine. Most people will want to say Y here.
++ If security is not a concern then you may increase performance by
++ saying N.
++
++config XEN_DISABLE_SERIAL
++ bool "Disable serial port drivers"
++ default y
++ help
++ Disable serial port drivers, allowing the Xen console driver
++ to provide a serial console at ttyS0.
++
++config XEN_SYSFS
++ tristate "Export Xen attributes in sysfs"
++ depends on SYSFS
++ select SYS_HYPERVISOR
++ default y
++ help
++ Xen hypervisor attributes will show up under /sys/hypervisor/.
++
++choice
++ prompt "Xen version compatibility"
++ default XEN_COMPAT_030002_AND_LATER
++
++ config XEN_COMPAT_030002_AND_LATER
++ bool "3.0.2 and later"
++
++ config XEN_COMPAT_030004_AND_LATER
++ bool "3.0.4 and later"
++
++ config XEN_COMPAT_030100_AND_LATER
++ bool "3.1.0 and later"
++
++ config XEN_COMPAT_LATEST_ONLY
++ bool "no compatibility code"
++
++endchoice
++
++config XEN_COMPAT
++ hex
++ default 0xffffff if XEN_COMPAT_LATEST_ONLY
++ default 0x030100 if XEN_COMPAT_030100_AND_LATER
++ default 0x030004 if XEN_COMPAT_030004_AND_LATER
++ default 0x030002 if XEN_COMPAT_030002_AND_LATER
++ default 0
++
++endmenu
++
++config HAVE_IRQ_IGNORE_UNHANDLED
++ def_bool y
++
++config NO_IDLE_HZ
++ def_bool y
++
++config XEN_SMPBOOT
++ def_bool y
++ depends on SMP && !PPC_XEN
++
++config XEN_BALLOON
++ def_bool y
++ depends on !PPC_XEN
++
++config XEN_XENCOMM
++ bool
++
++config XEN_DEVMEM
++ def_bool y
++
++endif
+diff -rpuN linux-2.6.18.8/drivers/xen/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/Makefile
+--- linux-2.6.18.8/drivers/xen/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,25 @@
++obj-y += core/
++obj-y += console/
++obj-y += evtchn/
++obj-y += xenbus/
++obj-y += char/
++
++obj-y += util.o
++obj-$(CONFIG_XEN_BALLOON) += balloon/
++obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
++obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
++obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
++obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
++obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
++obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
++obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/
++obj-$(CONFIG_XEN_KEYBOARD) += fbfront/
++obj-$(CONFIG_XEN_SCSI_BACKEND) += scsiback/
++obj-$(CONFIG_XEN_SCSI_FRONTEND) += scsifront/
++obj-$(CONFIG_XEN_PRIVCMD) += privcmd/
++obj-$(CONFIG_XEN_GRANT_DEV) += gntdev/
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) += sfc_netutil/
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) += sfc_netfront/
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) += sfc_netback/
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/accel.c linux-2.6.18-xen-3.3.0/drivers/xen/netback/accel.c
+--- linux-2.6.18.8/drivers/xen/netback/accel.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/accel.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,269 @@
++/******************************************************************************
++ * drivers/xen/netback/accel.c
++ *
++ * Interface between backend virtual network device and accelerated plugin.
++ *
++ * Copyright (C) 2007 Solarflare Communications, Inc
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/list.h>
++#include <asm/atomic.h>
++#include <xen/xenbus.h>
++#include <linux/mutex.h>
++
++#include "common.h"
++
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ printk("netback/accel (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
++
++/*
++ * A list of available netback accelerator plugin modules (each list
++ * entry is of type struct netback_accelerator)
++ */
++static struct list_head accelerators_list;
++/* Lock used to protect access to accelerators_list */
++DEFINE_MUTEX(accelerators_mutex);
++
++/*
++ * Compare a backend to an accelerator, and decide if they are
++ * compatible (i.e. if the accelerator should be used by the
++ * backend)
++ */
++static int match_accelerator(struct xenbus_device *xendev,
++ struct backend_info *be,
++ struct netback_accelerator *accelerator)
++{
++ int rc = 0;
++ char *eth_name = xenbus_read(XBT_NIL, xendev->nodename, "accel", NULL);
++
++ if (IS_ERR(eth_name)) {
++ /* Probably means not present */
++ DPRINTK("%s: no match due to xenbus_read accel error %d\n",
++ __FUNCTION__, PTR_ERR(eth_name));
++ return 0;
++ } else {
++ if (!strcmp(eth_name, accelerator->eth_name))
++ rc = 1;
++ kfree(eth_name);
++ return rc;
++ }
++}
++
++
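++/* Bind @accelerator to @be and invoke its probe hook. The caller is
++ * expected to hold a reference on the hooks' owner module; on probe
++ * failure that reference is dropped and the backend is left
++ * unaccelerated.
++ */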
++static void do_probe(struct backend_info *be,
++ struct netback_accelerator *accelerator,
++ struct xenbus_device *xendev)
++{
++ be->accelerator = accelerator;
++ atomic_inc(&be->accelerator->use_count);
++ if (be->accelerator->hooks->probe(xendev) != 0) {
++ atomic_dec(&be->accelerator->use_count);
++ module_put(be->accelerator->hooks->owner);
++ be->accelerator = NULL;
++ }
++}
++
++
++/*
++ * Notify suitable backends that a new accelerator is available and
++ * connected. This will also notify the accelerator plugin module
++ * that it is being used for a device through the probe hook.
++ */
++static int netback_accelerator_probe_backend(struct device *dev, void *arg)
++{
++ struct netback_accelerator *accelerator =
++ (struct netback_accelerator *)arg;
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++
++ if (!strcmp("vif", xendev->devicetype)) {
++ struct backend_info *be = xendev->dev.driver_data;
++
++ if (match_accelerator(xendev, be, accelerator) &&
++ try_module_get(accelerator->hooks->owner)) {
++ do_probe(be, accelerator, xendev);
++ }
++ }
++ return 0;
++}
++
++
++/*
++ * Notify suitable backends that an accelerator is unavailable.
++ */
++static int netback_accelerator_remove_backend(struct device *dev, void *arg)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct netback_accelerator *accelerator =
++ (struct netback_accelerator *)arg;
++
++ if (!strcmp("vif", xendev->devicetype)) {
++ struct backend_info *be = xendev->dev.driver_data;
++
++ if (be->accelerator == accelerator) {
++ be->accelerator->hooks->remove(xendev);
++ atomic_dec(&be->accelerator->use_count);
++ module_put(be->accelerator->hooks->owner);
++ be->accelerator = NULL;
++ }
++ }
++ return 0;
++}
++
++
++
++/*
++ * Entry point for a netback accelerator plugin module. Called to
++ * advertise its presence, and connect to any suitable backends.
++ */
++int netback_connect_accelerator(unsigned version, int id, const char *eth_name,
++ struct netback_accel_hooks *hooks)
++{
++ struct netback_accelerator *new_accelerator;
++ unsigned eth_name_len;
++
++ if (version != NETBACK_ACCEL_VERSION) {
++ if (version > NETBACK_ACCEL_VERSION) {
++ /* The caller has a higher version number; leave it
++ up to them to decide whether to continue. They
++ can call again with a lower number if they are
++ happy to be compatible with us. */
++ return NETBACK_ACCEL_VERSION;
++ } else {
++ /* We have a more recent version than the caller.
++ Reject for now; we may be able to offer
++ backwards compatibility in future. */
++ return -EPROTO;
++ }
++ }
++
++ new_accelerator =
++ kmalloc(sizeof(struct netback_accelerator), GFP_KERNEL);
++ if (!new_accelerator) {
++ DPRINTK("%s: failed to allocate memory for accelerator\n",
++ __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ new_accelerator->id = id;
++
++ eth_name_len = strlen(eth_name)+1;
++ new_accelerator->eth_name = kmalloc(eth_name_len, GFP_KERNEL);
++ if (!new_accelerator->eth_name) {
++ DPRINTK("%s: failed to allocate memory for eth_name string\n",
++ __FUNCTION__);
++ kfree(new_accelerator);
++ return -ENOMEM;
++ }
++ strlcpy(new_accelerator->eth_name, eth_name, eth_name_len);
++
++ new_accelerator->hooks = hooks;
++
++ atomic_set(&new_accelerator->use_count, 0);
++
++ mutex_lock(&accelerators_mutex);
++ list_add(&new_accelerator->link, &accelerators_list);
++
++ /* tell existing backends about new plugin */
++ xenbus_for_each_backend(new_accelerator,
++ netback_accelerator_probe_backend);
++
++ mutex_unlock(&accelerators_mutex);
++
++ return 0;
++
++}
++EXPORT_SYMBOL_GPL(netback_connect_accelerator);
++
++
++/*
++ * Disconnect an accelerator plugin module that has previously been
++ * connected.
++ */
++void netback_disconnect_accelerator(int id, const char *eth_name)
++{
++ struct netback_accelerator *accelerator, *next;
++
++ mutex_lock(&accelerators_mutex);
++ list_for_each_entry_safe(accelerator, next, &accelerators_list, link) {
++ if (!strcmp(eth_name, accelerator->eth_name)) {
++ xenbus_for_each_backend
++ (accelerator, netback_accelerator_remove_backend);
++ BUG_ON(atomic_read(&accelerator->use_count) != 0);
++ list_del(&accelerator->link);
++ kfree(accelerator->eth_name);
++ kfree(accelerator);
++ break;
++ }
++ }
++ mutex_unlock(&accelerators_mutex);
++}
++EXPORT_SYMBOL_GPL(netback_disconnect_accelerator);
++
++
++void netback_probe_accelerators(struct backend_info *be,
++ struct xenbus_device *dev)
++{
++ struct netback_accelerator *accelerator;
++
++ /*
++ * Check list of accelerators to see if any is suitable, and
++ * use it if it is.
++ */
++ mutex_lock(&accelerators_mutex);
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(dev, be, accelerator) &&
++ try_module_get(accelerator->hooks->owner)) {
++ do_probe(be, accelerator, dev);
++ break;
++ }
++ }
++ mutex_unlock(&accelerators_mutex);
++}
++
++
++void netback_remove_accelerators(struct backend_info *be,
++ struct xenbus_device *dev)
++{
++ mutex_lock(&accelerators_mutex);
++ /* Notify the accelerator (if any) of this device's removal */
++ if (be->accelerator != NULL) {
++ be->accelerator->hooks->remove(dev);
++ atomic_dec(&be->accelerator->use_count);
++ module_put(be->accelerator->hooks->owner);
++ be->accelerator = NULL;
++ }
++ mutex_unlock(&accelerators_mutex);
++}
++
++
++void netif_accel_init(void)
++{
++ INIT_LIST_HEAD(&accelerators_list);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/common.h linux-2.6.18-xen-3.3.0/drivers/xen/netback/common.h
+--- linux-2.6.18.8/drivers/xen/netback/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,217 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/wait.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/netif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_net: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_net: " fmt, ##args)
++
++typedef struct netif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ u8 fe_dev_addr[6];
++
++ /* Physical parameters of the comms window. */
++ grant_handle_t tx_shmem_handle;
++ grant_ref_t tx_shmem_ref;
++ grant_handle_t rx_shmem_handle;
++ grant_ref_t rx_shmem_ref;
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ netif_tx_back_ring_t tx;
++ netif_rx_back_ring_t rx;
++ struct vm_struct *tx_comms_area;
++ struct vm_struct *rx_comms_area;
++
++ /* Set of features that can be turned on in dev->features. */
++ int features;
++
++ /* Internal feature information. */
++ u8 can_queue:1; /* can queue packets for receiver? */
++ u8 copying_receiver:1; /* copy packets to receiver? */
++
++ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++ RING_IDX rx_req_cons_peek;
++
++ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++ unsigned long credit_bytes;
++ unsigned long credit_usec;
++ unsigned long remaining_credit;
++ struct timer_list credit_timeout;
++
++ /* Enforce draining of the transmit queue. */
++ struct timer_list tx_queue_timeout;
++
++ /* Miscellaneous private stuff. */
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++ struct net_device *dev;
++ struct net_device_stats stats;
++
++ unsigned int carrier;
++
++ wait_queue_head_t waiting_to_free;
++} netif_t;
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss; also the etherbridge
++ * can be rather lazy in activating its port).
++ */
++#define netback_carrier_on(netif) ((netif)->carrier = 1)
++#define netback_carrier_off(netif) ((netif)->carrier = 0)
++#define netback_carrier_ok(netif) ((netif)->carrier)
++
++enum {
++ NETBK_DONT_COPY_SKB,
++ NETBK_DELAYED_COPY_SKB,
++ NETBK_ALWAYS_COPY_SKB,
++};
++
++extern int netbk_copy_skb_mode;
++
++/* Function pointers into netback accelerator plugin modules */
++struct netback_accel_hooks {
++ struct module *owner;
++ int (*probe)(struct xenbus_device *dev);
++ int (*remove)(struct xenbus_device *dev);
++};
++
++/* Structure to track the state of a netback accelerator plugin */
++struct netback_accelerator {
++ struct list_head link;
++ int id;
++ char *eth_name;
++ atomic_t use_count;
++ struct netback_accel_hooks *hooks;
++};
++
++struct backend_info {
++ struct xenbus_device *dev;
++ netif_t *netif;
++ enum xenbus_state frontend_state;
++
++ /* State relating to the netback accelerator */
++ void *netback_accel_priv;
++ /* The accelerator that this backend is currently using */
++ struct netback_accelerator *accelerator;
++};
++
++#define NETBACK_ACCEL_VERSION 0x00010001
++
++/*
++ * Connect an accelerator plugin module to netback. Returns zero on
++ * success, < 0 on error, > 0 (with highest version number supported)
++ * if version mismatch.
++ */
++extern int netback_connect_accelerator(unsigned version,
++ int id, const char *eth_name,
++ struct netback_accel_hooks *hooks);
++/* Disconnect a previously connected accelerator plugin module */
++extern void netback_disconnect_accelerator(int id, const char *eth_name);
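++
++/*
++ * Illustrative sketch only (my_accel_probe/my_accel_remove are assumed
++ * names, not part of this interface): a plugin would typically register
++ * itself as follows.
++ *
++ *	static struct netback_accel_hooks my_hooks = {
++ *		.owner  = THIS_MODULE,
++ *		.probe  = my_accel_probe,
++ *		.remove = my_accel_remove,
++ *	};
++ *	int rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
++ *					     "eth0", &my_hooks);
++ *
++ * rc == 0 means connected, rc < 0 an error, and rc > 0 a version
++ * mismatch, with rc being the highest version netback supports.
++ */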
++
++
++extern
++void netback_probe_accelerators(struct backend_info *be,
++ struct xenbus_device *dev);
++extern
++void netback_remove_accelerators(struct backend_info *be,
++ struct xenbus_device *dev);
++extern
++void netif_accel_init(void);
++
++
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++
++void netif_disconnect(netif_t *netif);
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle);
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn);
++
++#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define netif_put(_b) \
++ do { \
++ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
++ wake_up(&(_b)->waiting_to_free); \
++ } while (0)
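++
++/*
++ * Lifetime rule: netif_get()/netif_put() take and drop references to a
++ * netif. netif_disconnect() drops its own reference and then sleeps on
++ * 'waiting_to_free' until the count reaches zero; the final netif_put()
++ * performs the wake-up.
++ */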
++
++void netif_xenbus_init(void);
++
++#define netif_schedulable(netif) \
++ (netif_running((netif)->dev) && netback_carrier_ok(netif))
++
++void netif_schedule_work(netif_t *netif);
++void netif_deschedule_work(netif_t *netif);
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++static inline int netbk_can_queue(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return netif->can_queue;
++}
++
++static inline int netbk_can_sg(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return netif->features & NETIF_F_SG;
++}
++
++#endif /* __NETIF__BACKEND__COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/interface.c linux-2.6.18-xen-3.3.0/drivers/xen/netback/interface.c
+--- linux-2.6.18.8/drivers/xen/netback/interface.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/interface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,336 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/interface.c
++ *
++ * Network-device interface management.
++ *
++ * Copyright (c) 2004-2005, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
++
++/*
++ * Module parameter 'queue_length':
++ *
++ * Enables queuing in the network stack when a client has run out of receive
++ * descriptors. Although this feature can improve receive bandwidth by avoiding
++ * packet loss, it can also result in packets sitting in the 'tx_queue' for
++ * unbounded time. This is bad if those packets hold onto foreign resources.
++ * For example, consider a packet that holds onto resources belonging to the
++ * guest for which it is queued (e.g., packet received on vif1.0, destined for
++ * vif1.1 which is not activated in the guest): in this situation the guest
++ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
++ * run a timer (tx_queue_timeout) to drain the queue when the interface is
++ * blocked.
++ */
++static unsigned long netbk_queue_length = 32;
++module_param_named(queue_length, netbk_queue_length, ulong, 0);
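++
++/*
++ * Example (illustrative): the default of 32 can be overridden at load
++ * time with "modprobe netbk queue_length=64", or with
++ * "netbk.queue_length=64" on the kernel command line when built in.
++ */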
++
++static void __netif_up(netif_t *netif)
++{
++ enable_irq(netif->irq);
++ netif_schedule_work(netif);
++}
++
++static void __netif_down(netif_t *netif)
++{
++ disable_irq(netif->irq);
++ netif_deschedule_work(netif);
++}
++
++static int net_open(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif)) {
++ __netif_up(netif);
++ netif_start_queue(dev);
++ }
++ return 0;
++}
++
++static int net_close(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif))
++ __netif_down(netif);
++ netif_stop_queue(dev);
++ return 0;
++}
++
++static int netbk_change_mtu(struct net_device *dev, int mtu)
++{
++ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
++
++static int netbk_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_SG))
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_sg(dev, data);
++}
++
++static int netbk_set_tso(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_TSO))
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_tso(dev, data);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = netbk_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = netbk_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle)
++{
++ int err = 0;
++ struct net_device *dev;
++ netif_t *netif;
++ char name[IFNAMSIZ] = {};
++
++ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++ if (dev == NULL) {
++ DPRINTK("Could not create netif: out of memory\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ netif = netdev_priv(dev);
++ memset(netif, 0, sizeof(*netif));
++ netif->domid = domid;
++ netif->handle = handle;
++ atomic_set(&netif->refcnt, 1);
++ init_waitqueue_head(&netif->waiting_to_free);
++ netif->dev = dev;
++
++ netback_carrier_off(netif);
++
++ netif->credit_bytes = netif->remaining_credit = ~0UL;
++ netif->credit_usec = 0UL;
++ init_timer(&netif->credit_timeout);
++ /* Initialize 'expires' now: it's used to track the credit window. */
++ netif->credit_timeout.expires = jiffies;
++
++ init_timer(&netif->tx_queue_timeout);
++
++ dev->hard_start_xmit = netif_be_start_xmit;
++ dev->get_stats = netif_be_get_stats;
++ dev->open = net_open;
++ dev->stop = net_close;
++ dev->change_mtu = netbk_change_mtu;
++ dev->features = NETIF_F_IP_CSUM;
++
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++
++ dev->tx_queue_len = netbk_queue_length;
++
++ /*
++ * Initialise a dummy MAC address. We choose the numerically
++ * largest non-broadcast address to prevent the address getting
++ * stolen by an Ethernet bridge for STP purposes.
++ * (FE:FF:FF:FF:FF:FF)
++ */
++ memset(dev->dev_addr, 0xFF, ETH_ALEN);
++ dev->dev_addr[0] &= ~0x01;
++
++ rtnl_lock();
++ err = register_netdevice(dev);
++ rtnl_unlock();
++ if (err) {
++ DPRINTK("Could not register new net device %s: err=%d\n",
++ dev->name, err);
++ free_netdev(dev);
++ return ERR_PTR(err);
++ }
++
++ DPRINTK("Successfully created netif\n");
++ return netif;
++}
++
++static int map_frontend_pages(
++ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, tx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++ return op.status;
++ }
++
++ netif->tx_shmem_ref = tx_ring_ref;
++ netif->tx_shmem_handle = op.handle;
++
++ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, rx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++ return op.status;
++ }
++
++ netif->rx_shmem_ref = rx_ring_ref;
++ netif->rx_shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_pages(netif_t *netif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, netif->rx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn)
++{
++ int err = -ENOMEM;
++ netif_tx_sring_t *txs;
++ netif_rx_sring_t *rxs;
++
++ /* Already connected through? */
++ if (netif->irq)
++ return 0;
++
++ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->tx_comms_area == NULL)
++ return -ENOMEM;
++ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->rx_comms_area == NULL)
++ goto err_rx;
++
++ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++ if (err)
++ goto err_map;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ netif->domid, evtchn, netif_be_int, 0,
++ netif->dev->name, netif);
++ if (err < 0)
++ goto err_hypervisor;
++ netif->irq = err;
++ disable_irq(netif->irq);
++
++ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
++
++	rxs = (netif_rx_sring_t *)netif->rx_comms_area->addr;
++ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
++
++ netif->rx_req_cons_peek = 0;
++
++ netif_get(netif);
++
++ rtnl_lock();
++ netback_carrier_on(netif);
++ if (netif_running(netif->dev))
++ __netif_up(netif);
++ rtnl_unlock();
++
++ return 0;
++err_hypervisor:
++ unmap_frontend_pages(netif);
++err_map:
++ free_vm_area(netif->rx_comms_area);
++err_rx:
++ free_vm_area(netif->tx_comms_area);
++ return err;
++}
++
++void netif_disconnect(netif_t *netif)
++{
++ if (netback_carrier_ok(netif)) {
++ rtnl_lock();
++ netback_carrier_off(netif);
++ netif_carrier_off(netif->dev); /* discard queued packets */
++ if (netif_running(netif->dev))
++ __netif_down(netif);
++ rtnl_unlock();
++ netif_put(netif);
++ }
++
++ atomic_dec(&netif->refcnt);
++ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
++
++ del_timer_sync(&netif->credit_timeout);
++ del_timer_sync(&netif->tx_queue_timeout);
++
++ if (netif->irq)
++ unbind_from_irqhandler(netif->irq, netif);
++
++ unregister_netdev(netif->dev);
++
++ if (netif->tx.sring) {
++ unmap_frontend_pages(netif);
++ free_vm_area(netif->tx_comms_area);
++ free_vm_area(netif->rx_comms_area);
++ }
++
++ free_netdev(netif->dev);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/loopback.c linux-2.6.18-xen-3.3.0/drivers/xen/netback/loopback.c
+--- linux-2.6.18.8/drivers/xen/netback/loopback.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/loopback.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,324 @@
++/******************************************************************************
++ * netback/loopback.c
++ *
++ * A two-interface loopback device to emulate a local netfront-netback
++ * connection. This ensures that local packet delivery looks identical
++ * to inter-domain delivery. Most importantly, packets delivered locally
++ * originating from other domains will get *copied* when they traverse this
++ * driver. This prevents unbounded delays in socket-buffer queues from
++ * causing the netback driver to "seize up".
++ *
++ * This driver creates a symmetric pair of loopback interfaces with names
++ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
++ * bridge, just like a proper netback interface, while a local IP interface
++ * is configured on 'veth0'.
++ *
++ * As with a real netback interface, vif0.0 is configured with a suitable
++ * dummy MAC address. No default is provided for veth0: a reasonable strategy
++ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
++ * (to avoid confusing the Etherbridge).
++ *
++ * Copyright (c) 2005 K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <net/dst.h>
++#include <net/xfrm.h> /* secpath_reset() */
++#include <asm/hypervisor.h> /* is_initial_xendomain() */
++
++static int nloopbacks = -1;
++module_param(nloopbacks, int, 0);
++MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
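++
++/*
++ * Example (illustrative): "modprobe netloop nloopbacks=8" creates eight
++ * vif0.N/vethN pairs. The default of -1 becomes 4 in the initial domain
++ * and 0 elsewhere (see loopback_init() below).
++ */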
++
++struct net_private {
++ struct net_device *loopback_dev;
++ struct net_device_stats stats;
++};
++
++static int loopback_open(struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++ memset(&np->stats, 0, sizeof(np->stats));
++ netif_start_queue(dev);
++ return 0;
++}
++
++static int loopback_close(struct net_device *dev)
++{
++ netif_stop_queue(dev);
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static int is_foreign(unsigned long pfn)
++{
++ /* NB. Play it safe for auto-translation mode. */
++ return (xen_feature(XENFEAT_auto_translated_physmap) ||
++ (phys_to_machine_mapping[pfn] & FOREIGN_FRAME_BIT));
++}
++#else
++/* How to detect a foreign mapping? Play it safe. */
++#define is_foreign(pfn) (1)
++#endif
++
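++/*
++ * Replace any skb fragments that live in foreign (other-domain) pages
++ * with freshly allocated local copies, so a queued packet never pins
++ * another domain's memory. Returns 1 on success, 0 on allocation failure
++ * (the caller drops the packet in that case).
++ */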
++static int skb_remove_foreign_references(struct sk_buff *skb)
++{
++ struct page *page;
++ unsigned long pfn;
++ int i, off;
++ char *vaddr;
++
++ BUG_ON(skb_shinfo(skb)->frag_list);
++
++ if (skb_cloned(skb) &&
++ unlikely(pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++ return 0;
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
++ if (!is_foreign(pfn))
++ continue;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!page))
++ return 0;
++
++ vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
++ off = skb_shinfo(skb)->frags[i].page_offset;
++ memcpy(page_address(page) + off,
++ vaddr + off,
++ skb_shinfo(skb)->frags[i].size);
++ kunmap_skb_frag(vaddr);
++
++ put_page(skb_shinfo(skb)->frags[i].page);
++ skb_shinfo(skb)->frags[i].page = page;
++ }
++
++ return 1;
++}
++
++static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++
++ if (!skb_remove_foreign_references(skb)) {
++ np->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++ }
++
++ dst_release(skb->dst);
++ skb->dst = NULL;
++
++ skb_orphan(skb);
++
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++
++ /* Switch to loopback context. */
++ dev = np->loopback_dev;
++ np = netdev_priv(dev);
++
++ np->stats.rx_bytes += skb->len;
++ np->stats.rx_packets++;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Defer checksum calculation. */
++ skb->proto_csum_blank = 1;
++ /* Must be a local packet: assert its integrity. */
++ skb->proto_data_valid = 1;
++ }
++
++ skb->ip_summed = skb->proto_data_valid ?
++ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
++
++ skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
++ skb->protocol = eth_type_trans(skb, dev);
++ skb->dev = dev;
++ dev->last_rx = jiffies;
++
++ /* Flush netfilter context: rx'ed skbuffs not expected to have any. */
++ nf_reset(skb);
++ secpath_reset(skb);
++
++ netif_rx(skb);
++
++ return 0;
++}
++
++static struct net_device_stats *loopback_get_stats(struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++ return &np->stats;
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = ethtool_op_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++/*
++ * Nothing to do here. Virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void loopback_set_multicast_list(struct net_device *dev)
++{
++}
++
++static void loopback_construct(struct net_device *dev, struct net_device *lo)
++{
++ struct net_private *np = netdev_priv(dev);
++
++ np->loopback_dev = lo;
++
++ dev->open = loopback_open;
++ dev->stop = loopback_close;
++ dev->hard_start_xmit = loopback_start_xmit;
++ dev->get_stats = loopback_get_stats;
++ dev->set_multicast_list = loopback_set_multicast_list;
++ dev->change_mtu = NULL; /* allow arbitrary mtu */
++
++ dev->tx_queue_len = 0;
++
++ dev->features = (NETIF_F_HIGHDMA |
++ NETIF_F_LLTX |
++ NETIF_F_TSO |
++ NETIF_F_SG |
++ NETIF_F_IP_CSUM);
++
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++
++ /*
++ * We do not set a jumbo MTU on the interface. Otherwise the network
++ * stack will try to send large packets that will get dropped by the
++ * Ethernet bridge (unless the physical Ethernet interface is
++ * configured to transfer jumbo packets). If a larger MTU is desired
++ * then the system administrator can specify it using the 'ifconfig'
++ * command.
++ */
++ /*dev->mtu = 16*1024;*/
++}
++
++static int __init make_loopback(int i)
++{
++ struct net_device *dev1, *dev2;
++ char dev_name[IFNAMSIZ];
++ int err = -ENOMEM;
++
++ sprintf(dev_name, "vif0.%d", i);
++ dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++ if (!dev1)
++ return err;
++
++ sprintf(dev_name, "veth%d", i);
++ dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++ if (!dev2)
++ goto fail_netdev2;
++
++ loopback_construct(dev1, dev2);
++ loopback_construct(dev2, dev1);
++
++ /*
++ * Initialise a dummy MAC address for the 'dummy backend' interface. We
++ * choose the numerically largest non-broadcast address to prevent the
++ * address getting stolen by an Ethernet bridge for STP purposes.
++ */
++ memset(dev1->dev_addr, 0xFF, ETH_ALEN);
++ dev1->dev_addr[0] &= ~0x01;
++
++ if ((err = register_netdev(dev1)) != 0)
++ goto fail;
++
++ if ((err = register_netdev(dev2)) != 0) {
++ unregister_netdev(dev1);
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++ free_netdev(dev2);
++ fail_netdev2:
++ free_netdev(dev1);
++ return err;
++}
++
++static void __exit clean_loopback(int i)
++{
++ struct net_device *dev1, *dev2;
++ char dev_name[IFNAMSIZ];
++
++ sprintf(dev_name, "vif0.%d", i);
++ dev1 = dev_get_by_name(dev_name);
++ sprintf(dev_name, "veth%d", i);
++ dev2 = dev_get_by_name(dev_name);
++ if (dev1 && dev2) {
++ unregister_netdev(dev2);
++ unregister_netdev(dev1);
++ free_netdev(dev2);
++ free_netdev(dev1);
++ }
++}
++
++static int __init loopback_init(void)
++{
++ int i, err = 0;
++
++ if (nloopbacks == -1)
++ nloopbacks = is_initial_xendomain() ? 4 : 0;
++
++ for (i = 0; i < nloopbacks; i++)
++ if ((err = make_loopback(i)) != 0)
++ break;
++
++ return err;
++}
++
++module_init(loopback_init);
++
++static void __exit loopback_exit(void)
++{
++ int i;
++
++ for (i = nloopbacks; i-- > 0; )
++ clean_loopback(i);
++}
++
++module_exit(loopback_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/netback/Makefile
+--- linux-2.6.18.8/drivers/xen/netback/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,5 @@
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
++obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
++
++netbk-y := netback.o xenbus.o interface.o accel.o
++netloop-y := loopback.o
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/netback.c linux-2.6.18-xen-3.3.0/drivers/xen/netback/netback.c
+--- linux-2.6.18.8/drivers/xen/netback/netback.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/netback.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1614 @@
++/******************************************************************************
++ * drivers/xen/netback/netback.c
++ *
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++
++/*define NETBE_DEBUG_INTERRUPT*/
++
++/* extra field used in struct page */
++#define netif_page_index(pg) (*(long *)&(pg)->mapping)
++
++struct netbk_rx_meta {
++ skb_frag_t frag;
++ int id;
++ u8 copy:1;
++};
++
++struct netbk_tx_pending_inuse {
++ struct list_head list;
++ unsigned long alloc_time;
++};
++
++static void netif_idx_release(u16 pending_idx);
++static void netif_page_release(struct page *page);
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st);
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags);
++
++static void net_tx_action(unsigned long unused);
++static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
++
++static void net_rx_action(unsigned long unused);
++static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
++
++static struct timer_list net_timer;
++static struct timer_list netbk_tx_pending_timer;
++
++#define MAX_PENDING_REQS 256
++
++static struct sk_buff_head rx_queue;
++
++static struct page **mmap_pages;
++static inline unsigned long idx_to_pfn(unsigned int idx)
++{
++ return page_to_pfn(mmap_pages[idx]);
++}
++
++static inline unsigned long idx_to_kaddr(unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
++}
++
++#define PKT_PROT_LEN 64
++
++static struct pending_tx_info {
++ netif_tx_request_t req;
++ netif_t *netif;
++} pending_tx_info[MAX_PENDING_REQS];
++static u16 pending_ring[MAX_PENDING_REQS];
++typedef unsigned int PEND_RING_IDX;
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
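++
++/*
++ * pending_ring[] is a ring of free slot indices into pending_tx_info[]:
++ * pending_prod counts slots returned to the ring and pending_cons slots
++ * taken from it, so their difference is the number of free slots and
++ * NR_PENDING_REQS above is the number of requests currently in flight.
++ */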
++
++/* Freed TX SKBs get batched on this ring before return to pending_ring. */
++static u16 dealloc_ring[MAX_PENDING_REQS];
++static PEND_RING_IDX dealloc_prod, dealloc_cons;
++
++/* Doubly-linked list of in-use pending entries. */
++static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
++static LIST_HEAD(pending_inuse_head);
++
++static struct sk_buff_head tx_queue;
++
++static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
++static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
++
++static struct list_head net_schedule_list;
++static spinlock_t net_schedule_list_lock;
++
++#define MAX_MFN_ALLOC 64
++static unsigned long mfn_list[MAX_MFN_ALLOC];
++static unsigned int alloc_index = 0;
++
++/* Setting this allows the safe use of this driver without netloop. */
++static int MODPARM_copy_skb = 1;
++module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
++MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
++
++int netbk_copy_skb_mode;
++
++static inline unsigned long alloc_mfn(void)
++{
++ BUG_ON(alloc_index == 0);
++ return mfn_list[--alloc_index];
++}
++
++static int check_mfn(int nr)
++{
++ struct xen_memory_reservation reservation = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ int rc;
++
++ if (likely(alloc_index >= nr))
++ return 0;
++
++ set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
++ reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
++ rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
++ if (likely(rc > 0))
++ alloc_index += rc;
++
++ return alloc_index >= nr ? 0 : -ENOMEM;
++}
++
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++ !list_empty(&net_schedule_list))
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
++{
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
++
++ skb_reserve(nskb, 16 + NET_IP_ALIGN);
++ headlen = nskb->end - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
++
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
++
++ offset = headlen;
++ len = skb->len - headlen;
++
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
++
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
++
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
++ }
++
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
++
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
++
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
++
++ offset += copy;
++ len -= copy;
++ }
++
++ offset = nskb->data - skb->data;
++
++ nskb->h.raw = skb->h.raw + offset;
++ nskb->nh.raw = skb->nh.raw + offset;
++ nskb->mac.raw = skb->mac.raw + offset;
++
++ return nskb;
++
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
++}
++
++static inline int netbk_max_required_rx_slots(netif_t *netif)
++{
++ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
++ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
++ return 1; /* all in one */
++}
++
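++/*
++ * The rx path counts as "full" when the frontend has not posted enough
++ * receive requests for a worst-case packet, or when accepting one more
++ * packet could overrun the response space still unpushed in the ring.
++ */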
++static inline int netbk_queue_full(netif_t *netif)
++{
++ RING_IDX peek = netif->rx_req_cons_peek;
++ RING_IDX needed = netbk_max_required_rx_slots(netif);
++
++ return ((netif->rx.sring->req_prod - peek) < needed) ||
++ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
++}
++
++static void tx_queue_callback(unsigned long data)
++{
++ netif_t *netif = (netif_t *)data;
++ if (netif_schedulable(netif))
++ netif_wake_queue(netif->dev);
++}
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++
++ BUG_ON(skb->dev != dev);
++
++ /* Drop the packet if the target domain has no receive buffers. */
++ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
++ goto drop;
++
++ /*
++ * Copy the packet here if it's destined for a flipping interface
++ * but isn't flippable (e.g. extra references to data).
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if (!netif->copying_receiver ||
++ ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++ if ( unlikely(nskb == NULL) )
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ nskb->proto_data_valid = skb->proto_data_valid;
++ dev_kfree_skb(skb);
++ skb = nskb;
++ }
++
++ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
++ !!skb_shinfo(skb)->gso_size;
++ netif_get(netif);
++
++ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
++ netif->rx.sring->req_event = netif->rx_req_cons_peek +
++ netbk_max_required_rx_slots(netif);
++ mb(); /* request notification /then/ check & stop the queue */
++ if (netbk_queue_full(netif)) {
++ netif_stop_queue(dev);
++ /*
++ * Schedule 500ms timeout to restart the queue, thus
++ * ensuring that an inactive queue will be drained.
++			 * Packets will immediately be dropped until more
++ * receive buffers become available (see
++ * netbk_queue_full() check above).
++ */
++ netif->tx_queue_timeout.data = (unsigned long)netif;
++ netif->tx_queue_timeout.function = tx_queue_callback;
++ __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
++ }
++ }
++
++ skb_queue_tail(&rx_queue, skb);
++ tasklet_schedule(&net_rx_tasklet);
++
++ return 0;
++
++ drop:
++ netif->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
++
++#if 0
++static void xen_network_done_notify(void)
++{
++ static struct net_device *eth0_dev = NULL;
++ if (unlikely(eth0_dev == NULL))
++ eth0_dev = __dev_get_by_name("eth0");
++ netif_rx_schedule(eth0_dev);
++}
++/*
++ * Add following to poll() function in NAPI driver (Tigon3 is example):
++ * if ( xen_network_done() )
++ * tg3_enable_ints(tp);
++ */
++int xen_network_done(void)
++{
++ return skb_queue_empty(&rx_queue);
++}
++#endif
++
++struct netrx_pending_operations {
++ unsigned trans_prod, trans_cons;
++ unsigned mmu_prod, mmu_mcl;
++ unsigned mcl_prod, mcl_cons;
++ unsigned copy_prod, copy_cons;
++ unsigned meta_prod, meta_cons;
++ mmu_update_t *mmu;
++ gnttab_transfer_t *trans;
++ gnttab_copy_t *copy;
++ multicall_entry_t *mcl;
++ struct netbk_rx_meta *meta;
++};
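++
++/*
++ * Each *_prod/*_cons pair in the structure above tracks one batch of
++ * pending operations (MMU updates, grant transfers, grant copies,
++ * multicall entries) that net_rx_action() builds up while draining
++ * rx_queue and then issues in a single HYPERVISOR_multicall().
++ */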
++
++/* Set up the grant operations for this fragment. If it's a flipping
++ interface, we also set up the unmap request from here. */
++static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
++ int i, struct netrx_pending_operations *npo,
++ struct page *page, unsigned long size,
++ unsigned long offset)
++{
++ mmu_update_t *mmu;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_gop;
++ multicall_entry_t *mcl;
++ netif_rx_request_t *req;
++ unsigned long old_mfn, new_mfn;
++
++ old_mfn = virt_to_mfn(page_address(page));
++
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
++ if (netif->copying_receiver) {
++ /* The fragment needs to be copied rather than
++ flipped. */
++ meta->copy = 1;
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (PageForeign(page)) {
++ struct pending_tx_info *src_pend =
++ &pending_tx_info[netif_page_index(page)];
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = old_mfn;
++ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
++ copy_gop->dest.offset = 0;
++ copy_gop->dest.u.ref = req->gref;
++ copy_gop->len = size;
++ } else {
++ meta->copy = 0;
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ new_mfn = alloc_mfn();
++
++ /*
++ * Set the new P2M table entry before
++ * reassigning the old data page. Heed the
++ * comment in pgtable-2level.h:pte_page(). :-)
++ */
++ set_phys_to_machine(page_to_pfn(page), new_mfn);
++
++ mcl = npo->mcl + npo->mcl_prod++;
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(new_mfn, PAGE_KERNEL),
++ 0);
++
++ mmu = npo->mmu + npo->mmu_prod++;
++ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
++ MMU_MACHPHYS_UPDATE;
++ mmu->val = page_to_pfn(page);
++ }
++
++ gop = npo->trans + npo->trans_prod++;
++ gop->mfn = old_mfn;
++ gop->domid = netif->domid;
++ gop->ref = req->gref;
++ }
++ return req->id;
++}
++
++static void netbk_gop_skb(struct sk_buff *skb,
++ struct netrx_pending_operations *npo)
++{
++ netif_t *netif = netdev_priv(skb->dev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int i;
++ int extra;
++ struct netbk_rx_meta *head_meta, *meta;
++
++ head_meta = npo->meta + npo->meta_prod++;
++ head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
++ head_meta->frag.size = skb_shinfo(skb)->gso_size;
++ extra = !!head_meta->frag.size + 1;
++
++ for (i = 0; i < nr_frags; i++) {
++ meta = npo->meta + npo->meta_prod++;
++ meta->frag = skb_shinfo(skb)->frags[i];
++ meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
++ meta->frag.page,
++ meta->frag.size,
++ meta->frag.page_offset);
++ }
++
++ /*
++ * This must occur at the end to ensure that we don't trash skb_shinfo
++ * until we're done. We know that the head doesn't cross a page
++ * boundary because such packets get copied in netif_be_start_xmit.
++ */
++ head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
++ virt_to_page(skb->data),
++ skb_headlen(skb),
++ offset_in_page(skb->data));
++
++ netif->rx.req_cons += nr_frags + extra;
++}
++
++static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
++{
++ int i;
++
++ for (i = 0; i < nr_frags; i++)
++ put_page(meta[i].frag.page);
++}
++
++/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
++ used to set up the operations on the top of
++ netrx_pending_operations, which have since been done. Check that
++ they didn't give any errors and advance over them. */
++static int netbk_check_gop(int nr_frags, domid_t domid,
++ struct netrx_pending_operations *npo)
++{
++ multicall_entry_t *mcl;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_op;
++ int status = NETIF_RSP_OKAY;
++ int i;
++
++ for (i = 0; i <= nr_frags; i++) {
++ if (npo->meta[npo->meta_cons + i].copy) {
++ copy_op = npo->copy + npo->copy_cons++;
++ if (copy_op->status != GNTST_okay) {
++ DPRINTK("Bad status %d from copy to DOM%d.\n",
++ copy_op->status, domid);
++ status = NETIF_RSP_ERROR;
++ }
++ } else {
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = npo->mcl + npo->mcl_cons++;
++ /* The update_va_mapping() must not fail. */
++ BUG_ON(mcl->result != 0);
++ }
++
++ gop = npo->trans + npo->trans_cons++;
++ /* Check the reassignment error code. */
++ if (gop->status != 0) {
++ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
++ gop->status, domid);
++ /*
++ * Page no longer belongs to us unless
++ * GNTST_bad_page, but that should be
++ * a fatal error anyway.
++ */
++ BUG_ON(gop->status == GNTST_bad_page);
++ status = NETIF_RSP_ERROR;
++ }
++ }
++ }
++
++ return status;
++}
++
++static void netbk_add_frag_responses(netif_t *netif, int status,
++ struct netbk_rx_meta *meta, int nr_frags)
++{
++ int i;
++ unsigned long offset;
++
++ for (i = 0; i < nr_frags; i++) {
++ int id = meta[i].id;
++ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
++
++ if (meta[i].copy)
++ offset = 0;
++ else
++ offset = meta[i].frag.page_offset;
++ make_rx_response(netif, id, status, offset,
++ meta[i].frag.size, flags);
++ }
++}
++
++static void net_rx_action(unsigned long unused)
++{
++ netif_t *netif = NULL;
++ s8 status;
++ u16 id, irq, flags;
++ netif_rx_response_t *resp;
++ multicall_entry_t *mcl;
++ struct sk_buff_head rxq;
++ struct sk_buff *skb;
++ int notify_nr = 0;
++ int ret;
++ int nr_frags;
++ int count;
++ unsigned long offset;
++
++ /*
++ * Putting hundreds of bytes on the stack is considered rude.
++ * Static works because a tasklet can only be on one CPU at any time.
++ */
++ static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
++ static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++ static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
++ static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
++ static unsigned char rx_notify[NR_IRQS];
++ static u16 notify_list[NET_RX_RING_SIZE];
++ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
++
++	struct netrx_pending_operations npo = {
++		.mmu	= rx_mmu,
++		.trans	= grant_trans_op,
++		.copy	= grant_copy_op,
++		.mcl	= rx_mcl,
++		.meta	= meta,
++	};
++
++ skb_queue_head_init(&rxq);
++
++ count = 0;
++
++ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ *(int *)skb->cb = nr_frags;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
++ !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
++ check_mfn(nr_frags + 1)) {
++ /* Memory squeeze? Back off for an arbitrary while. */
++			if (net_ratelimit())
++				WPRINTK("Memory squeeze in netback driver.\n");
++ mod_timer(&net_timer, jiffies + HZ);
++ skb_queue_head(&rx_queue, skb);
++ break;
++ }
++
++ netbk_gop_skb(skb, &npo);
++
++ count += nr_frags + 1;
++
++ __skb_queue_tail(&rxq, skb);
++
++ /* Filled the batch queue? */
++ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++ break;
++ }
++
++ BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
++
++ npo.mmu_mcl = npo.mcl_prod;
++ if (npo.mcl_prod) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++ BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
++ mcl = npo.mcl + npo.mcl_prod++;
++
++ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
++ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
++
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)rx_mmu;
++ mcl->args[1] = npo.mmu_prod;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ }
++
++ if (npo.trans_prod) {
++ BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_transfer;
++ mcl->args[1] = (unsigned long)grant_trans_op;
++ mcl->args[2] = npo.trans_prod;
++ }
++
++ if (npo.copy_prod) {
++ BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_copy;
++ mcl->args[1] = (unsigned long)grant_copy_op;
++ mcl->args[2] = npo.copy_prod;
++ }
++
++ /* Nothing to do? */
++ if (!npo.mcl_prod)
++ return;
++
++ BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
++
++ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
++ BUG_ON(ret != 0);
++ /* The mmu_machphys_update() must not fail. */
++ BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ nr_frags = *(int *)skb->cb;
++
++ netif = netdev_priv(skb->dev);
++ /* We can't rely on skb_release_data to release the
++ pages used by fragments for us, since it tries to
++ touch the pages in the fraglist. If we're in
++ flipping mode, that doesn't work. In copying mode,
++ we still have access to all of the pages, and so
++ it's safe to let release_data deal with it. */
++ /* (Freeing the fragments is safe since we copy
++ non-linear skbs destined for flipping interfaces) */
++ if (!netif->copying_receiver) {
++ atomic_set(&(skb_shinfo(skb)->dataref), 1);
++ skb_shinfo(skb)->frag_list = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
++ }
++
++ netif->stats.tx_bytes += skb->len;
++ netif->stats.tx_packets++;
++
++ status = netbk_check_gop(nr_frags, netif->domid, &npo);
++
++ id = meta[npo.meta_cons].id;
++ flags = nr_frags ? NETRXF_more_data : 0;
++
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ flags |= NETRXF_csum_blank | NETRXF_data_validated;
++ else if (skb->proto_data_valid) /* remote but checksummed? */
++ flags |= NETRXF_data_validated;
++
++ if (meta[npo.meta_cons].copy)
++ offset = 0;
++ else
++ offset = offset_in_page(skb->data);
++ resp = make_rx_response(netif, id, status, offset,
++ skb_headlen(skb), flags);
++
++ if (meta[npo.meta_cons].frag.size) {
++ struct netif_extra_info *gso =
++ (struct netif_extra_info *)
++ RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
++
++ resp->flags |= NETRXF_extra_info;
++
++ gso->u.gso.size = meta[npo.meta_cons].frag.size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ }
++
++ netbk_add_frag_responses(netif, status,
++ meta + npo.meta_cons + 1,
++ nr_frags);
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
++ irq = netif->irq;
++ if (ret && !rx_notify[irq]) {
++ rx_notify[irq] = 1;
++ notify_list[notify_nr++] = irq;
++ }
++
++ if (netif_queue_stopped(netif->dev) &&
++ netif_schedulable(netif) &&
++ !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ netif_put(netif);
++ dev_kfree_skb(skb);
++ npo.meta_cons += nr_frags + 1;
++ }
++
++ while (notify_nr != 0) {
++ irq = notify_list[--notify_nr];
++ rx_notify[irq] = 0;
++ notify_remote_via_irq(irq);
++ }
++
++ /* More work to do? */
++ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
++ tasklet_schedule(&net_rx_tasklet);
++#if 0
++ else
++ xen_network_done_notify();
++#endif
++}
++
++static void net_alarm(unsigned long unused)
++{
++ tasklet_schedule(&net_rx_tasklet);
++}
++
++static void netbk_tx_pending_timeout(unsigned long unused)
++{
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return &netif->stats;
++}
++
++static int __on_net_schedule_list(netif_t *netif)
++{
++ return netif->list.next != NULL;
++}
++
++static void remove_from_net_schedule_list(netif_t *netif)
++{
++ spin_lock_irq(&net_schedule_list_lock);
++ if (likely(__on_net_schedule_list(netif))) {
++ list_del(&netif->list);
++ netif->list.next = NULL;
++ netif_put(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
++}
++
++static void add_to_net_schedule_list_tail(netif_t *netif)
++{
++ if (__on_net_schedule_list(netif))
++ return;
++
++ spin_lock_irq(&net_schedule_list_lock);
++ if (!__on_net_schedule_list(netif) &&
++ likely(netif_schedulable(netif))) {
++ list_add_tail(&netif->list, &net_schedule_list);
++ netif_get(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
++}
++
++/*
++ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
++ * If this driver is pipelining transmit requests then we can be very
++ * aggressive in avoiding new-packet notifications -- frontend only needs to
++ * send a notification if there are no outstanding unreceived responses.
++ * If we may be buffering transmit requests for any reason then we must be rather
++ * more conservative and treat this as the final check for pending work.
++ */
++void netif_schedule_work(netif_t *netif)
++{
++ int more_to_do;
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
++#else
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++#endif
++
++ if (more_to_do) {
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++ }
++}
++
++void netif_deschedule_work(netif_t *netif)
++{
++ remove_from_net_schedule_list(netif);
++}
++
++
++static void tx_add_credit(netif_t *netif)
++{
++ unsigned long max_burst, max_credit;
++
++ /*
++ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
++ * Otherwise the interface can seize up due to insufficient credit.
++ */
++ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
++ max_burst = min(max_burst, 131072UL);
++ max_burst = max(max_burst, netif->credit_bytes);
++
++ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
++ max_credit = netif->remaining_credit + netif->credit_bytes;
++ if (max_credit < netif->remaining_credit)
++ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
++
++ netif->remaining_credit = min(max_credit, max_burst);
++}
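++
++/*
++ * Worked example (illustrative): with credit_bytes = 10000 and
++ * credit_usec = 100000 the interface earns 10kB of credit per 100ms. A
++ * queued 60000-byte packet raises max_burst to 60000, so credit can
++ * accumulate across replenish periods until the packet fits, rather than
++ * clamping at 10000 and stalling the interface forever.
++ */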
++
++static void tx_credit_callback(unsigned long data)
++{
++ netif_t *netif = (netif_t *)data;
++ tx_add_credit(netif);
++ netif_schedule_work(netif);
++}
++
++static inline int copy_pending_req(PEND_RING_IDX pending_idx)
++{
++ return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
++ &mmap_pages[pending_idx]);
++}
++
++static inline void net_tx_action_dealloc(void)
++{
++ struct netbk_tx_pending_inuse *inuse, *n;
++ gnttab_unmap_grant_ref_t *gop;
++ u16 pending_idx;
++ PEND_RING_IDX dc, dp;
++ netif_t *netif;
++ int ret;
++ LIST_HEAD(list);
++
++ dc = dealloc_cons;
++ gop = tx_unmap_ops;
++
++ /*
++ * Free up any grants we have finished using
++ */
++ do {
++ dp = dealloc_prod;
++
++ /* Ensure we see all indices enqueued by netif_idx_release(). */
++ smp_rmb();
++
++ while (dc != dp) {
++ unsigned long pfn;
++
++ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++ list_move_tail(&pending_inuse[pending_idx].list, &list);
++
++ pfn = idx_to_pfn(pending_idx);
++ /* Already unmapped? */
++ if (!phys_to_machine_mapping_valid(pfn))
++ continue;
++
++ gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map,
++ grant_tx_handle[pending_idx]);
++ gop++;
++ }
++
++ if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
++ list_empty(&pending_inuse_head))
++ break;
++
++ /* Copy any entries that have been pending for too long. */
++ list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
++ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
++ break;
++
++ switch (copy_pending_req(inuse - pending_inuse)) {
++ case 0:
++ list_move_tail(&inuse->list, &list);
++ continue;
++ case -EBUSY:
++ list_del_init(&inuse->list);
++ continue;
++ case -ENOENT:
++ continue;
++ }
++
++ break;
++ }
++ } while (dp != dealloc_prod);
++
++ dealloc_cons = dc;
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++ BUG_ON(ret);
++
++ list_for_each_entry_safe(inuse, n, &list, list) {
++ pending_idx = inuse - pending_inuse;
++
++ netif = pending_tx_info[pending_idx].netif;
++
++ make_tx_response(netif, &pending_tx_info[pending_idx].req,
++ NETIF_RSP_OKAY);
++
++ /* Ready for next use. */
++ gnttab_reset_grant_page(mmap_pages[pending_idx]);
++
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++
++ netif_put(netif);
++
++ list_del_init(&inuse->list);
++ }
++}
++
++static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
++{
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ if (cons >= end)
++ break;
++ txp = RING_GET_REQUEST(&netif->tx, cons++);
++ } while (1);
++ netif->tx.req_cons = cons;
++ netif_schedule_work(netif);
++ netif_put(netif);
++}
++
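++/*
++ * Count the extra fragment requests that follow 'first', copying them
++ * into txp[]. Returns the number consumed on success, or minus the
++ * number examined on error so that the caller can advance past the bad
++ * packet (hence the "i - ret" in net_tx_action()).
++ */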
++static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
++ netif_tx_request_t *txp, int work_to_do)
++{
++ RING_IDX cons = netif->tx.req_cons;
++ int frags = 0;
++
++ if (!(first->flags & NETTXF_more_data))
++ return 0;
++
++ do {
++ if (frags >= work_to_do) {
++ DPRINTK("Need more frags\n");
++ return -frags;
++ }
++
++ if (unlikely(frags >= MAX_SKB_FRAGS)) {
++ DPRINTK("Too many frags\n");
++ return -frags;
++ }
++
++ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
++ sizeof(*txp));
++ if (txp->size > first->size) {
++ DPRINTK("Frags galore\n");
++ return -frags;
++ }
++
++ first->size -= txp->size;
++ frags++;
++
++ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
++ DPRINTK("txp->offset: %x, size: %u\n",
++ txp->offset, txp->size);
++ return -frags;
++ }
++ } while ((txp++)->flags & NETTXF_more_data);
++
++ return frags;
++}
++
++static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
++ struct sk_buff *skb,
++ netif_tx_request_t *txp,
++ gnttab_map_grant_ref_t *mop)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ skb_frag_t *frags = shinfo->frags;
++ unsigned long pending_idx = *((u16 *)skb->data);
++ int i, start;
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < shinfo->nr_frags; i++, txp++) {
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
++
++ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txp->gref, netif->domid);
++
++ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
++ netif_get(netif);
++ pending_tx_info[pending_idx].netif = netif;
++ frags[i].page = (void *)pending_idx;
++ }
++
++ return mop;
++}
++
++static int netbk_tx_check_mop(struct sk_buff *skb,
++ gnttab_map_grant_ref_t **mopp)
++{
++ gnttab_map_grant_ref_t *mop = *mopp;
++ int pending_idx = *((u16 *)skb->data);
++ netif_t *netif = pending_tx_info[pending_idx].netif;
++ netif_tx_request_t *txp;
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i, err, start;
++
++ /* Check status of header. */
++ err = mop->status;
++ if (unlikely(err)) {
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++ } else {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ }
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < nr_frags; i++) {
++ int j, newerr;
++
++ pending_idx = (unsigned long)shinfo->frags[i].page;
++
++ /* Check error status: if okay then remember grant handle. */
++ newerr = (++mop)->status;
++ if (likely(!newerr)) {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ /* Had a previous error? Invalidate this fragment. */
++ if (unlikely(err))
++ netif_idx_release(pending_idx);
++ continue;
++ }
++
++ /* Error on this fragment: respond to client with an error. */
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++
++ /* Not the first error? Preceding frags already invalidated. */
++ if (err)
++ continue;
++
++ /* First error: invalidate header and preceding fragments. */
++ pending_idx = *((u16 *)skb->data);
++ netif_idx_release(pending_idx);
++ for (j = start; j < i; j++) {
++			pending_idx = (unsigned long)shinfo->frags[j].page;
++ netif_idx_release(pending_idx);
++ }
++
++ /* Remember the error: invalidate all subsequent fragments. */
++ err = newerr;
++ }
++
++ *mopp = mop + 1;
++ return err;
++}
++
++static void netbk_fill_frags(struct sk_buff *skb)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i;
++
++ for (i = 0; i < nr_frags; i++) {
++ skb_frag_t *frag = shinfo->frags + i;
++ netif_tx_request_t *txp;
++ unsigned long pending_idx;
++
++ pending_idx = (unsigned long)frag->page;
++
++ pending_inuse[pending_idx].alloc_time = jiffies;
++ list_add_tail(&pending_inuse[pending_idx].list,
++ &pending_inuse_head);
++
++ txp = &pending_tx_info[pending_idx].req;
++ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
++ frag->size = txp->size;
++ frag->page_offset = txp->offset;
++
++ skb->len += txp->size;
++ skb->data_len += txp->size;
++ skb->truesize += txp->size;
++ }
++}
++
++int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
++ int work_to_do)
++{
++ struct netif_extra_info extra;
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ if (unlikely(work_to_do-- <= 0)) {
++ DPRINTK("Missing extra info\n");
++ return -EBADR;
++ }
++
++ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
++ sizeof(extra));
++ if (unlikely(!extra.type ||
++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ netif->tx.req_cons = ++cons;
++ DPRINTK("Invalid extra type: %d\n", extra.type);
++ return -EINVAL;
++ }
++
++ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
++ netif->tx.req_cons = ++cons;
++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ return work_to_do;
++}
++
++static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ DPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 S.O. is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++}
++
++/* Called after netfront has transmitted */
++static void net_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ struct sk_buff *skb;
++ netif_t *netif;
++ netif_tx_request_t txreq;
++ netif_tx_request_t txfrags[MAX_SKB_FRAGS];
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ u16 pending_idx;
++ RING_IDX i;
++ gnttab_map_grant_ref_t *mop;
++ unsigned int data_len;
++ int ret, work_to_do;
++
++ if (dealloc_cons != dealloc_prod)
++ net_tx_action_dealloc();
++
++ mop = tx_map_ops;
++ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ !list_empty(&net_schedule_list)) {
++ /* Get a netif from the list with work to do. */
++ ent = net_schedule_list.next;
++ netif = list_entry(ent, netif_t, list);
++ netif_get(netif);
++ remove_from_net_schedule_list(netif);
++
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++ if (!work_to_do) {
++ netif_put(netif);
++ continue;
++ }
++
++ i = netif->tx.req_cons;
++ rmb(); /* Ensure that we see the request before we copy it. */
++ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++
++ /* Credit-based scheduling. */
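++		/*
++		 * Each interface may transmit credit_bytes per
++		 * credit_usec interval.  If this request exceeds the
++		 * remaining allowance, replenish once the interval has
++		 * elapsed, or arm credit_timeout to retry when it has.
++		 */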
++ if (txreq.size > netif->remaining_credit) {
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
++
++ /* Timer could already be pending in rare cases. */
++ if (timer_pending(&netif->credit_timeout)) {
++ netif_put(netif);
++ continue;
++ }
++
++ /* Passed the point where we can replenish credit? */
++ if (time_after_eq(now, next_credit)) {
++ netif->credit_timeout.expires = now;
++ tx_add_credit(netif);
++ }
++
++ /* Still too big to send right now? Set a callback. */
++ if (txreq.size > netif->remaining_credit) {
++ netif->credit_timeout.data =
++ (unsigned long)netif;
++ netif->credit_timeout.function =
++ tx_credit_callback;
++ __mod_timer(&netif->credit_timeout,
++ next_credit);
++ netif_put(netif);
++ continue;
++ }
++ }
++ netif->remaining_credit -= txreq.size;
++
++ work_to_do--;
++ netif->tx.req_cons = ++i;
++
++ memset(extras, 0, sizeof(extras));
++ if (txreq.flags & NETTXF_extra_info) {
++ work_to_do = netbk_get_extras(netif, extras,
++ work_to_do);
++ i = netif->tx.req_cons;
++ if (unlikely(work_to_do < 0)) {
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
++
++ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
++ if (unlikely(ret < 0)) {
++ netbk_tx_err(netif, &txreq, i - ret);
++ continue;
++ }
++ i += ret;
++
++ if (unlikely(txreq.size < ETH_HLEN)) {
++ DPRINTK("Bad packet size: %d\n", txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++
++		/* The payload cannot be fragmented, so it must not cross a page. */
++ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
++ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
++ txreq.offset, txreq.size,
++ (txreq.offset &~PAGE_MASK) + txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++
++ data_len = (txreq.size > PKT_PROT_LEN &&
++ ret < MAX_SKB_FRAGS) ?
++ PKT_PROT_LEN : txreq.size;
++
++ skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(skb == NULL)) {
++			DPRINTK("Can't allocate a skb in net_tx_action.\n");
++ netbk_tx_err(netif, &txreq, i);
++ break;
++ }
++
++ /* Packets passed to netif_rx() must have some headroom. */
++ skb_reserve(skb, 16 + NET_IP_ALIGN);
++
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++
++ if (netbk_set_skb_gso(skb, gso)) {
++ kfree_skb(skb);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
++
++ gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txreq.gref, netif->domid);
++ mop++;
++
++ memcpy(&pending_tx_info[pending_idx].req,
++ &txreq, sizeof(txreq));
++ pending_tx_info[pending_idx].netif = netif;
++ *((u16 *)skb->data) = pending_idx;
++
++ __skb_put(skb, data_len);
++
++ skb_shinfo(skb)->nr_frags = ret;
++ if (data_len < txreq.size) {
++ skb_shinfo(skb)->nr_frags++;
++ skb_shinfo(skb)->frags[0].page =
++ (void *)(unsigned long)pending_idx;
++ } else {
++ /* Discriminate from any valid pending_idx value. */
++ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
++ }
++
++ __skb_queue_tail(&tx_queue, skb);
++
++ pending_cons++;
++
++ mop = netbk_get_requests(netif, skb, txfrags, mop);
++
++ netif->tx.req_cons = i;
++ netif_schedule_work(netif);
++
++ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++ break;
++ }
++
++ if (mop == tx_map_ops)
++ return;
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
++ BUG_ON(ret);
++
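++	/*
++	 * Second pass: the batched map hypercall has completed, so walk
++	 * the queued skbs, verify each grant mapping, copy the protocol
++	 * header into the linear area, fix up the checksum flags, attach
++	 * the fragments and pass the packet up via netif_rx().
++	 */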
++ mop = tx_map_ops;
++ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++ netif_tx_request_t *txp;
++
++ pending_idx = *((u16 *)skb->data);
++ netif = pending_tx_info[pending_idx].netif;
++ txp = &pending_tx_info[pending_idx].req;
++
++ /* Check the remap error code. */
++ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
++ DPRINTK("netback grant failed.\n");
++ skb_shinfo(skb)->nr_frags = 0;
++ kfree_skb(skb);
++ continue;
++ }
++
++ data_len = skb->len;
++ memcpy(skb->data,
++ (void *)(idx_to_kaddr(pending_idx)|txp->offset),
++ data_len);
++ if (data_len < txp->size) {
++ /* Append the packet payload as a fragment. */
++ txp->offset += data_len;
++ txp->size -= data_len;
++ } else {
++ /* Schedule a response immediately. */
++ netif_idx_release(pending_idx);
++ }
++
++ /*
++ * Old frontends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
++ */
++ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->proto_data_valid = 1;
++ } else {
++ skb->ip_summed = CHECKSUM_NONE;
++ skb->proto_data_valid = 0;
++ }
++ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
++
++ netbk_fill_frags(skb);
++
++ skb->dev = netif->dev;
++ skb->protocol = eth_type_trans(skb, skb->dev);
++
++ netif->stats.rx_bytes += skb->len;
++ netif->stats.rx_packets++;
++
++ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
++ unlikely(skb_linearize(skb))) {
++ DPRINTK("Can't linearize skb in net_tx_action.\n");
++ kfree_skb(skb);
++ continue;
++ }
++
++ netif_rx(skb);
++ netif->dev->last_rx = jiffies;
++ }
++
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&pending_inuse_head)) {
++ struct netbk_tx_pending_inuse *oldest;
++
++ oldest = list_entry(pending_inuse_head.next,
++ struct netbk_tx_pending_inuse, list);
++ mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
++ }
++}
++
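++/*
++ * Producer side of the dealloc ring: queue a pending slot for grant
++ * unmapping and kick the tx tasklet.  Safe to call from any context;
++ * a private spinlock serialises concurrent producers.
++ */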
++static void netif_idx_release(u16 pending_idx)
++{
++ static DEFINE_SPINLOCK(_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&_lock, flags);
++ dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
++ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
++ smp_wmb();
++ dealloc_prod++;
++ spin_unlock_irqrestore(&_lock, flags);
++
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++static void netif_page_release(struct page *page)
++{
++ netif_idx_release(netif_page_index(page));
++}
++
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ netif_t *netif = dev_id;
++
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++
++ if (netif_schedulable(netif) && !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ return IRQ_HANDLED;
++}
++
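++/*
++ * Queue a response on the tx ring and notify the frontend over the
++ * event channel if it is waiting.  A request that carried extra info
++ * consumed two ring slots, so a NETIF_RSP_NULL slot is emitted to keep
++ * the response ring in step with the requests.
++ */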
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st)
++{
++ RING_IDX i = netif->tx.rsp_prod_pvt;
++ netif_tx_response_t *resp;
++ int notify;
++
++ resp = RING_GET_RESPONSE(&netif->tx, i);
++ resp->id = txp->id;
++ resp->status = st;
++
++ if (txp->flags & NETTXF_extra_info)
++ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
++
++ netif->tx.rsp_prod_pvt = ++i;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
++ if (notify)
++ notify_remote_via_irq(netif->irq);
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ if (i == netif->tx.req_cons) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++ if (more_to_do)
++ add_to_net_schedule_list_tail(netif);
++ }
++#endif
++}
++
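++/*
++ * Fill in the next rx response slot.  On error the negative status
++ * overrides the size, as the two share the status field.
++ */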
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags)
++{
++ RING_IDX i = netif->rx.rsp_prod_pvt;
++ netif_rx_response_t *resp;
++
++ resp = RING_GET_RESPONSE(&netif->rx, i);
++ resp->offset = offset;
++ resp->flags = flags;
++ resp->id = id;
++ resp->status = (s16)size;
++ if (st < 0)
++ resp->status = (s16)st;
++
++ netif->rx.rsp_prod_pvt = ++i;
++
++ return resp;
++}
++
++#ifdef NETBE_DEBUG_INTERRUPT
++static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++{
++ struct list_head *ent;
++ netif_t *netif;
++ int i = 0;
++
++ printk(KERN_ALERT "netif_schedule_list:\n");
++ spin_lock_irq(&net_schedule_list_lock);
++
++ list_for_each (ent, &net_schedule_list) {
++ netif = list_entry(ent, netif_t, list);
++ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++ "rx_resp_prod=%08x\n",
++ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
++ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++ printk(KERN_ALERT " shared(rx_req_prod=%08x "
++ "rx_resp_prod=%08x\n",
++ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
++ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
++ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
++ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
++ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
++ i++;
++ }
++
++ spin_unlock_irq(&net_schedule_list_lock);
++ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
++
++ return IRQ_HANDLED;
++}
++#endif
++
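++/*
++ * Module initialisation: reserve balloon headroom for the rx path, set
++ * up the timers and the pool of foreign pages that back pending tx
++ * slots, and, if copy_skb was requested, probe for
++ * GNTTABOP_unmap_and_replace to choose between the delayed and
++ * always-copy modes.
++ */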
++static int __init netback_init(void)
++{
++ int i;
++ struct page *page;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* We can increase reservation by this much in net_rx_action(). */
++ balloon_update_driver_allowance(NET_RX_RING_SIZE);
++
++ skb_queue_head_init(&rx_queue);
++ skb_queue_head_init(&tx_queue);
++
++ init_timer(&net_timer);
++ net_timer.data = 0;
++ net_timer.function = net_alarm;
++
++ init_timer(&netbk_tx_pending_timer);
++ netbk_tx_pending_timer.data = 0;
++ netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
++
++ mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++ if (mmap_pages == NULL) {
++ printk("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ page = mmap_pages[i];
++ SetPageForeign(page, netif_page_release);
++ netif_page_index(page) = i;
++ INIT_LIST_HEAD(&pending_inuse[i].list);
++ }
++
++ pending_cons = 0;
++ pending_prod = MAX_PENDING_REQS;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ pending_ring[i] = i;
++
++ spin_lock_init(&net_schedule_list_lock);
++ INIT_LIST_HEAD(&net_schedule_list);
++
++ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
++ if (MODPARM_copy_skb) {
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++ NULL, 0))
++ netbk_copy_skb_mode = NETBK_ALWAYS_COPY_SKB;
++ else
++ netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
++ }
++
++ netif_accel_init();
++
++ netif_xenbus_init();
++
++#ifdef NETBE_DEBUG_INTERRUPT
++ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
++ 0,
++ netif_be_dbg,
++ SA_SHIRQ,
++ "net-be-dbg",
++ &netif_be_dbg);
++#endif
++
++ return 0;
++}
++
++module_init(netback_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/netback/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/netback/xenbus.c
+--- linux-2.6.18.8/drivers/xen/netback/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netback/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,454 @@
++/* Xenbus code for netif backend
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
++
++
++static int connect_rings(struct backend_info *);
++static void connect(struct backend_info *);
++static void backend_create_netif(struct backend_info *be);
++
++static int netback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ netback_remove_accelerators(be, dev);
++
++ if (be->netif) {
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and switch to InitWait.
++ */
++static int netback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++ int sg;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ sg = 1;
++ if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB)
++ sg = 0;
++
++ do {
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto fail;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
++ "%d", sg);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
++
++ /* We support rx-copy path. */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-copy", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-copy";
++ goto abort_transaction;
++ }
++
++ /*
++ * We don't support rx-flip path (except old guests who don't
++ * grok this feature flag).
++ */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-flip", "%d", 0);
++ if (err) {
++ message = "writing feature-rx-flip";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ } while (err == -EAGAIN);
++
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto fail;
++ }
++
++ netback_probe_accelerators(be, dev);
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ /* This kicks hotplug scripts, so do it immediately. */
++ backend_create_netif(be);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++fail:
++ DPRINTK("failed");
++ netback_remove(dev);
++ return err;
++}
++
++
++/**
++ * Handle the creation of the hotplug script environment. We add the script
++ * and vif variables to the environment, for the benefit of the vif-* hotplug
++ * scripts.
++ */
++static int netback_uevent(struct xenbus_device *xdev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct backend_info *be = xdev->dev.driver_data;
++ netif_t *netif = be->netif;
++ int i = 0, length = 0;
++ char *val;
++
++ DPRINTK("netback_uevent");
++
++ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
++ if (IS_ERR(val)) {
++ int err = PTR_ERR(val);
++ xenbus_dev_fatal(xdev, err, "reading script");
++ return err;
++ }
++ else {
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
++ &length, "script=%s", val);
++ kfree(val);
++ }
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "vif=%s", netif->dev->name);
++
++ envp[i] = NULL;
++
++ return 0;
++}
++
++
++static void backend_create_netif(struct backend_info *be)
++{
++ int err;
++ long handle;
++ struct xenbus_device *dev = be->dev;
++
++ if (be->netif != NULL)
++ return;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading handle");
++ return;
++ }
++
++ be->netif = netif_alloc(dev->otherend_id, handle);
++ if (IS_ERR(be->netif)) {
++ err = PTR_ERR(be->netif);
++ be->netif = NULL;
++ xenbus_dev_fatal(dev, err, "creating interface");
++ return;
++ }
++
++ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ DPRINTK("%s", xenbus_strstate(frontend_state));
++
++ be->frontend_state = frontend_state;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ if (dev->state == XenbusStateConnected)
++ break;
++ backend_create_netif(be);
++ if (be->netif)
++ connect(be);
++ break;
++
++ case XenbusStateClosing:
++ if (be->netif) {
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
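++/*
++ * Parse the vif's "rate" node, written as "<bytes>,<usec>"; for
++ * example, "1000000,20000" allows 1000000 bytes per 20000 usec.  An
++ * absent or malformed value leaves traffic unlimited.
++ */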
++static void xen_net_read_rate(struct xenbus_device *dev,
++ unsigned long *bytes, unsigned long *usec)
++{
++ char *s, *e;
++ unsigned long b, u;
++ char *ratestr;
++
++ /* Default to unlimited bandwidth. */
++ *bytes = ~0UL;
++ *usec = 0;
++
++ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
++ if (IS_ERR(ratestr))
++ return;
++
++ s = ratestr;
++ b = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != ','))
++ goto fail;
++
++ s = e + 1;
++ u = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != '\0'))
++ goto fail;
++
++ *bytes = b;
++ *usec = u;
++
++ kfree(ratestr);
++ return;
++
++ fail:
++ WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
++ kfree(ratestr);
++}
++
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++
++static void connect(struct backend_info *be)
++{
++ int err;
++ struct xenbus_device *dev = be->dev;
++
++ err = connect_rings(be);
++ if (err)
++ return;
++
++ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++ return;
++ }
++
++ xen_net_read_rate(dev, &be->netif->credit_bytes,
++ &be->netif->credit_usec);
++ be->netif->remaining_credit = be->netif->credit_bytes;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ netif_wake_queue(be->netif->dev);
++}
++
++
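++/*
++ * Read the frontend's ring references and event channel from xenstore,
++ * record the negotiated features (rx-copy, rx-notify, sg, gso-tcpv4,
++ * csum offload), then map the shared rings and bind the interrupt.
++ */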
++static int connect_rings(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long tx_ring_ref, rx_ring_ref;
++ unsigned int evtchn, rx_copy;
++ int err;
++ int val;
++
++ DPRINTK("");
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "tx-ring-ref", "%lu", &tx_ring_ref,
++ "rx-ring-ref", "%lu", &rx_ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
++ &rx_copy);
++ if (err == -ENOENT) {
++ err = 0;
++ rx_copy = 0;
++ }
++ if (err < 0) {
++ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
++ dev->otherend);
++ return err;
++ }
++ be->netif->copying_receiver = !!rx_copy;
++
++ if (be->netif->dev->tx_queue_len != 0) {
++ if (xenbus_scanf(XBT_NIL, dev->otherend,
++ "feature-rx-notify", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ be->netif->can_queue = 1;
++ else
++ /* Must be non-zero for pfifo_fast to work. */
++ be->netif->dev->tx_queue_len = 1;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_SG;
++ be->netif->dev->features |= NETIF_F_SG;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
++ &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_TSO;
++ be->netif->dev->features |= NETIF_F_TSO;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
++ "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features &= ~NETIF_F_IP_CSUM;
++ be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++ }
++
++ /* Map the shared frame, irq etc. */
++ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "mapping shared-frames %lu/%lu port %u",
++ tx_ring_ref, rx_ring_ref, evtchn);
++ return err;
++ }
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id netback_ids[] = {
++ { "vif" },
++ { "" }
++};
++
++
++static struct xenbus_driver netback = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netback_ids,
++ .probe = netback_probe,
++ .remove = netback_remove,
++ .uevent = netback_uevent,
++ .otherend_changed = frontend_changed,
++};
++
++
++void netif_xenbus_init(void)
++{
++ xenbus_register_backend(&netback);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/netfront/accel.c linux-2.6.18-xen-3.3.0/drivers/xen/netfront/accel.c
+--- linux-2.6.18.8/drivers/xen/netfront/accel.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netfront/accel.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,824 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++
++#include "netfront.h"
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("netfront/accel (%s:%d) " fmt, \
++ __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "netfront/accel: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "netfront/accel: " fmt, ##args)
++
++static int netfront_remove_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev);
++static int netfront_load_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev,
++ const char *frontend);
++
++/*
++ * List of all netfront accelerator plugin modules available. Each
++ * list entry is of type struct netfront_accelerator.
++ */
++static struct list_head accelerators_list;
++
++/* Lock to protect access to accelerators_list */
++static spinlock_t accelerators_lock;
++
++/* Workqueue to process acceleration configuration changes */
++struct workqueue_struct *accel_watch_workqueue;
++
++/* Mutex to prevent concurrent loads and suspends, etc. */
++DEFINE_MUTEX(accelerator_mutex);
++
++void netif_init_accel(void)
++{
++ INIT_LIST_HEAD(&accelerators_list);
++ spin_lock_init(&accelerators_lock);
++
++ accel_watch_workqueue = create_workqueue("net_accel");
++}
++
++void netif_exit_accel(void)
++{
++ struct netfront_accelerator *accelerator, *tmp;
++ unsigned long flags;
++
++ flush_workqueue(accel_watch_workqueue);
++ destroy_workqueue(accel_watch_workqueue);
++
++ spin_lock_irqsave(&accelerators_lock, flags);
++
++ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) {
++ BUG_ON(!list_empty(&accelerator->vif_states));
++
++ list_del(&accelerator->link);
++ kfree(accelerator->frontend);
++ kfree(accelerator);
++ }
++
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++}
++
++
++/*
++ * Watch the configured accelerator and change plugin if it's modified
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void accel_watch_work(struct work_struct *context)
++#else
++static void accel_watch_work(void *context)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ struct netfront_accel_vif_state *vif_state =
++ container_of(context, struct netfront_accel_vif_state,
++ accel_work);
++#else
++ struct netfront_accel_vif_state *vif_state =
++ (struct netfront_accel_vif_state *)context;
++#endif
++ struct netfront_info *np = vif_state->np;
++ char *accel_frontend;
++ int accel_len, rc = -1;
++
++ mutex_lock(&accelerator_mutex);
++
++ accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend,
++ "accel-frontend", &accel_len);
++ if (IS_ERR(accel_frontend)) {
++ accel_frontend = NULL;
++ netfront_remove_accelerator(np, np->xbdev);
++ } else {
++		/* Request the accelerator the first time through;
++		 * afterwards, request again only if it has changed */
++ if (vif_state->accel_frontend == NULL) {
++ rc = netfront_load_accelerator(np, np->xbdev,
++ accel_frontend);
++ } else {
++ if (strncmp(vif_state->accel_frontend, accel_frontend,
++ accel_len)) {
++ netfront_remove_accelerator(np, np->xbdev);
++ rc = netfront_load_accelerator(np, np->xbdev,
++ accel_frontend);
++ }
++ }
++ }
++
++ /* Get rid of previous state and replace with the new name */
++ if (vif_state->accel_frontend != NULL)
++ kfree(vif_state->accel_frontend);
++ vif_state->accel_frontend = accel_frontend;
++
++ mutex_unlock(&accelerator_mutex);
++
++ if (rc == 0) {
++ DPRINTK("requesting module %s\n", accel_frontend);
++ request_module("%s", accel_frontend);
++ /*
++ * Module should now call netfront_accelerator_loaded() once
++ * it's up and running, and we can continue from there
++ */
++ }
++}
++
++
++static void accel_watch_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct netfront_accel_vif_state *vif_state =
++ container_of(watch, struct netfront_accel_vif_state,
++ accel_watch);
++ queue_work(accel_watch_workqueue, &vif_state->accel_work);
++}
++
++
++void netfront_accelerator_add_watch(struct netfront_info *np)
++{
++ int err;
++
++ /* Check we're not trying to overwrite an existing watch */
++ BUG_ON(np->accel_vif_state.accel_watch.node != NULL);
++
++ /* Get a watch on the accelerator plugin */
++ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend,
++ "accel-frontend",
++ &np->accel_vif_state.accel_watch,
++ accel_watch_changed);
++ if (err) {
++ DPRINTK("%s: Failed to register accel watch: %d\n",
++ __FUNCTION__, err);
++ np->accel_vif_state.accel_watch.node = NULL;
++ }
++}
++
++
++static
++void netfront_accelerator_remove_watch(struct netfront_info *np)
++{
++ struct netfront_accel_vif_state *vif_state = &np->accel_vif_state;
++
++ /* Get rid of watch on accelerator plugin */
++ if (vif_state->accel_watch.node != NULL) {
++ unregister_xenbus_watch(&vif_state->accel_watch);
++ kfree(vif_state->accel_watch.node);
++ vif_state->accel_watch.node = NULL;
++
++ flush_workqueue(accel_watch_workqueue);
++
++ /* Clean up any state left from watch */
++ if (vif_state->accel_frontend != NULL) {
++ kfree(vif_state->accel_frontend);
++ vif_state->accel_frontend = NULL;
++ }
++ }
++}
++
++
++/*
++ * Initialise the accel_vif_state field in the netfront state
++ */
++void init_accelerator_vif(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ np->accelerator = NULL;
++
++ /* It's assumed that these things don't change */
++ np->accel_vif_state.np = np;
++ np->accel_vif_state.dev = dev;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work);
++#else
++ INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work,
++ &np->accel_vif_state);
++#endif
++}
++
++
++/*
++ * Compare a frontend description string against an accelerator to see
++ * if they match. Would ultimately be nice to replace the string with
++ * a unique numeric identifier for each accelerator.
++ */
++static int match_accelerator(const char *frontend,
++ struct netfront_accelerator *accelerator)
++{
++ return strcmp(frontend, accelerator->frontend) == 0;
++}
++
++
++/*
++ * Add a frontend vif to the list of vifs that is using a netfront
++ * accelerator plugin module.
++ */
++static void add_accelerator_vif(struct netfront_accelerator *accelerator,
++ struct netfront_info *np)
++{
++ unsigned long flags;
++
++ /* Need lock to write list */
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++ if (np->accelerator == NULL) {
++ np->accelerator = accelerator;
++
++ list_add(&np->accel_vif_state.link, &accelerator->vif_states);
++ } else {
++ /*
++ * May get here legitimately if suspend_cancel is
++ * called, but in that case configuration should not
++ * have changed
++ */
++ BUG_ON(np->accelerator != accelerator);
++ }
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++}
++
++
++/*
++ * Initialise the state to track an accelerator plugin module.
++ */
++static int init_accelerator(const char *frontend,
++ struct netfront_accelerator **result,
++ struct netfront_accel_hooks *hooks)
++{
++ struct netfront_accelerator *accelerator =
++ kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL);
++ unsigned long flags;
++ int frontend_len;
++
++ if (!accelerator) {
++ DPRINTK("no memory for accelerator\n");
++ return -ENOMEM;
++ }
++
++ frontend_len = strlen(frontend) + 1;
++ accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL);
++ if (!accelerator->frontend) {
++ DPRINTK("no memory for accelerator\n");
++ kfree(accelerator);
++ return -ENOMEM;
++ }
++ strlcpy(accelerator->frontend, frontend, frontend_len);
++
++ INIT_LIST_HEAD(&accelerator->vif_states);
++ spin_lock_init(&accelerator->vif_states_lock);
++
++ accelerator->hooks = hooks;
++
++ spin_lock_irqsave(&accelerators_lock, flags);
++ list_add(&accelerator->link, &accelerators_list);
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++
++ *result = accelerator;
++
++ return 0;
++}
++
++
++/*
++ * Modify the hooks stored in the per-vif state to match that in the
++ * netfront accelerator's state.
++ */
++static void
++accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state)
++{
++ /* This function must be called with the vif_states_lock held */
++
++	DPRINTK("%p\n", vif_state);
++
++ /* Make sure there are no data path operations going on */
++ netif_poll_disable(vif_state->np->netdev);
++ netif_tx_lock_bh(vif_state->np->netdev);
++
++ vif_state->hooks = vif_state->np->accelerator->hooks;
++
++ netif_tx_unlock_bh(vif_state->np->netdev);
++ netif_poll_enable(vif_state->np->netdev);
++}
++
++
++static void accelerator_probe_new_vif(struct netfront_info *np,
++ struct xenbus_device *dev,
++ struct netfront_accelerator *accelerator)
++{
++ struct netfront_accel_hooks *hooks;
++ unsigned long flags;
++
++ DPRINTK("\n");
++
++ /* Include this frontend device on the accelerator's list */
++ add_accelerator_vif(accelerator, np);
++
++ hooks = accelerator->hooks;
++
++ if (hooks) {
++ if (hooks->new_device(np->netdev, dev) == 0) {
++ spin_lock_irqsave
++ (&accelerator->vif_states_lock, flags);
++
++ accelerator_set_vif_state_hooks(&np->accel_vif_state);
++
++ spin_unlock_irqrestore
++ (&accelerator->vif_states_lock, flags);
++ }
++ }
++
++ return;
++}
++
++
++/*
++ * Request that a particular netfront accelerator plugin is loaded.
++ * Usually called as a result of the vif configuration specifying
++ * which one to use. Must be called with accelerator_mutex held
++ */
++static int netfront_load_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev,
++ const char *frontend)
++{
++ struct netfront_accelerator *accelerator;
++ int rc = 0;
++
++ DPRINTK(" %s\n", frontend);
++
++ /*
++ * Look at list of loaded accelerators to see if the requested
++ * one is already there
++ */
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(frontend, accelerator)) {
++ accelerator_probe_new_vif(np, dev, accelerator);
++ return 0;
++ }
++ }
++
++ /* Couldn't find it, so create a new one and load the module */
++ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) {
++ return rc;
++ }
++
++ /* Include this frontend device on the accelerator's list */
++ add_accelerator_vif(accelerator, np);
++
++ return rc;
++}
++
++
++/*
++ * Go through all the netfront vifs and see if they have requested
++ * this accelerator. Notify the accelerator plugin of the relevant
++ * device if so. Called when an accelerator plugin module is first
++ * loaded and connects to netfront.
++ */
++static void
++accelerator_probe_vifs(struct netfront_accelerator *accelerator,
++ struct netfront_accel_hooks *hooks)
++{
++ struct netfront_accel_vif_state *vif_state, *tmp;
++ unsigned long flags;
++
++ DPRINTK("%p\n", accelerator);
++
++ /*
++ * Store the hooks for future calls to probe a new device, and
++ * to wire into the vif_state once the accelerator plugin is
++ * ready to accelerate each vif
++ */
++ BUG_ON(hooks == NULL);
++ accelerator->hooks = hooks;
++
++ /*
++	 * We currently hold accelerator_mutex, so we don't need
++	 * vif_states_lock to read the list
++ */
++ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states,
++ link) {
++ struct netfront_info *np = vif_state->np;
++
++ if (hooks->new_device(np->netdev, vif_state->dev) == 0) {
++ spin_lock_irqsave
++ (&accelerator->vif_states_lock, flags);
++
++ accelerator_set_vif_state_hooks(vif_state);
++
++ spin_unlock_irqrestore
++ (&accelerator->vif_states_lock, flags);
++ }
++ }
++}
++
++
++/*
++ * Called by the netfront accelerator plugin module when it has loaded
++ */
++int netfront_accelerator_loaded(int version, const char *frontend,
++ struct netfront_accel_hooks *hooks)
++{
++ struct netfront_accelerator *accelerator;
++
++ if (is_initial_xendomain())
++ return -EINVAL;
++
++ if (version != NETFRONT_ACCEL_VERSION) {
++ if (version > NETFRONT_ACCEL_VERSION) {
++ /* Caller has higher version number, leave it
++ up to them to decide whether to continue.
++ They can re-call with a lower number if
++ they're happy to be compatible with us */
++ return NETFRONT_ACCEL_VERSION;
++ } else {
++			/* We have a more recent version than the caller.
++			   Reject for now; we may be able to be backward
++			   compatible in future */
++ return -EPROTO;
++ }
++ }
++
++ mutex_lock(&accelerator_mutex);
++
++ /*
++ * Look through list of accelerators to see if it has already
++ * been requested
++ */
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(frontend, accelerator)) {
++ accelerator_probe_vifs(accelerator, hooks);
++ goto out;
++ }
++ }
++
++ /*
++ * If it wasn't in the list, add it now so that when it is
++ * requested the caller will find it
++ */
++ DPRINTK("Couldn't find matching accelerator (%s)\n",
++ frontend);
++
++ init_accelerator(frontend, &accelerator, hooks);
++
++ out:
++ mutex_unlock(&accelerator_mutex);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(netfront_accelerator_loaded);
++
++
++/*
++ * Remove the hooks from a single vif state.
++ */
++static void
++accelerator_remove_single_hook(struct netfront_accelerator *accelerator,
++ struct netfront_accel_vif_state *vif_state)
++{
++ /* Make sure there are no data path operations going on */
++ netif_poll_disable(vif_state->np->netdev);
++ netif_tx_lock_bh(vif_state->np->netdev);
++
++ /*
++ * Remove the hooks, but leave the vif_state on the
++ * accelerator's list as that signifies this vif is
++ * interested in using that accelerator if it becomes
++ * available again
++ */
++ vif_state->hooks = NULL;
++
++ netif_tx_unlock_bh(vif_state->np->netdev);
++ netif_poll_enable(vif_state->np->netdev);
++}
++
++
++/*
++ * Safely remove the accelerator function hooks from a netfront state.
++ */
++static void accelerator_remove_hooks(struct netfront_accelerator *accelerator)
++{
++ struct netfront_accel_hooks *hooks;
++ struct netfront_accel_vif_state *vif_state, *tmp;
++ unsigned long flags;
++
++ /* Mutex is held so don't need vif_states_lock to iterate list */
++ list_for_each_entry_safe(vif_state, tmp,
++ &accelerator->vif_states,
++ link) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++		if (vif_state->hooks) {
++ hooks = vif_state->hooks;
++
++ /* Last chance to get statistics from the accelerator */
++ hooks->get_stats(vif_state->np->netdev,
++ &vif_state->np->stats);
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock,
++ flags);
++
++ accelerator_remove_single_hook(accelerator, vif_state);
++
++ accelerator->hooks->remove(vif_state->dev);
++ } else {
++ spin_unlock_irqrestore(&accelerator->vif_states_lock,
++ flags);
++ }
++ }
++
++ accelerator->hooks = NULL;
++}
++
++
++/*
++ * Called by a netfront accelerator when it is unloaded. This safely
++ * removes the hooks into the plugin and blocks until all devices have
++ * finished using it, so on return it is safe to unload.
++ */
++void netfront_accelerator_stop(const char *frontend)
++{
++ struct netfront_accelerator *accelerator;
++ unsigned long flags;
++
++ mutex_lock(&accelerator_mutex);
++ spin_lock_irqsave(&accelerators_lock, flags);
++
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(frontend, accelerator)) {
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++
++ accelerator_remove_hooks(accelerator);
++
++ goto out;
++ }
++ }
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++ out:
++ mutex_unlock(&accelerator_mutex);
++}
++EXPORT_SYMBOL_GPL(netfront_accelerator_stop);
++
++
++/* Helper for call_remove and do_suspend */
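++/*
++ * Called with the accelerator's vif_states_lock held; the lock is
++ * dropped around the plugin callbacks (which may sleep) and re-taken
++ * before returning, hence the flags pointer.
++ */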
++static int do_remove(struct netfront_info *np, struct xenbus_device *dev,
++ unsigned long *lock_flags)
++{
++ struct netfront_accelerator *accelerator = np->accelerator;
++ struct netfront_accel_hooks *hooks;
++ int rc = 0;
++
++ if (np->accel_vif_state.hooks) {
++ hooks = np->accel_vif_state.hooks;
++
++ /* Last chance to get statistics from the accelerator */
++ hooks->get_stats(np->netdev, &np->stats);
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock,
++ *lock_flags);
++
++ /*
++ * Try and do the opposite of accelerator_probe_new_vif
++ * to ensure there's no state pointing back at the
++ * netdev
++ */
++ accelerator_remove_single_hook(accelerator,
++ &np->accel_vif_state);
++
++ rc = accelerator->hooks->remove(dev);
++
++ spin_lock_irqsave(&accelerator->vif_states_lock, *lock_flags);
++ }
++
++ return rc;
++}
++
++
++static int netfront_remove_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_vif_state *tmp_vif_state;
++ unsigned long flags;
++ int rc = 0;
++
++ /* Check that we've got a device that was accelerated */
++ if (np->accelerator == NULL)
++ return rc;
++
++ accelerator = np->accelerator;
++
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++ list_for_each_entry(tmp_vif_state, &accelerator->vif_states,
++ link) {
++ if (tmp_vif_state == &np->accel_vif_state) {
++ list_del(&np->accel_vif_state.link);
++ break;
++ }
++ }
++
++ rc = do_remove(np, dev, &flags);
++
++ np->accelerator = NULL;
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++
++ return rc;
++}
++
++
++int netfront_accelerator_call_remove(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ int rc;
++ netfront_accelerator_remove_watch(np);
++ mutex_lock(&accelerator_mutex);
++ rc = netfront_remove_accelerator(np, dev);
++ mutex_unlock(&accelerator_mutex);
++ return rc;
++}
++
++
++int netfront_accelerator_suspend(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ unsigned long flags;
++ int rc = 0;
++
++ netfront_accelerator_remove_watch(np);
++
++ mutex_lock(&accelerator_mutex);
++
++ /* Check that we've got a device that was accelerated */
++ if (np->accelerator == NULL)
++ goto out;
++
++ /*
++ * Call the remove accelerator hook, but leave the vif_state
++ * on the accelerator's list in case there is a suspend_cancel.
++ */
++ spin_lock_irqsave(&np->accelerator->vif_states_lock, flags);
++
++ rc = do_remove(np, dev, &flags);
++
++ spin_unlock_irqrestore(&np->accelerator->vif_states_lock, flags);
++ out:
++ mutex_unlock(&accelerator_mutex);
++ return rc;
++}
++
++
++int netfront_accelerator_suspend_cancel(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ /*
++ * Setting the watch will cause it to fire and probe the
++ * accelerator, so no need to call accelerator_probe_new_vif()
++ * directly here
++ */
++ if (dev->state == XenbusStateConnected)
++ netfront_accelerator_add_watch(np);
++ return 0;
++}
++
++
++void netfront_accelerator_resume(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ struct netfront_accel_vif_state *accel_vif_state = NULL;
++ spinlock_t *vif_states_lock;
++ unsigned long flags;
++
++ mutex_lock(&accelerator_mutex);
++
++ /* Check that we've got a device that was accelerated */
++	if (np->accelerator == NULL)
++ goto out;
++
++ /* Find the vif_state from the accelerator's list */
++ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states,
++ link) {
++ if (accel_vif_state->dev == dev) {
++ BUG_ON(accel_vif_state != &np->accel_vif_state);
++
++ vif_states_lock = &np->accelerator->vif_states_lock;
++ spin_lock_irqsave(vif_states_lock, flags);
++
++ /*
++ * Remove it from the accelerator's list so
++ * state is consistent for probing new vifs
++ * when they get connected
++ */
++ list_del(&accel_vif_state->link);
++ np->accelerator = NULL;
++
++ spin_unlock_irqrestore(vif_states_lock, flags);
++
++ break;
++ }
++ }
++
++ out:
++ mutex_unlock(&accelerator_mutex);
++ return;
++}
++
++
++int netfront_check_accelerator_queue_ready(struct net_device *dev,
++ struct netfront_info *np)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_hooks *hooks;
++ int rc = 1;
++ unsigned long flags;
++
++ accelerator = np->accelerator;
++
++ /* Call the check_ready accelerator hook. */
++ if (np->accel_vif_state.hooks && accelerator) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++ hooks = np->accel_vif_state.hooks;
++ if (hooks && np->accelerator == accelerator)
++ rc = np->accel_vif_state.hooks->check_ready(dev);
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++ }
++
++ return rc;
++}
++
++
++void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np,
++ struct net_device *dev)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_hooks *hooks;
++ unsigned long flags;
++
++ accelerator = np->accelerator;
++
++ /* Call the stop_napi_interrupts accelerator hook. */
++ if (np->accel_vif_state.hooks && accelerator != NULL) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++ hooks = np->accel_vif_state.hooks;
++ if (hooks && np->accelerator == accelerator)
++ np->accel_vif_state.hooks->stop_napi_irq(dev);
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++ }
++}
++
++
++int netfront_accelerator_call_get_stats(struct netfront_info *np,
++ struct net_device *dev)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_hooks *hooks;
++ unsigned long flags;
++ int rc = 0;
++
++ accelerator = np->accelerator;
++
++ /* Call the get_stats accelerator hook. */
++ if (np->accel_vif_state.hooks && accelerator != NULL) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++ hooks = np->accel_vif_state.hooks;
++ if (hooks && np->accelerator == accelerator)
++ rc = np->accel_vif_state.hooks->get_stats(dev,
++ &np->stats);
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++ }
++ return rc;
++}
++
+diff -rpuN linux-2.6.18.8/drivers/xen/netfront/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/netfront/Makefile
+--- linux-2.6.18.8/drivers/xen/netfront/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netfront/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_NETDEV_FRONTEND) := xennet.o
++
++xennet-objs := netfront.o accel.o
+diff -rpuN linux-2.6.18.8/drivers/xen/netfront/netfront.c linux-2.6.18-xen-3.3.0/drivers/xen/netfront/netfront.c
+--- linux-2.6.18.8/drivers/xen/netfront/netfront.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netfront/netfront.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2240 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/bitops.h>
++#include <linux/ethtool.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++#include <linux/io.h>
++#include <linux/moduleparam.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <net/arp.h>
++#include <net/route.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/netif.h>
++#include <xen/interface/memory.h>
++#include <xen/balloon.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/uaccess.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++
++struct netfront_cb {
++ struct page *page;
++ unsigned offset;
++};
++
++#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
++
++#include "netfront.h"
++
++/*
++ * Mutually-exclusive module options to select receive data path:
++ * rx_copy : Packets are copied by network backend into local memory
++ * rx_flip : Page containing packet data is transferred to our ownership
++ * For fully-virtualised guests there is no option - copying must be used.
++ * For paravirtualised guests, flipping is the default.
++ */
++#ifdef CONFIG_XEN
++static int MODPARM_rx_copy = 0;
++module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
++MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
++static int MODPARM_rx_flip = 0;
++module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
++MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
++#else
++static const int MODPARM_rx_copy = 1;
++static const int MODPARM_rx_flip = 0;
++#endif
++
++#define RX_COPY_THRESHOLD 256
++
++/* If we don't have GSO, fake things up so that we never try to use it. */
++#if defined(NETIF_F_GSO)
++#define HAVE_GSO 1
++#define HAVE_TSO 1 /* TSO is a subset of GSO */
++#define HAVE_CSUM_OFFLOAD 1
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++ /* Turn off all GSO bits except ROBUST. */
++ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
++ dev->features |= NETIF_F_GSO_ROBUST;
++}
++#elif defined(NETIF_F_TSO)
++#define HAVE_GSO 0
++#define HAVE_TSO 1
++
++/* Some older kernels cannot cope with incorrect checksums,
++ * particularly in netfilter. I'm not sure there is 100% correlation
++ * with the presence of NETIF_F_TSO but it appears to be a good first
++ * approximation.
++ */
++#define HAVE_CSUM_OFFLOAD 0
++
++#define gso_size tso_size
++#define gso_segs tso_segs
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++ /* Turn off all TSO bits. */
++ dev->features &= ~NETIF_F_TSO;
++}
++static inline int skb_is_gso(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->tso_size;
++}
++static inline int skb_gso_ok(struct sk_buff *skb, int features)
++{
++ return (features & NETIF_F_TSO);
++}
++
++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
++{
++ return skb_is_gso(skb) &&
++ (!skb_gso_ok(skb, dev->features) ||
++ unlikely(skb->ip_summed != CHECKSUM_HW));
++}
++#else
++#define HAVE_GSO 0
++#define HAVE_TSO 0
++#define HAVE_CSUM_OFFLOAD 0
++#define netif_needs_gso(dev, skb) 0
++#define dev_disable_gso_features(dev) ((void)0)
++#define ethtool_op_set_tso(dev, data) (-ENOSYS)
++#endif
++
++#define GRANT_INVALID_REF 0
++
++struct netfront_rx_info {
++ struct netif_rx_response rx;
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++};
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss).
++ */
++#define netfront_carrier_on(netif) ((netif)->carrier = 1)
++#define netfront_carrier_off(netif) ((netif)->carrier = 0)
++#define netfront_carrier_ok(netif) ((netif)->carrier)
++
++/*
++ * Access macros for acquiring and freeing slots in tx_skbs[].
++ */
++
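++/*
++ * tx_skbs[] doubles as a free list: entry 0 holds the index of the
++ * first free slot, encoded as a pointer-sized integer, and each free
++ * slot holds the index of the next.
++ */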
++static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
++{
++ list[id] = list[0];
++ list[0] = (void *)(unsigned long)id;
++}
++
++static inline unsigned short get_id_from_freelist(struct sk_buff **list)
++{
++ unsigned int id = (unsigned int)(unsigned long)list[0];
++ list[0] = list[id];
++ return id;
++}
++
++static inline int xennet_rxidx(RING_IDX idx)
++{
++ return idx & (NET_RX_RING_SIZE - 1);
++}
++
++static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
++ RING_IDX ri)
++{
++ int i = xennet_rxidx(ri);
++ struct sk_buff *skb = np->rx_skbs[i];
++ np->rx_skbs[i] = NULL;
++ return skb;
++}
++
++static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
++ RING_IDX ri)
++{
++ int i = xennet_rxidx(ri);
++ grant_ref_t ref = np->grant_rx_ref[i];
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ return ref;
++}
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("netfront (%s:%d) " fmt, \
++ __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "netfront: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "netfront: " fmt, ##args)
++
++static int setup_device(struct xenbus_device *, struct netfront_info *);
++static struct net_device *create_netdev(struct xenbus_device *);
++
++static void end_access(int, void *);
++static void netif_disconnect_backend(struct netfront_info *);
++
++static int network_connect(struct net_device *);
++static void network_tx_buf_gc(struct net_device *);
++static void network_alloc_rx_buffers(struct net_device *);
++static void send_fake_arp(struct net_device *);
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++
++#ifdef CONFIG_SYSFS
++static int xennet_sysfs_addif(struct net_device *netdev);
++static void xennet_sysfs_delif(struct net_device *netdev);
++#else /* !CONFIG_SYSFS */
++#define xennet_sysfs_addif(dev) (0)
++#define xennet_sysfs_delif(dev) do { } while(0)
++#endif
++
++static inline int xennet_can_sg(struct net_device *dev)
++{
++ return dev->features & NETIF_F_SG;
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and the ring buffers for communication with the backend, and
++ * inform the backend of the appropriate details for those.
++ */
++static int __devinit netfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct net_device *netdev;
++ struct netfront_info *info;
++
++ netdev = create_netdev(dev);
++ if (IS_ERR(netdev)) {
++ err = PTR_ERR(netdev);
++ xenbus_dev_fatal(dev, err, "creating netdev");
++ return err;
++ }
++
++ info = netdev_priv(netdev);
++ dev->dev.driver_data = info;
++
++ err = register_netdev(info->netdev);
++ if (err) {
++ printk(KERN_WARNING "%s: register_netdev err=%d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ err = xennet_sysfs_addif(info->netdev);
++ if (err) {
++ unregister_netdev(info->netdev);
++ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++ free_netdev(netdev);
++ dev->dev.driver_data = NULL;
++ return err;
++}
++
++static int __devexit netfront_remove(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s\n", dev->nodename);
++
++ netfront_accelerator_call_remove(info, dev);
++
++ netif_disconnect_backend(info);
++
++ del_timer_sync(&info->rx_refill_timer);
++
++ xennet_sysfs_delif(info->netdev);
++
++ unregister_netdev(info->netdev);
++
++ free_netdev(info->netdev);
++
++ return 0;
++}
++
++
++static int netfront_suspend(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++ return netfront_accelerator_suspend(info, dev);
++}
++
++
++static int netfront_suspend_cancel(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++ return netfront_accelerator_suspend_cancel(info, dev);
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart. We tear down our netif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int netfront_resume(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s\n", dev->nodename);
++
++ netfront_accelerator_resume(info, dev);
++
++ netif_disconnect_backend(info);
++ return 0;
++}
++
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++ struct netfront_info *info)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++
++ /* Read mac only in the first setup. */
++ if (!is_valid_ether_addr(info->mac)) {
++ err = xen_net_read_mac(dev, info->mac);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac",
++ dev->nodename);
++ goto out;
++ }
++ }
++
++ /* Create shared ring, alloc event channel. */
++ err = setup_device(dev, info);
++ if (err)
++ goto out;
++
++ /* This will load an accelerator, if one is configured, when
++ * the watch fires */
++ netfront_accelerator_add_watch(info);
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_ring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
++ info->tx_ring_ref);
++ if (err) {
++ message = "writing tx ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
++ info->rx_ring_ref);
++ if (err) {
++ message = "writing rx ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename,
++ "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
++ info->copying_receiver);
++ if (err) {
++ message = "writing request-rx-copy";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-notify";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload",
++ "%d", !HAVE_CSUM_OFFLOAD);
++ if (err) {
++ message = "writing feature-no-csum-offload";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d",
++ HAVE_TSO);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_ring;
++ }
++
++ return 0;
++
++ abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_ring:
++ netfront_accelerator_call_remove(info, dev);
++ netif_disconnect_backend(info);
++ out:
++ return err;
++}
++
++static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
++{
++ struct netif_tx_sring *txs;
++ struct netif_rx_sring *rxs;
++ int err;
++ struct net_device *netdev = info->netdev;
++
++ info->tx_ring_ref = GRANT_INVALID_REF;
++ info->rx_ring_ref = GRANT_INVALID_REF;
++ info->rx.sring = NULL;
++ info->tx.sring = NULL;
++ info->irq = 0;
++
++ txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
++ if (!txs) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err, "allocating tx ring page");
++ goto fail;
++ }
++ SHARED_RING_INIT(txs);
++ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
++ if (err < 0) {
++ free_page((unsigned long)txs);
++ goto fail;
++ }
++ info->tx_ring_ref = err;
++
++ rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
++ if (!rxs) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err, "allocating rx ring page");
++ goto fail;
++ }
++ SHARED_RING_INIT(rxs);
++ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
++ if (err < 0) {
++ free_page((unsigned long)rxs);
++ goto fail;
++ }
++ info->rx_ring_ref = err;
++
++ memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
++ netdev);
++ if (err < 0)
++ goto fail;
++ info->irq = err;
++
++ return 0;
++
++ fail:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct netfront_info *np = dev->dev.driver_data;
++ struct net_device *netdev = np->netdev;
++
++ DPRINTK("%s\n", xenbus_strstate(backend_state));
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ if (dev->state != XenbusStateInitialising)
++ break;
++ if (network_connect(netdev) != 0)
++ break;
++ xenbus_switch_state(dev, XenbusStateConnected);
++ send_fake_arp(netdev);
++ break;
++
++ case XenbusStateClosing:
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++/** Send a packet on a net device to encourage switches to learn the
++ * MAC. We send a fake (gratuitous) ARP reply.
++ *
++ * @param dev device
++ */
++static void send_fake_arp(struct net_device *dev)
++{
++#ifdef CONFIG_INET
++ struct sk_buff *skb;
++ u32 src_ip, dst_ip;
++
++ dst_ip = INADDR_BROADCAST;
++ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
++
++ /* No IP? Then nothing to do. */
++ if (src_ip == 0)
++ return;
++
++ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
++ dst_ip, dev, src_ip,
++ /*dst_hw*/ NULL, /*src_hw*/ NULL,
++ /*target_hw*/ dev->dev_addr);
++ if (skb == NULL)
++ return;
++
++ dev_queue_xmit(skb);
++#endif
++}
++
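++/*
++ * True while the number of requests outstanding to the backend leaves
++ * headroom for one more maximally fragmented skb: MAX_SKB_FRAGS
++ * fragment slots plus two further slots.
++ */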
++static inline int netfront_tx_slot_available(struct netfront_info *np)
++{
++ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
++ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
++}
++
++
++static inline void network_maybe_wake_tx(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ if (unlikely(netif_queue_stopped(dev)) &&
++ netfront_tx_slot_available(np) &&
++ likely(netif_running(dev)) &&
++ netfront_check_accelerator_queue_ready(dev, np))
++ netif_wake_queue(dev);
++}
++
++
++int netfront_check_queue_ready(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ return unlikely(netif_queue_stopped(dev)) &&
++ netfront_tx_slot_available(np) &&
++ likely(netif_running(dev));
++}
++EXPORT_SYMBOL(netfront_check_queue_ready);
++
++
++static int network_open(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ memset(&np->stats, 0, sizeof(np->stats));
++
++ spin_lock_bh(&np->rx_lock);
++ if (netfront_carrier_ok(np)) {
++ network_alloc_rx_buffers(dev);
++ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
++ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
++ netfront_accelerator_call_stop_napi_irq(np, dev);
++
++ netif_rx_schedule(dev);
++ }
++ }
++ spin_unlock_bh(&np->rx_lock);
++
++ network_maybe_wake_tx(dev);
++
++ return 0;
++}
++
++static void network_tx_buf_gc(struct net_device *dev)
++{
++ RING_IDX cons, prod;
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++
++ BUG_ON(!netfront_carrier_ok(np));
++
++ do {
++ prod = np->tx.sring->rsp_prod;
++ rmb(); /* Ensure we see responses up to 'rp'. */
++
++ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
++ struct netif_tx_response *txrsp;
++
++ txrsp = RING_GET_RESPONSE(&np->tx, cons);
++ if (txrsp->status == NETIF_RSP_NULL)
++ continue;
++
++ id = txrsp->id;
++ skb = np->tx_skbs[id];
++ if (unlikely(gnttab_query_foreign_access(
++ np->grant_tx_ref[id]) != 0)) {
++ printk(KERN_ALERT "network_tx_buf_gc: warning "
++ "-- grant still in use by backend "
++ "domain.\n");
++ BUG();
++ }
++ gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
++ gnttab_release_grant_reference(
++ &np->gref_tx_head, np->grant_tx_ref[id]);
++ np->grant_tx_ref[id] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->tx_skbs, id);
++ dev_kfree_skb_irq(skb);
++ }
++
++ np->tx.rsp_cons = prod;
++
++ /*
++ * Set a new event, then check for race with update of tx_cons.
++ * Note that it is essential to schedule a callback, no matter
++ * how few buffers are pending. Even if there is space in the
++ * transmit ring, higher layers may be blocked because too much
++ * data is outstanding: in such cases notification from Xen is
++ * likely to be the only kick that we'll get.
++ */
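++ /* The threshold below asks for the next event once roughly
++ * half of the currently outstanding requests have been
++ * responded to. */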
++ np->tx.sring->rsp_event =
++ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
++ mb();
++ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
++
++ network_maybe_wake_tx(dev);
++}
++
++static void rx_refill_timeout(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netfront_info *np = netdev_priv(dev);
++
++ netfront_accelerator_call_stop_napi_irq(np, dev);
++
++ netif_rx_schedule(dev);
++}
++
++static void network_alloc_rx_buffers(struct net_device *dev)
++{
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++ struct page *page;
++ int i, batch_target, notify;
++ RING_IDX req_prod = np->rx.req_prod_pvt;
++ struct xen_memory_reservation reservation;
++ grant_ref_t ref;
++ unsigned long pfn;
++ void *vaddr;
++ int nr_flips;
++ netif_rx_request_t *req;
++
++ if (unlikely(!netfront_carrier_ok(np)))
++ return;
++
++ /*
++ * Allocate skbuffs greedily, even though we batch updates to the
++ * receive ring. This creates a less bursty demand on the memory
++ * allocator, so should reduce the chance of failed allocation requests
++ * both for ourselves and for other kernel subsystems.
++ */
++ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
++ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
++ /*
++ * Allocate an skb and a page. Do not use __dev_alloc_skb as
++ * that would allocate page-sized buffers, which are not
++ * necessary here.
++ * 16 bytes added as necessary headroom for netif_receive_skb.
++ */
++ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!skb))
++ goto no_skb;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (!page) {
++ kfree_skb(skb);
++no_skb:
++ /* Any skbuffs queued for refill? Force them out. */
++ if (i != 0)
++ goto refill;
++ /* Could not allocate any skbuffs. Try again later. */
++ mod_timer(&np->rx_refill_timer,
++ jiffies + (HZ/10));
++ break;
++ }
++
++ skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
++ skb_shinfo(skb)->frags[0].page = page;
++ skb_shinfo(skb)->nr_frags = 1;
++ __skb_queue_tail(&np->rx_batch, skb);
++ }
++
++ /* Is the batch large enough to be worthwhile? */
++ if (i < (np->rx_target/2)) {
++ if (req_prod > np->rx.sring->req_prod)
++ goto push;
++ return;
++ }
++
++ /* Adjust our fill target if we risked running out of buffers. */
++ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
++ ((np->rx_target *= 2) > np->rx_max_target))
++ np->rx_target = np->rx_max_target;
++
++ refill:
++ for (nr_flips = i = 0; ; i++) {
++ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
++ break;
++
++ skb->dev = dev;
++
++ id = xennet_rxidx(req_prod + i);
++
++ BUG_ON(np->rx_skbs[id]);
++ np->rx_skbs[id] = skb;
++
++ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
++ BUG_ON((signed short)ref < 0);
++ np->grant_rx_ref[id] = ref;
++
++ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
++ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
++
++ req = RING_GET_REQUEST(&np->rx, req_prod + i);
++ if (!np->copying_receiver) {
++ gnttab_grant_foreign_transfer_ref(ref,
++ np->xbdev->otherend_id,
++ pfn);
++ np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remove this page before passing
++ * back to Xen. */
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ MULTI_update_va_mapping(np->rx_mcl+i,
++ (unsigned long)vaddr,
++ __pte(0), 0);
++ }
++ nr_flips++;
++ } else {
++ gnttab_grant_foreign_access_ref(ref,
++ np->xbdev->otherend_id,
++ pfn_to_mfn(pfn),
++ 0);
++ }
++
++ req->id = id;
++ req->gref = ref;
++ }
++
++ if (nr_flips != 0) {
++ /* Tell the balloon driver what is going on. */
++ balloon_update_driver_allowance(i);
++
++ set_xen_guest_handle(reservation.extent_start,
++ np->rx_pfn_array);
++ reservation.nr_extents = nr_flips;
++ reservation.extent_order = 0;
++ reservation.address_bits = 0;
++ reservation.domid = DOMID_SELF;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* After all PTEs have been zapped, flush the TLB. */
++ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
++ UVMF_TLB_FLUSH|UVMF_ALL;
++
++ /* Give away a batch of pages. */
++ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
++ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
++ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
++
++ /* Zap PTEs and give away pages in one big
++ * multicall. */
++ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1)))
++ BUG();
++
++ /* Check return status of HYPERVISOR_memory_op(). */
++ if (unlikely(np->rx_mcl[i].result != i))
++ panic("Unable to reduce memory reservation\n");
++ while (nr_flips--)
++ BUG_ON(np->rx_mcl[nr_flips].result);
++ } else {
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation) != i)
++ panic("Unable to reduce memory reservation\n");
++ }
++ } else {
++ wmb();
++ }
++
++ /* Above is a suitable barrier to ensure backend will see requests. */
++ np->rx.req_prod_pvt = req_prod + i;
++ push:
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
++ if (notify)
++ notify_remote_via_irq(np->irq);
++}
++
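++/*
++ * Split the remainder of an skb across further tx requests: any linear
++ * data beyond the first page, followed by each page fragment, granting
++ * the backend read access to every page touched. The caller has
++ * already queued the request for the start of the linear area.
++ */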
++static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
++ struct netif_tx_request *tx)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ char *data = skb->data;
++ unsigned long mfn;
++ RING_IDX prod = np->tx.req_prod_pvt;
++ int frags = skb_shinfo(skb)->nr_frags;
++ unsigned int offset = offset_in_page(data);
++ unsigned int len = skb_headlen(skb);
++ unsigned int id;
++ grant_ref_t ref;
++ int i;
++
++ while (len > PAGE_SIZE - offset) {
++ tx->size = PAGE_SIZE - offset;
++ tx->flags |= NETTXF_more_data;
++ len -= tx->size;
++ data += tx->size;
++ offset = 0;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb_get(skb);
++ tx = RING_GET_REQUEST(&np->tx, prod++);
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++
++ mfn = virt_to_mfn(data);
++ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++ mfn, GTF_readonly);
++
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = offset;
++ tx->size = len;
++ tx->flags = 0;
++ }
++
++ for (i = 0; i < frags; i++) {
++ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
++
++ tx->flags |= NETTXF_more_data;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb_get(skb);
++ tx = RING_GET_REQUEST(&np->tx, prod++);
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++
++ mfn = pfn_to_mfn(page_to_pfn(frag->page));
++ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++ mfn, GTF_readonly);
++
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = frag->page_offset;
++ tx->size = frag->size;
++ tx->flags = 0;
++ }
++
++ np->tx.req_prod_pvt = prod;
++}
++
++static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct netif_tx_request *tx;
++ struct netif_extra_info *extra;
++ char *data = skb->data;
++ RING_IDX i;
++ grant_ref_t ref;
++ unsigned long mfn;
++ int notify;
++ int frags = skb_shinfo(skb)->nr_frags;
++ unsigned int offset = offset_in_page(data);
++ unsigned int len = skb_headlen(skb);
++
++ /* Check the fast path, if hooks are available */
++ if (np->accel_vif_state.hooks &&
++ np->accel_vif_state.hooks->start_xmit(skb, dev)) {
++ /* Fast path has sent this packet */
++ return 0;
++ }
++
++ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
++ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
++ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
++ frags);
++ dump_stack();
++ goto drop;
++ }
++
++ spin_lock_irq(&np->tx_lock);
++
++ if (unlikely(!netfront_carrier_ok(np) ||
++ (frags > 1 && !xennet_can_sg(dev)) ||
++ netif_needs_gso(dev, skb))) {
++ spin_unlock_irq(&np->tx_lock);
++ goto drop;
++ }
++
++ i = np->tx.req_prod_pvt;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb;
++
++ tx = RING_GET_REQUEST(&np->tx, i);
++
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++ mfn = virt_to_mfn(data);
++ gnttab_grant_foreign_access_ref(
++ ref, np->xbdev->otherend_id, mfn, GTF_readonly);
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = offset;
++ tx->size = len;
++
++ tx->flags = 0;
++ extra = NULL;
++
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
++#ifdef CONFIG_XEN
++ if (skb->proto_data_valid) /* remote but checksummed? */
++ tx->flags |= NETTXF_data_validated;
++#endif
++
++#if HAVE_TSO
++ if (skb_shinfo(skb)->gso_size) {
++ struct netif_extra_info *gso = (struct netif_extra_info *)
++ RING_GET_REQUEST(&np->tx, ++i);
++
++ if (extra)
++ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
++ else
++ tx->flags |= NETTXF_extra_info;
++
++ gso->u.gso.size = skb_shinfo(skb)->gso_size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ extra = gso;
++ }
++#endif
++
++ np->tx.req_prod_pvt = i + 1;
++
++ xennet_make_frags(skb, dev, tx);
++ tx->size = skb->len;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
++ if (notify)
++ notify_remote_via_irq(np->irq);
++
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++ dev->trans_start = jiffies;
++
++ /* Note: It is not safe to access skb after network_tx_buf_gc()! */
++ network_tx_buf_gc(dev);
++
++ if (!netfront_tx_slot_available(np))
++ netif_stop_queue(dev);
++
++ spin_unlock_irq(&np->tx_lock);
++
++ return 0;
++
++ drop:
++ np->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ struct net_device *dev = dev_id;
++ struct netfront_info *np = netdev_priv(dev);
++ unsigned long flags;
++
++ spin_lock_irqsave(&np->tx_lock, flags);
++
++ if (likely(netfront_carrier_ok(np))) {
++ network_tx_buf_gc(dev);
++ /* Under tx_lock: protects access to rx shared-ring indexes. */
++ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
++ netfront_accelerator_call_stop_napi_irq(np, dev);
++
++ netif_rx_schedule(dev);
++ dev->last_rx = jiffies;
++ }
++ }
++
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
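++/* Re-queue an unconsumed rx buffer (skb plus grant reference) in the
++ * next free request slot so it can be offered to the backend again. */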
++static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
++ grant_ref_t ref)
++{
++ int new = xennet_rxidx(np->rx.req_prod_pvt);
++
++ BUG_ON(np->rx_skbs[new]);
++ np->rx_skbs[new] = skb;
++ np->grant_rx_ref[new] = ref;
++ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
++ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
++ np->rx.req_prod_pvt++;
++}
++
++int xennet_get_extras(struct netfront_info *np,
++ struct netif_extra_info *extras, RING_IDX rp)
++{
++ struct netif_extra_info *extra;
++ RING_IDX cons = np->rx.rsp_cons;
++ int err = 0;
++
++ do {
++ struct sk_buff *skb;
++ grant_ref_t ref;
++
++ if (unlikely(cons + 1 == rp)) {
++ if (net_ratelimit())
++ WPRINTK("Missing extra info\n");
++ err = -EBADR;
++ break;
++ }
++
++ extra = (struct netif_extra_info *)
++ RING_GET_RESPONSE(&np->rx, ++cons);
++
++ if (unlikely(!extra->type ||
++ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ if (net_ratelimit())
++ WPRINTK("Invalid extra type: %d\n",
++ extra->type);
++ err = -EINVAL;
++ } else {
++ memcpy(&extras[extra->type - 1], extra,
++ sizeof(*extra));
++ }
++
++ skb = xennet_get_rx_skb(np, cons);
++ ref = xennet_get_rx_ref(np, cons);
++ xennet_move_rx_slot(np, skb, ref);
++ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ np->rx.rsp_cons = cons;
++ return err;
++}
++
++static int xennet_get_responses(struct netfront_info *np,
++ struct netfront_rx_info *rinfo, RING_IDX rp,
++ struct sk_buff_head *list,
++ int *pages_flipped_p)
++{
++ int pages_flipped = *pages_flipped_p;
++ struct mmu_update *mmu;
++ struct multicall_entry *mcl;
++ struct netif_rx_response *rx = &rinfo->rx;
++ struct netif_extra_info *extras = rinfo->extras;
++ RING_IDX cons = np->rx.rsp_cons;
++ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
++ grant_ref_t ref = xennet_get_rx_ref(np, cons);
++ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
++ int frags = 1;
++ int err = 0;
++ unsigned long ret;
++
++ if (rx->flags & NETRXF_extra_info) {
++ err = xennet_get_extras(np, extras, rp);
++ cons = np->rx.rsp_cons;
++ }
++
++ for (;;) {
++ unsigned long mfn;
++
++ if (unlikely(rx->status < 0 ||
++ rx->offset + rx->status > PAGE_SIZE)) {
++ if (net_ratelimit())
++ WPRINTK("rx->offset: %x, size: %u\n",
++ rx->offset, rx->status);
++ xennet_move_rx_slot(np, skb, ref);
++ err = -EINVAL;
++ goto next;
++ }
++
++ /*
++ * This definitely indicates a bug, either in this driver or in
++ * the backend driver. In future this should flag the bad
++ * situation to the system controller to reboot the backend.
++ */
++ if (ref == GRANT_INVALID_REF) {
++ if (net_ratelimit())
++ WPRINTK("Bad rx response id %d.\n", rx->id);
++ err = -EINVAL;
++ goto next;
++ }
++
++ if (!np->copying_receiver) {
++ /* Memory pressure, insufficient buffer
++ * headroom, ... */
++ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
++ if (net_ratelimit())
++ WPRINTK("Unfulfilled rx req "
++ "(id=%d, st=%d).\n",
++ rx->id, rx->status);
++ xennet_move_rx_slot(np, skb, ref);
++ err = -ENOMEM;
++ goto next;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remap the page. */
++ struct page *page =
++ skb_shinfo(skb)->frags[0].page;
++ unsigned long pfn = page_to_pfn(page);
++ void *vaddr = page_address(page);
++
++ mcl = np->rx_mcl + pages_flipped;
++ mmu = np->rx_mmu + pages_flipped;
++
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)vaddr,
++ pfn_pte_ma(mfn,
++ PAGE_KERNEL),
++ 0);
++ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++ | MMU_MACHPHYS_UPDATE;
++ mmu->val = pfn;
++
++ set_phys_to_machine(pfn, mfn);
++ }
++ pages_flipped++;
++ } else {
++ ret = gnttab_end_foreign_access_ref(ref);
++ BUG_ON(!ret);
++ }
++
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++
++ __skb_queue_tail(list, skb);
++
++next:
++ if (!(rx->flags & NETRXF_more_data))
++ break;
++
++ if (cons + frags == rp) {
++ if (net_ratelimit())
++ WPRINTK("Need more frags\n");
++ err = -ENOENT;
++ break;
++ }
++
++ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
++ skb = xennet_get_rx_skb(np, cons + frags);
++ ref = xennet_get_rx_ref(np, cons + frags);
++ frags++;
++ }
++
++ if (unlikely(frags > max)) {
++ if (net_ratelimit())
++ WPRINTK("Too many frags\n");
++ err = -E2BIG;
++ }
++
++ if (unlikely(err))
++ np->rx.rsp_cons = cons + frags;
++
++ *pages_flipped_p = pages_flipped;
++
++ return err;
++}
++
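++/* Attach each buffer queued on 'list' to skb as a page fragment,
++ * consuming the corresponding rx responses; returns the ring index of
++ * the last response consumed. */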
++static RING_IDX xennet_fill_frags(struct netfront_info *np,
++ struct sk_buff *skb,
++ struct sk_buff_head *list)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ RING_IDX cons = np->rx.rsp_cons;
++ skb_frag_t *frag = shinfo->frags + nr_frags;
++ struct sk_buff *nskb;
++
++ while ((nskb = __skb_dequeue(list))) {
++ struct netif_rx_response *rx =
++ RING_GET_RESPONSE(&np->rx, ++cons);
++
++ frag->page = skb_shinfo(nskb)->frags[0].page;
++ frag->page_offset = rx->offset;
++ frag->size = rx->status;
++
++ skb->data_len += rx->status;
++
++ skb_shinfo(nskb)->nr_frags = 0;
++ kfree_skb(nskb);
++
++ frag++;
++ nr_frags++;
++ }
++
++ shinfo->nr_frags = nr_frags;
++ return cons;
++}
++
++static int xennet_set_skb_gso(struct sk_buff *skb,
++ struct netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ if (net_ratelimit())
++ WPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 segmentation offload is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ if (net_ratelimit())
++ WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++#if HAVE_TSO
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++#if HAVE_GSO
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++#endif
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++#else
++ if (net_ratelimit())
++ WPRINTK("GSO unsupported by this kernel.\n");
++ return -EINVAL;
++#endif
++}
++
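++/* NAPI poll handler: consume up to the allotted budget of rx responses
++ * from the shared ring, then offer any remaining budget to an
++ * accelerator plugin's fast path, if one is loaded. */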
++static int netif_poll(struct net_device *dev, int *pbudget)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++ struct netfront_rx_info rinfo;
++ struct netif_rx_response *rx = &rinfo.rx;
++ struct netif_extra_info *extras = rinfo.extras;
++ RING_IDX i, rp;
++ struct multicall_entry *mcl;
++ int work_done, budget, more_to_do = 1, accel_more_to_do = 1;
++ struct sk_buff_head rxq;
++ struct sk_buff_head errq;
++ struct sk_buff_head tmpq;
++ unsigned long flags;
++ unsigned int len;
++ int pages_flipped = 0;
++ int err;
++
++ spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
++
++ if (unlikely(!netfront_carrier_ok(np))) {
++ spin_unlock(&np->rx_lock);
++ return 0;
++ }
++
++ skb_queue_head_init(&rxq);
++ skb_queue_head_init(&errq);
++ skb_queue_head_init(&tmpq);
++
++ if ((budget = *pbudget) > dev->quota)
++ budget = dev->quota;
++ rp = np->rx.sring->rsp_prod;
++ rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++ i = np->rx.rsp_cons;
++ work_done = 0;
++ while ((i != rp) && (work_done < budget)) {
++ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
++ memset(extras, 0, sizeof(rinfo.extras));
++
++ err = xennet_get_responses(np, &rinfo, rp, &tmpq,
++ &pages_flipped);
++
++ if (unlikely(err)) {
++err:
++ while ((skb = __skb_dequeue(&tmpq)))
++ __skb_queue_tail(&errq, skb);
++ np->stats.rx_errors++;
++ i = np->rx.rsp_cons;
++ continue;
++ }
++
++ skb = __skb_dequeue(&tmpq);
++
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++
++ if (unlikely(xennet_set_skb_gso(skb, gso))) {
++ __skb_queue_head(&tmpq, skb);
++ np->rx.rsp_cons += skb_queue_len(&tmpq);
++ goto err;
++ }
++ }
++
++ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
++ NETFRONT_SKB_CB(skb)->offset = rx->offset;
++
++ len = rx->status;
++ if (len > RX_COPY_THRESHOLD)
++ len = RX_COPY_THRESHOLD;
++ skb_put(skb, len);
++
++ if (rx->status > len) {
++ skb_shinfo(skb)->frags[0].page_offset =
++ rx->offset + len;
++ skb_shinfo(skb)->frags[0].size = rx->status - len;
++ skb->data_len = rx->status - len;
++ } else {
++ skb_shinfo(skb)->frags[0].page = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ }
++
++ i = xennet_fill_frags(np, skb, &tmpq);
++
++ /*
++ * Truesize must approximate the size of true data plus
++ * any supervisor overheads. Accounting for hypervisor
++ * overheads here has been shown to significantly reduce
++ * achievable bandwidth with the default receive buffer
++ * size, so we deliberately do not include them.
++ *
++ * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
++ * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
++ * add the size of the data pulled in xennet_fill_frags().
++ *
++ * We also adjust for any unused space in the main data
++ * area by subtracting (RX_COPY_THRESHOLD - len). This is
++ * especially important with drivers which split incoming
++ * packets into header and data, using only 66 bytes of
++ * the main data area (see the e1000 driver for example).
++ * On such systems, without this last adjustment, our
++ * achievable receive throughput using the standard receive
++ * buffer size was cut by 25%.
++ */
++ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
++ skb->len += skb->data_len;
++
++ /*
++ * Old backends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
++ */
++ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb->ip_summed = CHECKSUM_NONE;
++#ifdef CONFIG_XEN
++ skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
++ skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
++#endif
++ np->stats.rx_packets++;
++ np->stats.rx_bytes += skb->len;
++
++ __skb_queue_tail(&rxq, skb);
++
++ np->rx.rsp_cons = ++i;
++ work_done++;
++ }
++
++ if (pages_flipped) {
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-pages_flipped);
++
++ /* Do all the remapping work and M2P updates. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = np->rx_mcl + pages_flipped;
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)np->rx_mmu;
++ mcl->args[1] = pages_flipped;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ err = HYPERVISOR_multicall_check(np->rx_mcl,
++ pages_flipped + 1,
++ NULL);
++ BUG_ON(err);
++ }
++ }
++
++ while ((skb = __skb_dequeue(&errq)))
++ kfree_skb(skb);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ struct page *page = NETFRONT_SKB_CB(skb)->page;
++ void *vaddr = page_address(page);
++ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
++
++ memcpy(skb->data, vaddr + offset, skb_headlen(skb));
++
++ if (page != skb_shinfo(skb)->frags[0].page)
++ __free_page(page);
++
++ /* Ethernet work: Delayed to here as it peeks the header. */
++ skb->protocol = eth_type_trans(skb, dev);
++
++ /* Pass it up. */
++ netif_receive_skb(skb);
++ dev->last_rx = jiffies;
++ }
++
++ /* If we get a callback with very few responses, reduce fill target. */
++ /* NB. Note exponential increase, linear decrease. */
++ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
++ ((3*np->rx_target) / 4)) &&
++ (--np->rx_target < np->rx_min_target))
++ np->rx_target = np->rx_min_target;
++
++ network_alloc_rx_buffers(dev);
++
++ if (work_done < budget) {
++ /* there's some spare capacity, try the accelerated path */
++ int accel_budget = budget - work_done;
++ int accel_budget_start = accel_budget;
++
++ if (np->accel_vif_state.hooks) {
++ accel_more_to_do =
++ np->accel_vif_state.hooks->netdev_poll
++ (dev, &accel_budget);
++ work_done += (accel_budget_start - accel_budget);
++ } else
++ accel_more_to_do = 0;
++ }
++
++ *pbudget -= work_done;
++ dev->quota -= work_done;
++
++ if (work_done < budget) {
++ local_irq_save(flags);
++
++ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
++
++ if (!more_to_do && !accel_more_to_do &&
++ np->accel_vif_state.hooks) {
++ /*
++ * The slow path has nothing more to do; check
++ * whether the fast path is likewise idle.
++ */
++ accel_more_to_do =
++ np->accel_vif_state.hooks->start_napi_irq(dev);
++ }
++
++ if (!more_to_do && !accel_more_to_do)
++ __netif_rx_complete(dev);
++
++ local_irq_restore(flags);
++ }
++
++ spin_unlock(&np->rx_lock);
++
++ return more_to_do | accel_more_to_do;
++}
++
++static void netif_release_tx_bufs(struct netfront_info *np)
++{
++ struct sk_buff *skb;
++ int i;
++
++ for (i = 1; i <= NET_TX_RING_SIZE; i++) {
++ if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
++ continue;
++
++ skb = np->tx_skbs[i];
++ gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
++ gnttab_release_grant_reference(
++ &np->gref_tx_head, np->grant_tx_ref[i]);
++ np->grant_tx_ref[i] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->tx_skbs, i);
++ dev_kfree_skb_irq(skb);
++ }
++}
++
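++/*
++ * Reclaim rx buffers granted to the backend by page transfer ("flip"
++ * mode): end each pending transfer, remap pages that were returned and
++ * notify the balloon driver about those that were not.
++ */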
++static void netif_release_rx_bufs_flip(struct netfront_info *np)
++{
++ struct mmu_update *mmu = np->rx_mmu;
++ struct multicall_entry *mcl = np->rx_mcl;
++ struct sk_buff_head free_list;
++ struct sk_buff *skb;
++ unsigned long mfn;
++ int xfer = 0, noxfer = 0, unused = 0;
++ int id, ref, rc;
++
++ skb_queue_head_init(&free_list);
++
++ spin_lock_bh(&np->rx_lock);
++
++ for (id = 0; id < NET_RX_RING_SIZE; id++) {
++ if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
++ unused++;
++ continue;
++ }
++
++ skb = np->rx_skbs[id];
++ mfn = gnttab_end_foreign_transfer_ref(ref);
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++ np->grant_rx_ref[id] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->rx_skbs, id);
++
++ if (0 == mfn) {
++ struct page *page = skb_shinfo(skb)->frags[0].page;
++ balloon_release_driver_page(page);
++ skb_shinfo(skb)->nr_frags = 0;
++ dev_kfree_skb(skb);
++ noxfer++;
++ continue;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remap the page. */
++ struct page *page = skb_shinfo(skb)->frags[0].page;
++ unsigned long pfn = page_to_pfn(page);
++ void *vaddr = page_address(page);
++
++ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
++ pfn_pte_ma(mfn, PAGE_KERNEL),
++ 0);
++ mcl++;
++ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++ | MMU_MACHPHYS_UPDATE;
++ mmu->val = pfn;
++ mmu++;
++
++ set_phys_to_machine(pfn, mfn);
++ }
++ __skb_queue_tail(&free_list, skb);
++ xfer++;
++ }
++
++ DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
++ __FUNCTION__, xfer, noxfer, unused);
++
++ if (xfer) {
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-xfer);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Do all the remapping work and M2P updates. */
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)np->rx_mmu;
++ mcl->args[1] = mmu - np->rx_mmu;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ mcl++;
++ rc = HYPERVISOR_multicall_check(
++ np->rx_mcl, mcl - np->rx_mcl, NULL);
++ BUG_ON(rc);
++ }
++ }
++
++ while ((skb = __skb_dequeue(&free_list)) != NULL)
++ dev_kfree_skb(skb);
++
++ spin_unlock_bh(&np->rx_lock);
++}
++
++static void netif_release_rx_bufs_copy(struct netfront_info *np)
++{
++ struct sk_buff *skb;
++ int i, ref;
++ int busy = 0, inuse = 0;
++
++ spin_lock_bh(&np->rx_lock);
++
++ for (i = 0; i < NET_RX_RING_SIZE; i++) {
++ ref = np->grant_rx_ref[i];
++
++ if (ref == GRANT_INVALID_REF)
++ continue;
++
++ inuse++;
++
++ skb = np->rx_skbs[i];
++
++ if (!gnttab_end_foreign_access_ref(ref)) {
++ busy++;
++ continue;
++ }
++
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->rx_skbs, i);
++
++ dev_kfree_skb(skb);
++ }
++
++ if (busy)
++ DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
++ __FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
++
++ spin_unlock_bh(&np->rx_lock);
++}
++
++static int network_close(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ netif_stop_queue(np->netdev);
++ return 0;
++}
++
++
++static struct net_device_stats *network_get_stats(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ netfront_accelerator_call_get_stats(np, dev);
++ return &np->stats;
++}
++
++static int xennet_set_mac_address(struct net_device *dev, void *p)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ struct sockaddr *addr = p;
++
++ if (netif_running(dev))
++ return -EBUSY;
++
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++
++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++ memcpy(np->mac, addr->sa_data, ETH_ALEN);
++
++ return 0;
++}
++
++static int xennet_change_mtu(struct net_device *dev, int mtu)
++{
++ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
++
++static int xennet_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ struct netfront_info *np = netdev_priv(dev);
++ int val;
++
++ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
++ "%d", &val) < 0)
++ val = 0;
++ if (!val)
++ return -ENOSYS;
++ } else if (dev->mtu > ETH_DATA_LEN)
++ dev->mtu = ETH_DATA_LEN;
++
++ return ethtool_op_set_sg(dev, data);
++}
++
++static int xennet_set_tso(struct net_device *dev, u32 data)
++{
++ if (data) {
++ struct netfront_info *np = netdev_priv(dev);
++ int val;
++
++ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-gso-tcpv4", "%d", &val) < 0)
++ val = 0;
++ if (!val)
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_tso(dev, data);
++}
++
++static void xennet_set_features(struct net_device *dev)
++{
++ dev_disable_gso_features(dev);
++ xennet_set_sg(dev, 0);
++
++ /* We need checksum offload to enable scatter/gather and TSO. */
++ if (!(dev->features & NETIF_F_IP_CSUM))
++ return;
++
++ if (xennet_set_sg(dev, 1))
++ return;
++
++ /* Before 2.6.9 TSO seems to be unreliable so do not enable it
++ * on older kernels.
++ */
++ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9))
++ xennet_set_tso(dev, 1);
++}
++
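++/*
++ * (Re)connect to the backend: negotiate copy versus flip receive mode,
++ * rebuild the shared rings, discard pending tx buffers, requeue
++ * outstanding rx buffers and bring the carrier back up.
++ */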
++static int network_connect(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ int i, requeue_idx, err;
++ struct sk_buff *skb;
++ grant_ref_t ref;
++ netif_rx_request_t *req;
++ unsigned int feature_rx_copy, feature_rx_flip;
++
++ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-rx-copy", "%u", &feature_rx_copy);
++ if (err != 1)
++ feature_rx_copy = 0;
++ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-rx-flip", "%u", &feature_rx_flip);
++ if (err != 1)
++ feature_rx_flip = 1;
++
++ /*
++ * Copy packets on receive path if:
++ * (a) This was requested by user, and the backend supports it; or
++ * (b) Flipping was requested, but this is unsupported by the backend.
++ */
++ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
++ (MODPARM_rx_flip && !feature_rx_flip));
++
++ err = talk_to_backend(np->xbdev, np);
++ if (err)
++ return err;
++
++ xennet_set_features(dev);
++
++ DPRINTK("device %s has %sing receive path.\n",
++ dev->name, np->copying_receiver ? "copy" : "flipp");
++
++ spin_lock_bh(&np->rx_lock);
++ spin_lock_irq(&np->tx_lock);
++
++ /*
++ * Recovery procedure:
++ * NB. Freelist index entries are always going to be less than
++ * PAGE_OFFSET, whereas pointers to skbs will always be equal or
++ * greater than PAGE_OFFSET: we use this property to distinguish
++ * them.
++ */
++
++ /* Step 1: Discard all pending TX packet fragments. */
++ netif_release_tx_bufs(np);
++
++ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
++ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
++ if (!np->rx_skbs[i])
++ continue;
++
++ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
++ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
++ req = RING_GET_REQUEST(&np->rx, requeue_idx);
++
++ if (!np->copying_receiver) {
++ gnttab_grant_foreign_transfer_ref(
++ ref, np->xbdev->otherend_id,
++ page_to_pfn(skb_shinfo(skb)->frags->page));
++ } else {
++ gnttab_grant_foreign_access_ref(
++ ref, np->xbdev->otherend_id,
++ pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
++ frags->page)),
++ 0);
++ }
++ req->gref = ref;
++ req->id = requeue_idx;
++
++ requeue_idx++;
++ }
++
++ np->rx.req_prod_pvt = requeue_idx;
++
++ /*
++ * Step 3: All public and private state should now be sane. Get
++ * ready to start sending and receiving packets and give the driver
++ * domain a kick because we've probably just requeued some
++ * packets.
++ */
++ netfront_carrier_on(np);
++ notify_remote_via_irq(np->irq);
++ network_tx_buf_gc(dev);
++ network_alloc_rx_buffers(dev);
++
++ spin_unlock_irq(&np->tx_lock);
++ spin_unlock_bh(&np->rx_lock);
++
++ return 0;
++}
++
++static void netif_uninit(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ netif_release_tx_bufs(np);
++ if (np->copying_receiver)
++ netif_release_rx_bufs_copy(np);
++ else
++ netif_release_rx_bufs_flip(np);
++ gnttab_free_grant_references(np->gref_tx_head);
++ gnttab_free_grant_references(np->gref_rx_head);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = xennet_set_sg,
++#if HAVE_TSO
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = xennet_set_tso,
++#endif
++ .get_link = ethtool_op_get_link,
++};
++
++#ifdef CONFIG_SYSFS
++static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_min_target);
++}
++
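++/* Writes are clamped to [RX_MIN_TARGET, RX_MAX_TARGET]; raising the
++ * minimum above the current maximum raises the maximum as well. */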
++static ssize_t store_rxbuf_min(struct class_device *cd,
++ const char *buf, size_t len)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *np = netdev_priv(netdev);
++ char *endp;
++ unsigned long target;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ target = simple_strtoul(buf, &endp, 0);
++ if (endp == buf)
++ return -EBADMSG;
++
++ if (target < RX_MIN_TARGET)
++ target = RX_MIN_TARGET;
++ if (target > RX_MAX_TARGET)
++ target = RX_MAX_TARGET;
++
++ spin_lock_bh(&np->rx_lock);
++ if (target > np->rx_max_target)
++ np->rx_max_target = target;
++ np->rx_min_target = target;
++ if (target > np->rx_target)
++ np->rx_target = target;
++
++ network_alloc_rx_buffers(netdev);
++
++ spin_unlock_bh(&np->rx_lock);
++ return len;
++}
++
++static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_max_target);
++}
++
++static ssize_t store_rxbuf_max(struct class_device *cd,
++ const char *buf, size_t len)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *np = netdev_priv(netdev);
++ char *endp;
++ unsigned long target;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ target = simple_strtoul(buf, &endp, 0);
++ if (endp == buf)
++ return -EBADMSG;
++
++ if (target < RX_MIN_TARGET)
++ target = RX_MIN_TARGET;
++ if (target > RX_MAX_TARGET)
++ target = RX_MAX_TARGET;
++
++ spin_lock_bh(&np->rx_lock);
++ if (target < np->rx_min_target)
++ np->rx_min_target = target;
++ np->rx_max_target = target;
++ if (target < np->rx_target)
++ np->rx_target = target;
++
++ network_alloc_rx_buffers(netdev);
++
++ spin_unlock_bh(&np->rx_lock);
++ return len;
++}
++
++static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_target);
++}
++
++static const struct class_device_attribute xennet_attrs[] = {
++ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
++ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
++ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
++};
++
++static int xennet_sysfs_addif(struct net_device *netdev)
++{
++ int i;
++ int error = 0;
++
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++ error = class_device_create_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ if (error)
++ goto fail;
++ }
++ return 0;
++
++ fail:
++ while (--i >= 0)
++ class_device_remove_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ return error;
++}
++
++static void xennet_sysfs_delif(struct net_device *netdev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++ class_device_remove_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ }
++}
++
++#endif /* CONFIG_SYSFS */
++
++
++/*
++ * Nothing to do here. Virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void network_set_multicast_list(struct net_device *dev)
++{
++}
++
++static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
++{
++ int i, err = 0;
++ struct net_device *netdev = NULL;
++ struct netfront_info *np = NULL;
++
++ netdev = alloc_etherdev(sizeof(struct netfront_info));
++ if (!netdev) {
++ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
++ __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ np = netdev_priv(netdev);
++ np->xbdev = dev;
++
++ spin_lock_init(&np->tx_lock);
++ spin_lock_init(&np->rx_lock);
++
++ init_accelerator_vif(np, dev);
++
++ skb_queue_head_init(&np->rx_batch);
++ np->rx_target = RX_DFL_MIN_TARGET;
++ np->rx_min_target = RX_DFL_MIN_TARGET;
++ np->rx_max_target = RX_MAX_TARGET;
++
++ init_timer(&np->rx_refill_timer);
++ np->rx_refill_timer.data = (unsigned long)netdev;
++ np->rx_refill_timer.function = rx_refill_timeout;
++
++ /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
++ for (i = 0; i <= NET_TX_RING_SIZE; i++) {
++ np->tx_skbs[i] = (void *)((unsigned long) i+1);
++ np->grant_tx_ref[i] = GRANT_INVALID_REF;
++ }
++
++ for (i = 0; i < NET_RX_RING_SIZE; i++) {
++ np->rx_skbs[i] = NULL;
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ }
++
++ /* A grant for every tx ring slot */
++ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
++ &np->gref_tx_head) < 0) {
++ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
++ err = -ENOMEM;
++ goto exit;
++ }
++ /* A grant for every rx ring slot */
++ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
++ &np->gref_rx_head) < 0) {
++ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
++ err = -ENOMEM;
++ goto exit_free_tx;
++ }
++
++ netdev->open = network_open;
++ netdev->hard_start_xmit = network_start_xmit;
++ netdev->stop = network_close;
++ netdev->get_stats = network_get_stats;
++ netdev->poll = netif_poll;
++ netdev->set_multicast_list = network_set_multicast_list;
++ netdev->uninit = netif_uninit;
++ netdev->set_mac_address = xennet_set_mac_address;
++ netdev->change_mtu = xennet_change_mtu;
++ netdev->weight = 64;
++ netdev->features = NETIF_F_IP_CSUM;
++
++ SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
++ SET_MODULE_OWNER(netdev);
++ SET_NETDEV_DEV(netdev, &dev->dev);
++
++ np->netdev = netdev;
++
++ netfront_carrier_off(np);
++
++ return netdev;
++
++ exit_free_tx:
++ gnttab_free_grant_references(np->gref_tx_head);
++ exit:
++ free_netdev(netdev);
++ return ERR_PTR(err);
++}
++
++#ifdef CONFIG_INET
++/*
++ * We use this notifier to send out a fake ARP reply to reset switches and
++ * router ARP caches when an IP interface is brought up on a VIF.
++ */
++static int
++inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
++ struct net_device *dev = ifa->ifa_dev->dev;
++
++ /* UP event and is it one of our devices? */
++ if (event == NETDEV_UP && dev->open == network_open)
++ send_fake_arp(dev);
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block notifier_inetdev = {
++ .notifier_call = inetdev_notify,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++
++
++static void netif_disconnect_backend(struct netfront_info *info)
++{
++ /* Stop old i/f to prevent errors whilst we rebuild the state. */
++ spin_lock_bh(&info->rx_lock);
++ spin_lock_irq(&info->tx_lock);
++ netfront_carrier_off(info);
++ spin_unlock_irq(&info->tx_lock);
++ spin_unlock_bh(&info->rx_lock);
++
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info->netdev);
++ info->irq = 0;
++
++ end_access(info->tx_ring_ref, info->tx.sring);
++ end_access(info->rx_ring_ref, info->rx.sring);
++ info->tx_ring_ref = GRANT_INVALID_REF;
++ info->rx_ring_ref = GRANT_INVALID_REF;
++ info->tx.sring = NULL;
++ info->rx.sring = NULL;
++}
++
++
++static void end_access(int ref, void *page)
++{
++ if (ref != GRANT_INVALID_REF)
++ gnttab_end_foreign_access(ref, (unsigned long)page);
++}
++
++
++/* ** Driver registration ** */
++
++
++static const struct xenbus_device_id netfront_ids[] = {
++ { "vif" },
++ { "" }
++};
++MODULE_ALIAS("xen:vif");
++
++
++static struct xenbus_driver netfront_driver = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netfront_ids,
++ .probe = netfront_probe,
++ .remove = __devexit_p(netfront_remove),
++ .suspend = netfront_suspend,
++ .suspend_cancel = netfront_suspend_cancel,
++ .resume = netfront_resume,
++ .otherend_changed = backend_changed,
++};
++
++
++static int __init netif_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++#ifdef CONFIG_XEN
++ if (MODPARM_rx_flip && MODPARM_rx_copy) {
++ WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
++ return -EINVAL;
++ }
++
++ if (!MODPARM_rx_flip && !MODPARM_rx_copy)
++ MODPARM_rx_flip = 1; /* Default is to flip. */
++#endif
++
++ netif_init_accel();
++
++ IPRINTK("Initialising virtual ethernet driver.\n");
++
++#ifdef CONFIG_INET
++ (void)register_inetaddr_notifier(&notifier_inetdev);
++#endif
++
++ return xenbus_register_frontend(&netfront_driver);
++}
++module_init(netif_init);
++
++
++static void __exit netif_exit(void)
++{
++#ifdef CONFIG_INET
++ unregister_inetaddr_notifier(&notifier_inetdev);
++#endif
++
++ netif_exit_accel();
++
++ return xenbus_unregister_driver(&netfront_driver);
++}
++module_exit(netif_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/netfront/netfront.h linux-2.6.18-xen-3.3.0/drivers/xen/netfront/netfront.h
+--- linux-2.6.18.8/drivers/xen/netfront/netfront.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/netfront/netfront.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,274 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef NETFRONT_H
++#define NETFRONT_H
++
++#include <xen/interface/io/netif.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++
++#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
++
++#include <xen/xenbus.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++/*
++ * Function pointer table for hooks into a network acceleration
++ * plugin. These are called at appropriate points from the netfront
++ * driver
++ */
++struct netfront_accel_hooks {
++ /*
++ * new_device: Accelerator hook to ask the plugin to support a
++ * new network interface
++ */
++ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev);
++ /*
++ * remove: Opposite of new_device
++ */
++ int (*remove)(struct xenbus_device *dev);
++ /*
++ * The net_device is being polled; check the accelerated
++ * hardware for any pending packets.
++ */
++ int (*netdev_poll)(struct net_device *dev, int *pbudget);
++ /*
++ * start_xmit: Used to give the accelerated plugin the option
++ * of sending a packet. Returns non-zero if it has done so, or
++ * zero to decline and force the packet onto the normal send
++ * path.
++ */
++ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev);
++ /*
++ * start_napi_irq/stop_napi_irq: used by netfront to indicate
++ * when NAPI interrupts should be enabled or disabled.
++ */
++ int (*start_napi_irq)(struct net_device *dev);
++ void (*stop_napi_irq)(struct net_device *dev);
++ /*
++ * Called before re-enabling the TX queue to check the fast
++ * path has slots too
++ */
++ int (*check_ready)(struct net_device *dev);
++ /*
++ * Get the fastpath network statistics
++ */
++ int (*get_stats)(struct net_device *dev,
++ struct net_device_stats *stats);
++};
++
++
++/* Version of the API/protocol supported for communication between
++ netfront and the acceleration plugin */
++#define NETFRONT_ACCEL_VERSION 0x00010003
++
++/*
++ * Per-netfront device state for the accelerator. This is used to
++ * allow efficient per-netfront device access to the accelerator
++ * hooks
++ */
++struct netfront_accel_vif_state {
++ struct list_head link;
++
++ struct xenbus_device *dev;
++ struct netfront_info *np;
++ struct netfront_accel_hooks *hooks;
++
++ /* Watch on the accelerator configuration value */
++ struct xenbus_watch accel_watch;
++ /* Work item to process change in accelerator */
++ struct work_struct accel_work;
++ /* The string from xenbus last time accel_watch fired */
++ char *accel_frontend;
++};
++
++/*
++ * Per-accelerator state stored in netfront. These form a list that
++ * is used to track which devices are accelerated by which plugins,
++ * and what plugins are available/have been requested
++ */
++struct netfront_accelerator {
++ /* Used to make a list */
++ struct list_head link;
++ /* ID of the accelerator */
++ int id;
++ /*
++ * String describing the accelerator. Currently this is the
++ * name of the accelerator module. This is provided by the
++ * backend accelerator through xenstore
++ */
++ char *frontend;
++ /* The hooks into the accelerator plugin module */
++ struct netfront_accel_hooks *hooks;
++
++ /*
++ * List of per-netfront device state (struct
++ * netfront_accel_vif_state) for each netfront device that is
++ * using this accelerator
++ */
++ struct list_head vif_states;
++ spinlock_t vif_states_lock;
++};
++
++struct netfront_info {
++ struct list_head list;
++ struct net_device *netdev;
++
++ struct net_device_stats stats;
++
++ struct netif_tx_front_ring tx;
++ struct netif_rx_front_ring rx;
++
++ spinlock_t tx_lock;
++ spinlock_t rx_lock;
++
++ unsigned int irq;
++ unsigned int copying_receiver;
++ unsigned int carrier;
++
++ /* Receive-ring batched refills. */
++#define RX_MIN_TARGET 8
++#define RX_DFL_MIN_TARGET 64
++#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++ unsigned rx_min_target, rx_max_target, rx_target;
++ struct sk_buff_head rx_batch;
++
++ struct timer_list rx_refill_timer;
++
++ /*
++ * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
++ * is an index into a chain of free entries.
++ */
++ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
++ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
++
++#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++ grant_ref_t gref_tx_head;
++ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
++ grant_ref_t gref_rx_head;
++ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
++
++ struct xenbus_device *xbdev;
++ int tx_ring_ref;
++ int rx_ring_ref;
++ u8 mac[ETH_ALEN];
++
++ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
++ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
++ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
++
++ /* Private pointer to state internal to accelerator module */
++ void *accel_priv;
++ /* The accelerator used by this netfront device */
++ struct netfront_accelerator *accelerator;
++ /* The accelerator state for this netfront device */
++ struct netfront_accel_vif_state accel_vif_state;
++};
++
++
++/* Exported Functions */
++
++/*
++ * Called by an accelerator plugin module when it has loaded.
++ *
++ * frontend: the string describing the accelerator, currently the module name
++ * hooks: the hooks for netfront to use to call into the accelerator
++ * version: the version of API between frontend and plugin requested
++ *
++ * return: 0 on success, <0 on error, >0 on version mismatch (in which
++ * case the return value is the version that netfront supports)
++ */
++extern int netfront_accelerator_loaded(int version, const char *frontend,
++ struct netfront_accel_hooks *hooks);
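++
++/*
++ * Editorial sketch (not part of the original interface): how a plugin
++ * module's init routine might use the hook above. The module name
++ * "example_accel" and the empty hooks table are illustrative only.
++ */
++#if 0
++static struct netfront_accel_hooks example_hooks = {
++	/* plugin callbacks would be filled in here */
++};
++
++static int __init example_accel_init(void)
++{
++	int err = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
++					      "example_accel",
++					      &example_hooks);
++	if (err > 0)
++		return -EPROTO;	/* netfront only speaks version 'err' */
++	return err;
++}
++#endif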
++
++/*
++ * Called by an accelerator plugin module when it is about to unload.
++ *
++ * frontend: the string describing the accelerator. Must match the
++ * one passed to netfront_accelerator_loaded()
++ */
++extern void netfront_accelerator_stop(const char *frontend);
++
++/*
++ * Called by an accelerator before waking the net device's TX queue to
++ * ensure the slow path has available slots. Returns true if OK to
++ * wake, false if still busy
++ */
++extern int netfront_check_queue_ready(struct net_device *net_dev);
++
++
++/* Internal-to-netfront Functions */
++
++/*
++ * Call into accelerator and check to see if it has tx space before we
++ * wake the net device's TX queue. Returns true if OK to wake, false
++ * if still busy
++ */
++extern
++int netfront_check_accelerator_queue_ready(struct net_device *dev,
++ struct netfront_info *np);
++extern
++int netfront_accelerator_call_remove(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++int netfront_accelerator_suspend(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++int netfront_accelerator_suspend_cancel(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++void netfront_accelerator_resume(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np,
++ struct net_device *dev);
++extern
++int netfront_accelerator_call_get_stats(struct netfront_info *np,
++ struct net_device *dev);
++extern
++void netfront_accelerator_add_watch(struct netfront_info *np);
++
++extern
++void netif_init_accel(void);
++extern
++void netif_exit_accel(void);
++
++extern
++void init_accelerator_vif(struct netfront_info *np,
++ struct xenbus_device *dev);
++#endif /* NETFRONT_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,426 @@
++/*
++ * PCI Backend - Functions for creating a virtual configuration space for
++ * exported PCI Devices.
++ * It's dangerous to allow PCI Driver Domains to change their
++ * devices' resources (memory, I/O ports, interrupts). We need to
++ * restrict changes to certain PCI Configuration registers:
++ * BARs, INTERRUPT_PIN, most registers in the header...
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++#define DEFINE_PCI_CONFIG(op,size,type) \
++int pciback_##op##_config_##size \
++(struct pci_dev *dev, int offset, type value, void *data) \
++{ \
++ return pci_##op##_config_##size (dev, offset, value); \
++}
++
++DEFINE_PCI_CONFIG(read, byte, u8 *)
++DEFINE_PCI_CONFIG(read, word, u16 *)
++DEFINE_PCI_CONFIG(read, dword, u32 *)
++
++DEFINE_PCI_CONFIG(write, byte, u8)
++DEFINE_PCI_CONFIG(write, word, u16)
++DEFINE_PCI_CONFIG(write, dword, u32)
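++
++/*
++ * Editorial note: DEFINE_PCI_CONFIG(read, byte, u8 *) above expands to
++ *
++ *   int pciback_read_config_byte(struct pci_dev *dev, int offset,
++ *                                u8 *value, void *data)
++ *   {
++ *       return pci_read_config_byte(dev, offset, value);
++ *   }
++ *
++ * i.e. the generated wrappers forward straight to the kernel's real
++ * config-space accessors and ignore the per-field data pointer.
++ */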
++
++static int conf_space_read(struct pci_dev *dev,
++ struct config_field_entry *entry, int offset,
++ u32 * value)
++{
++ int ret = 0;
++ struct config_field *field = entry->field;
++
++ *value = 0;
++
++ switch (field->size) {
++ case 1:
++ if (field->u.b.read)
++ ret = field->u.b.read(dev, offset, (u8 *) value,
++ entry->data);
++ break;
++ case 2:
++ if (field->u.w.read)
++ ret = field->u.w.read(dev, offset, (u16 *) value,
++ entry->data);
++ break;
++ case 4:
++ if (field->u.dw.read)
++ ret = field->u.dw.read(dev, offset, value, entry->data);
++ break;
++ }
++ return ret;
++}
++
++static int conf_space_write(struct pci_dev *dev,
++ struct config_field_entry *entry, int offset,
++ u32 value)
++{
++ int ret = 0;
++ struct config_field *field = entry->field;
++
++ switch (field->size) {
++ case 1:
++ if (field->u.b.write)
++ ret = field->u.b.write(dev, offset, (u8) value,
++ entry->data);
++ break;
++ case 2:
++ if (field->u.w.write)
++ ret = field->u.w.write(dev, offset, (u16) value,
++ entry->data);
++ break;
++ case 4:
++ if (field->u.dw.write)
++ ret = field->u.dw.write(dev, offset, value,
++ entry->data);
++ break;
++ }
++ return ret;
++}
++
++static inline u32 get_mask(int size)
++{
++ if (size == 1)
++ return 0xff;
++ else if (size == 2)
++ return 0xffff;
++ else
++ return 0xffffffff;
++}
++
++static inline int valid_request(int offset, int size)
++{
++ /* Validate request (no un-aligned requests) */
++ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
++ return 1;
++ return 0;
++}
++
++static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
++ int offset)
++{
++ if (offset >= 0) {
++ new_val_mask <<= (offset * 8);
++ new_val <<= (offset * 8);
++ } else {
++ new_val_mask >>= (offset * -8);
++ new_val >>= (offset * -8);
++ }
++ val = (val & ~new_val_mask) | (new_val & new_val_mask);
++
++ return val;
++}
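++
++/*
++ * Editorial example: merging a 16-bit value 0xbeef into the existing
++ * dword 0x11223344 with mask 0xffff at offset 2 shifts mask and value
++ * up by 16 bits, so
++ *   merge_value(0x11223344, 0xbeef, 0xffff, 2) == 0xbeef3344.
++ * A negative offset shifts them down instead.
++ */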
++
++static int pcibios_err_to_errno(int err)
++{
++ switch (err) {
++ case PCIBIOS_SUCCESSFUL:
++ return XEN_PCI_ERR_success;
++ case PCIBIOS_DEVICE_NOT_FOUND:
++ return XEN_PCI_ERR_dev_not_found;
++ case PCIBIOS_BAD_REGISTER_NUMBER:
++ return XEN_PCI_ERR_invalid_offset;
++ case PCIBIOS_FUNC_NOT_SUPPORTED:
++ return XEN_PCI_ERR_not_implemented;
++ case PCIBIOS_SET_FAILED:
++ return XEN_PCI_ERR_access_denied;
++ }
++ return err;
++}
++
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++ u32 * ret_val)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ struct config_field *field;
++ int req_start, req_end, field_start, field_end;
++ /* if read fails for any reason, return 0 (as if device didn't respond) */
++ u32 value = 0, tmp_val;
++
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
++ pci_name(dev), size, offset);
++
++ if (!valid_request(offset, size)) {
++ err = XEN_PCI_ERR_invalid_offset;
++ goto out;
++ }
++
++ /* Get the real value first, then modify as appropriate */
++ switch (size) {
++ case 1:
++		err = pci_read_config_byte(dev, offset, (u8 *)&value);
++ break;
++ case 2:
++		err = pci_read_config_word(dev, offset, (u16 *)&value);
++ break;
++ case 4:
++ err = pci_read_config_dword(dev, offset, &value);
++ break;
++ }
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ req_start = offset;
++ req_end = offset + size;
++ field_start = OFFSET(cfg_entry);
++ field_end = OFFSET(cfg_entry) + field->size;
++
++ if ((req_start >= field_start && req_start < field_end)
++ || (req_end > field_start && req_end <= field_end)) {
++ err = conf_space_read(dev, cfg_entry, field_start,
++ &tmp_val);
++ if (err)
++ goto out;
++
++ value = merge_value(value, tmp_val,
++ get_mask(field->size),
++ field_start - req_start);
++ }
++ }
++
++ out:
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
++ pci_name(dev), size, offset, value);
++
++ *ret_val = value;
++ return pcibios_err_to_errno(err);
++}
++
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
++{
++ int err = 0, handled = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ struct config_field *field;
++ u32 tmp_val;
++ int req_start, req_end, field_start, field_end;
++
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG
++ "pciback: %s: write request %d bytes at 0x%x = %x\n",
++ pci_name(dev), size, offset, value);
++
++ if (!valid_request(offset, size))
++ return XEN_PCI_ERR_invalid_offset;
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ req_start = offset;
++ req_end = offset + size;
++ field_start = OFFSET(cfg_entry);
++ field_end = OFFSET(cfg_entry) + field->size;
++
++ if ((req_start >= field_start && req_start < field_end)
++ || (req_end > field_start && req_end <= field_end)) {
++ tmp_val = 0;
++
++ err = pciback_config_read(dev, field_start,
++ field->size, &tmp_val);
++ if (err)
++ break;
++
++ tmp_val = merge_value(tmp_val, value, get_mask(size),
++ req_start - field_start);
++
++ err = conf_space_write(dev, cfg_entry, field_start,
++ tmp_val);
++
++ /* handled is set true here, but not every byte
++ * may have been written! Properly detecting if
++ * every byte is handled is unnecessary as the
++ * flag is used to detect devices that need
++ * special helpers to work correctly.
++ */
++ handled = 1;
++ }
++ }
++
++ if (!handled && !err) {
++		/* By default, anything not specifically handled above is
++ * read-only. The permissive flag changes this behavior so
++ * that anything not specifically handled above is writable.
++ * This means that some fields may still be read-only because
++ * they have entries in the config_field list that intercept
++ * the write and do nothing. */
++ if (dev_data->permissive) {
++ switch (size) {
++ case 1:
++ err = pci_write_config_byte(dev, offset,
++ (u8) value);
++ break;
++ case 2:
++ err = pci_write_config_word(dev, offset,
++ (u16) value);
++ break;
++ case 4:
++ err = pci_write_config_dword(dev, offset,
++ (u32) value);
++ break;
++ }
++ } else if (!dev_data->warned_on_write) {
++ dev_data->warned_on_write = 1;
++ dev_warn(&dev->dev, "Driver tried to write to a "
++ "read-only configuration space field at offset "
++ "0x%x, size %d. This may be harmless, but if "
++ "you have problems with your device:\n"
++ "1) see permissive attribute in sysfs\n"
++ "2) report problems to the xen-devel "
++ "mailing list along with details of your "
++ "device obtained from lspci.\n", offset, size);
++ }
++ }
++
++ return pcibios_err_to_errno(err);
++}
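++
++/*
++ * Editorial note: dev_data->permissive is a per-device flag, exposed in
++ * this tree through pciback's "permissive" sysfs attribute mentioned in
++ * the warning above; the exact path below is illustrative:
++ *
++ *   echo 0000:01:00.0 > /sys/bus/pci/drivers/pciback/permissive
++ */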
++
++void pciback_config_free_dyn_fields(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry, *t;
++ struct config_field *field;
++
++ dev_dbg(&dev->dev,
++		"freeing dynamically allocated virtual configuration space fields\n");
++
++ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ if (field->clean) {
++ field->clean(field);
++
++ if (cfg_entry->data)
++ kfree(cfg_entry->data);
++
++ list_del(&cfg_entry->list);
++ kfree(cfg_entry);
++ }
++
++ }
++}
++
++void pciback_config_reset_dev(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ struct config_field *field;
++
++ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ if (field->reset)
++ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
++ }
++}
++
++void pciback_config_free_dev(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry, *t;
++ struct config_field *field;
++
++	dev_dbg(&dev->dev, "freeing virtual configuration space fields\n");
++
++ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++ list_del(&cfg_entry->list);
++
++ field = cfg_entry->field;
++
++ if (field->release)
++ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
++
++ kfree(cfg_entry);
++ }
++}
++
++int pciback_config_add_field_offset(struct pci_dev *dev,
++ struct config_field *field,
++ unsigned int base_offset)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ void *tmp;
++
++ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
++ if (!cfg_entry) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ cfg_entry->data = NULL;
++ cfg_entry->field = field;
++ cfg_entry->base_offset = base_offset;
++
++ /* silently ignore duplicate fields */
++	err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
++ if (err)
++ goto out;
++
++ if (field->init) {
++ tmp = field->init(dev, OFFSET(cfg_entry));
++
++ if (IS_ERR(tmp)) {
++ err = PTR_ERR(tmp);
++ goto out;
++ }
++
++ cfg_entry->data = tmp;
++ }
++
++ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
++ OFFSET(cfg_entry));
++ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
++
++ out:
++ if (err)
++ kfree(cfg_entry);
++
++ return err;
++}
++
++/* This sets up the device's virtual configuration space to keep track of
++ * certain registers (like the base address registers (BARs)) so that we can
++ * keep the client from manipulating them directly.
++ */
++int pciback_config_init_dev(struct pci_dev *dev)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++
++ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
++
++ INIT_LIST_HEAD(&dev_data->config_fields);
++
++ err = pciback_config_header_add_fields(dev);
++ if (err)
++ goto out;
++
++ err = pciback_config_capability_add_fields(dev);
++ if (err)
++ goto out;
++
++ err = pciback_config_quirks_init(dev);
++
++ out:
++ return err;
++}
++
++int pciback_config_init(void)
++{
++ return pciback_config_capability_init();
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_capability.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_capability.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,71 @@
++/*
++ * PCI Backend - Handles the virtual fields found on the capability lists
++ * in the configuration space.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static LIST_HEAD(capabilities);
++
++static struct config_field caplist_header[] = {
++ {
++ .offset = PCI_CAP_LIST_ID,
++ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = NULL,
++ },
++ {
++ .size = 0,
++ },
++};
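++
++/*
++ * Editorial note: leaving .u.w.write NULL makes the capability ID and
++ * next pointer read-only from the guest's point of view:
++ * conf_space_write() matches the field, finds no write handler, and
++ * silently discards the write while still marking it handled.
++ */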
++
++static inline void register_capability(struct pciback_config_capability *cap)
++{
++ list_add_tail(&cap->cap_list, &capabilities);
++}
++
++int pciback_config_capability_add_fields(struct pci_dev *dev)
++{
++ int err = 0;
++ struct pciback_config_capability *cap;
++ int cap_offset;
++
++ list_for_each_entry(cap, &capabilities, cap_list) {
++ cap_offset = pci_find_capability(dev, cap->capability);
++ if (cap_offset) {
++ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
++ cap->capability, cap_offset);
++
++ err = pciback_config_add_fields_offset(dev,
++ caplist_header,
++ cap_offset);
++ if (err)
++ goto out;
++ err = pciback_config_add_fields_offset(dev,
++ cap->fields,
++ cap_offset);
++ if (err)
++ goto out;
++ }
++ }
++
++ out:
++ return err;
++}
++
++extern struct pciback_config_capability pciback_config_capability_vpd;
++extern struct pciback_config_capability pciback_config_capability_pm;
++
++int pciback_config_capability_init(void)
++{
++ register_capability(&pciback_config_capability_vpd);
++ register_capability(&pciback_config_capability_pm);
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_capability.h linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability.h
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_capability.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,23 @@
++/*
++ * PCI Backend - Data structures for special overlays for structures on
++ * the capability list.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
++#define __PCIBACK_CONFIG_CAPABILITY_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_capability {
++ struct list_head cap_list;
++
++ int capability;
++
++ /* If the device has the capability found above, add these fields */
++ struct config_field *fields;
++};
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_capability_msi.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability_msi.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_capability_msi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability_msi.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,69 @@
++/*
++ * PCI Backend -- Configuration overlay for MSI capability
++ */
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++#include <xen/interface/io/pciif.h>
++#include "pciback.h"
++
++int pciback_enable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ int otherend = pdev->xdev->otherend_id;
++ int status;
++
++ status = pci_enable_msi(dev);
++
++ if (status) {
++		printk(KERN_ERR "pciback: error enabling MSI for guest %x, status %x\n",
++		       otherend, status);
++ op->value = 0;
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ op->value = dev->irq;
++ return 0;
++}
++
++int pciback_disable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ pci_disable_msi(dev);
++
++ op->value = dev->irq;
++ return 0;
++}
++
++int pciback_enable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ int result;
++
++ if (op->value > SH_INFO_MAX_VEC)
++ return -EINVAL;
++ else {
++ struct msix_entry entries[op->value];
++ int i;
++
++ for (i = 0; i < op->value; i++) {
++ entries[i].entry = op->msix_entries[i].entry;
++ entries[i].vector = op->msix_entries[i].vector;
++ }
++
++ result = pci_enable_msix(dev, entries, op->value);
++ op->value = result;
++ }
++
++ return result;
++}
++
++int pciback_disable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ pci_disable_msix(dev);
++
++ op->value = dev->irq;
++ return 0;
++}
++
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_capability_pm.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability_pm.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_capability_pm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability_pm.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,128 @@
++/*
++ * PCI Backend - Configuration space overlay for power management
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
++ void *data)
++{
++ int err;
++ u16 real_value;
++
++ err = pci_read_config_word(dev, offset, &real_value);
++ if (err)
++ goto out;
++
++ *value = real_value & ~PCI_PM_CAP_PME_MASK;
++
++ out:
++ return err;
++}
++
++/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
++ * We can't allow the driver domain to enable PMEs - they're shared. */
++#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
++
++static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
++ void *data)
++{
++ int err;
++ u16 old_value;
++ pci_power_t new_state, old_state;
++
++ err = pci_read_config_word(dev, offset, &old_value);
++ if (err)
++ goto out;
++
++ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
++ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
++
++ new_value &= PM_OK_BITS;
++ if ((old_value & PM_OK_BITS) != new_value) {
++ new_value = (old_value & ~PM_OK_BITS) | new_value;
++ err = pci_write_config_word(dev, offset, new_value);
++ if (err)
++ goto out;
++ }
++
++ /* Let pci core handle the power management change */
++ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
++ err = pci_set_power_state(dev, new_state);
++ if (err) {
++ err = PCIBIOS_SET_FAILED;
++ goto out;
++ }
++
++ /*
++	 * The device may lose PCI config info on a D3->D0 transition.
++	 * This is a problem for guests which do not reset their BARs;
++	 * even those that try will be foiled by our BAR-write handler,
++	 * which discards the write. Since Linux won't re-init the
++	 * config space automatically in all cases, we do it here.
++	 * Future: should we re-initialise the first 64 bytes of config space?
++ */
++ if (new_state == PCI_D0 &&
++ (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
++ !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
++ pci_restore_bars(dev);
++
++ out:
++ return err;
++}
++
++/* Ensure PMEs are disabled */
++static void *pm_ctrl_init(struct pci_dev *dev, int offset)
++{
++ int err;
++ u16 value;
++
++ err = pci_read_config_word(dev, offset, &value);
++ if (err)
++ goto out;
++
++ if (value & PCI_PM_CTRL_PME_ENABLE) {
++ value &= ~PCI_PM_CTRL_PME_ENABLE;
++ err = pci_write_config_word(dev, offset, value);
++ }
++
++ out:
++ return ERR_PTR(err);
++}
++
++static struct config_field caplist_pm[] = {
++ {
++ .offset = PCI_PM_PMC,
++ .size = 2,
++ .u.w.read = pm_caps_read,
++ },
++ {
++ .offset = PCI_PM_CTRL,
++ .size = 2,
++ .init = pm_ctrl_init,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = pm_ctrl_write,
++ },
++ {
++ .offset = PCI_PM_PPB_EXTENSIONS,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .offset = PCI_PM_DATA_REGISTER,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .size = 0,
++ },
++};
++
++struct pciback_config_capability pciback_config_capability_pm = {
++ .capability = PCI_CAP_ID_PM,
++ .fields = caplist_pm,
++};
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_capability_vpd.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability_vpd.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_capability_vpd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_capability_vpd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,42 @@
++/*
++ * PCI Backend - Configuration space overlay for Vital Product Data
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
++ void *data)
++{
++ /* Disallow writes to the vital product data */
++ if (value & PCI_VPD_ADDR_F)
++ return PCIBIOS_SET_FAILED;
++ else
++ return pci_write_config_word(dev, offset, value);
++}
++
++static struct config_field caplist_vpd[] = {
++ {
++ .offset = PCI_VPD_ADDR,
++ .size = 2,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = vpd_address_write,
++ },
++ {
++ .offset = PCI_VPD_DATA,
++ .size = 4,
++ .u.dw.read = pciback_read_config_dword,
++ .u.dw.write = NULL,
++ },
++ {
++ .size = 0,
++ },
++};
++
++struct pciback_config_capability pciback_config_capability_vpd = {
++ .capability = PCI_CAP_ID_VPD,
++ .fields = caplist_vpd,
++};
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space.h linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space.h
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Common data structures for overriding the configuration space
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#ifndef __XEN_PCIBACK_CONF_SPACE_H__
++#define __XEN_PCIBACK_CONF_SPACE_H__
++
++#include <linux/list.h>
++#include <linux/err.h>
++
++/* conf_field_init can return an errno in a ptr with ERR_PTR() */
++typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
++typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
++typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
++
++typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
++ void *data);
++typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
++ void *data);
++typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
++ void *data);
++typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
++ void *data);
++typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
++ void *data);
++typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
++ void *data);
++
++/* These are the fields within the configuration space which we
++ * are interested in intercepting reads/writes to and changing their
++ * values.
++ */
++struct config_field {
++ unsigned int offset;
++ unsigned int size;
++ unsigned int mask;
++ conf_field_init init;
++ conf_field_reset reset;
++ conf_field_free release;
++ void (*clean) (struct config_field * field);
++ union {
++ struct {
++ conf_dword_write write;
++ conf_dword_read read;
++ } dw;
++ struct {
++ conf_word_write write;
++ conf_word_read read;
++ } w;
++ struct {
++ conf_byte_write write;
++ conf_byte_read read;
++ } b;
++ } u;
++ struct list_head list;
++};
++
++struct config_field_entry {
++ struct list_head list;
++ struct config_field *field;
++ unsigned int base_offset;
++ void *data;
++};
++
++#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
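++
++/*
++ * Editorial example: a capability field registered with base_offset =
++ * 0x50 (where the capability sits in config space) and field->offset =
++ * PCI_PM_CTRL (0x04) gives OFFSET() == 0x54, the register's absolute
++ * config-space address.
++ */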
++
++/* Add fields to a device - the add_fields macro expects to get a pointer to
++ * the first entry in an array (of which the ending is marked by size==0)
++ */
++int pciback_config_add_field_offset(struct pci_dev *dev,
++ struct config_field *field,
++ unsigned int offset);
++
++static inline int pciback_config_add_field(struct pci_dev *dev,
++ struct config_field *field)
++{
++ return pciback_config_add_field_offset(dev, field, 0);
++}
++
++static inline int pciback_config_add_fields(struct pci_dev *dev,
++ struct config_field *field)
++{
++ int i, err = 0;
++ for (i = 0; field[i].size != 0; i++) {
++ err = pciback_config_add_field(dev, &field[i]);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
++ struct config_field *field,
++ unsigned int offset)
++{
++ int i, err = 0;
++ for (i = 0; field[i].size != 0; i++) {
++ err = pciback_config_add_field_offset(dev, &field[i], offset);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++/* Read/Write the real configuration space */
++int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
++ void *data);
++int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
++ void *data);
++int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
++ void *data);
++int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
++ void *data);
++int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
++ void *data);
++int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
++ void *data);
++
++int pciback_config_capability_init(void);
++
++int pciback_config_header_add_fields(struct pci_dev *dev);
++int pciback_config_capability_add_fields(struct pci_dev *dev);
++
++#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_header.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_header.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_header.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_header.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,323 @@
++/*
++ * PCI Backend - Handles the virtual fields in the configuration space headers.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++
++struct pci_bar_info {
++ u32 val;
++ u32 len_val;
++ int which;
++};
++
++#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
++#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
++
++static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
++{
++ int err;
++
++ if (!dev->is_enabled && is_enable_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: enable\n",
++ pci_name(dev));
++ err = pci_enable_device(dev);
++ if (err)
++ return err;
++ } else if (dev->is_enabled && !is_enable_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: disable\n",
++ pci_name(dev));
++ pci_disable_device(dev);
++ }
++
++ if (!dev->is_busmaster && is_master_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: set bus master\n",
++ pci_name(dev));
++ pci_set_master(dev);
++ }
++
++ if (value & PCI_COMMAND_INVALIDATE) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG
++ "pciback: %s: enable memory-write-invalidate\n",
++ pci_name(dev));
++ err = pci_set_mwi(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
++ pci_name(dev), err);
++ value &= ~PCI_COMMAND_INVALIDATE;
++ }
++ }
++
++ return pci_write_config_word(dev, offset, value);
++}
++
++static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ /* A write to obtain the length must happen as a 32-bit write.
++ * This does not (yet) support writing individual bytes
++ */
++ if (value == ~PCI_ROM_ADDRESS_ENABLE)
++ bar->which = 1;
++ else {
++ u32 tmpval;
++ pci_read_config_dword(dev, offset, &tmpval);
++ if (tmpval != bar->val && value == bar->val) {
++ /* Allow restoration of bar value. */
++ pci_write_config_dword(dev, offset, bar->val);
++ }
++ bar->which = 0;
++ }
++
++ /* Do we need to support enabling/disabling the rom address here? */
++
++ return 0;
++}
++
++/* For the BARs, only allow writes which write ~0 or
++ * the correct resource information
++ * (Needed for when the driver probes the resource usage)
++ */
++static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ /* A write to obtain the length must happen as a 32-bit write.
++ * This does not (yet) support writing individual bytes
++ */
++ if (value == ~0)
++ bar->which = 1;
++ else {
++ u32 tmpval;
++ pci_read_config_dword(dev, offset, &tmpval);
++ if (tmpval != bar->val && value == bar->val) {
++ /* Allow restoration of bar value. */
++ pci_write_config_dword(dev, offset, bar->val);
++ }
++ bar->which = 0;
++ }
++
++ return 0;
++}
++
++static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ *value = bar->which ? bar->len_val : bar->val;
++
++ return 0;
++}
++
++static inline void read_dev_bar(struct pci_dev *dev,
++ struct pci_bar_info *bar_info, int offset,
++ u32 len_mask)
++{
++ pci_read_config_dword(dev, offset, &bar_info->val);
++ pci_write_config_dword(dev, offset, len_mask);
++ pci_read_config_dword(dev, offset, &bar_info->len_val);
++ pci_write_config_dword(dev, offset, bar_info->val);
++}
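++
++/*
++ * Editorial example of the sizing protocol above: for a 32-bit memory
++ * BAR, writing ~0 and reading back e.g. 0xfffff000 means the region
++ * spans (~(0xfffff000 & ~0xf) + 1) = 0x1000 bytes. Guests perform the
++ * same ~0 write, which bar_write()/rom_write() intercept so only
++ * bar->which flips and the real BAR is never clobbered.
++ */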
++
++static void *bar_init(struct pci_dev *dev, int offset)
++{
++ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++
++ if (!bar)
++ return ERR_PTR(-ENOMEM);
++
++ read_dev_bar(dev, bar, offset, ~0);
++ bar->which = 0;
++
++ return bar;
++}
++
++static void *rom_init(struct pci_dev *dev, int offset)
++{
++ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++
++ if (!bar)
++ return ERR_PTR(-ENOMEM);
++
++ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
++ bar->which = 0;
++
++ return bar;
++}
++
++static void bar_reset(struct pci_dev *dev, int offset, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ bar->which = 0;
++}
++
++static void bar_release(struct pci_dev *dev, int offset, void *data)
++{
++ kfree(data);
++}
++
++static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
++ void *data)
++{
++ *value = (u8) dev->irq;
++
++ return 0;
++}
++
++static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
++{
++ u8 cur_value;
++ int err;
++
++ err = pci_read_config_byte(dev, offset, &cur_value);
++ if (err)
++ goto out;
++
++ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
++ || value == PCI_BIST_START)
++ err = pci_write_config_byte(dev, offset, value);
++
++ out:
++ return err;
++}
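++
++/*
++ * Editorial note: the guard above permits exactly two kinds of write -
++ * one that changes nothing besides the self-test start bit, or the
++ * plain PCI_BIST_START command - so a guest can kick off BIST but
++ * cannot alter the other BIST register bits.
++ */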
++
++static struct config_field header_common[] = {
++ {
++ .offset = PCI_COMMAND,
++ .size = 2,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = command_write,
++ },
++ {
++ .offset = PCI_INTERRUPT_LINE,
++ .size = 1,
++ .u.b.read = interrupt_read,
++ },
++ {
++ .offset = PCI_INTERRUPT_PIN,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ /* Any side effects of letting driver domain control cache line? */
++ .offset = PCI_CACHE_LINE_SIZE,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ .u.b.write = pciback_write_config_byte,
++ },
++ {
++ .offset = PCI_LATENCY_TIMER,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .offset = PCI_BIST,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ .u.b.write = bist_write,
++ },
++ {
++ .size = 0,
++ },
++};
++
++#define CFG_FIELD_BAR(reg_offset) \
++ { \
++ .offset = reg_offset, \
++ .size = 4, \
++ .init = bar_init, \
++ .reset = bar_reset, \
++ .release = bar_release, \
++ .u.dw.read = bar_read, \
++ .u.dw.write = bar_write, \
++ }
++
++#define CFG_FIELD_ROM(reg_offset) \
++ { \
++ .offset = reg_offset, \
++ .size = 4, \
++ .init = rom_init, \
++ .reset = bar_reset, \
++ .release = bar_release, \
++ .u.dw.read = bar_read, \
++ .u.dw.write = rom_write, \
++ }
++
++static struct config_field header_0[] = {
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
++ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
++ {
++ .size = 0,
++ },
++};
++
++static struct config_field header_1[] = {
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
++ {
++ .size = 0,
++ },
++};
++
++int pciback_config_header_add_fields(struct pci_dev *dev)
++{
++ int err;
++
++ err = pciback_config_add_fields(dev, header_common);
++ if (err)
++ goto out;
++
++ switch (dev->hdr_type) {
++ case PCI_HEADER_TYPE_NORMAL:
++ err = pciback_config_add_fields(dev, header_0);
++ break;
++
++ case PCI_HEADER_TYPE_BRIDGE:
++ err = pciback_config_add_fields(dev, header_1);
++ break;
++
++ default:
++ err = -EINVAL;
++ printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
++ pci_name(dev), dev->hdr_type);
++ break;
++ }
++
++ out:
++ return err;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_quirks.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_quirks.c
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_quirks.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_quirks.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Handle special overlays for broken devices.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Author: Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++LIST_HEAD(pciback_quirks);
++
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *tmp_quirk;
++
++ list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
++ if (pci_match_id(&tmp_quirk->devid, dev))
++ goto out;
++ tmp_quirk = NULL;
++ printk(KERN_DEBUG
++ "quirk didn't match any device pciback knows about\n");
++ out:
++ return tmp_quirk;
++}
++
++static inline void register_quirk(struct pciback_config_quirk *quirk)
++{
++ list_add_tail(&quirk->quirks_list, &pciback_quirks);
++}
++
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
++{
++ int ret = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++		if (OFFSET(cfg_entry) == reg) {
++ ret = 1;
++ break;
++ }
++ }
++ return ret;
++}
++
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++ *field)
++{
++ int err = 0;
++
++ switch (field->size) {
++ case 1:
++ field->u.b.read = pciback_read_config_byte;
++ field->u.b.write = pciback_write_config_byte;
++ break;
++ case 2:
++ field->u.w.read = pciback_read_config_word;
++ field->u.w.write = pciback_write_config_word;
++ break;
++ case 4:
++ field->u.dw.read = pciback_read_config_dword;
++ field->u.dw.write = pciback_write_config_dword;
++ break;
++ default:
++ err = -EINVAL;
++ goto out;
++ }
++
++ pciback_config_add_field(dev, field);
++
++ out:
++ return err;
++}
++
++int pciback_config_quirks_init(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *quirk;
++ int ret = 0;
++
++ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
++ if (!quirk) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ quirk->devid.vendor = dev->vendor;
++ quirk->devid.device = dev->device;
++ quirk->devid.subvendor = dev->subsystem_vendor;
++ quirk->devid.subdevice = dev->subsystem_device;
++ quirk->devid.class = 0;
++ quirk->devid.class_mask = 0;
++ quirk->devid.driver_data = 0UL;
++
++ quirk->pdev = dev;
++
++ register_quirk(quirk);
++ out:
++ return ret;
++}
++
++void pciback_config_field_free(struct config_field *field)
++{
++ kfree(field);
++}
++
++int pciback_config_quirk_release(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *quirk;
++ int ret = 0;
++
++ quirk = pciback_find_quirk(dev);
++ if (!quirk) {
++ ret = -ENXIO;
++ goto out;
++ }
++
++ list_del(&quirk->quirks_list);
++ kfree(quirk);
++
++ out:
++ return ret;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/conf_space_quirks.h linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_quirks.h
+--- linux-2.6.18.8/drivers/xen/pciback/conf_space_quirks.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/conf_space_quirks.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,35 @@
++/*
++ * PCI Backend - Data structures for special overlays for broken devices.
++ *
++ * Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++
++#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_quirk {
++ struct list_head quirks_list;
++ struct pci_device_id devid;
++ struct pci_dev *pdev;
++};
++
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
++
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++ *field);
++
++int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
++
++int pciback_config_quirks_init(struct pci_dev *dev);
++
++void pciback_config_field_free(struct config_field *field);
++
++int pciback_config_quirk_release(struct pci_dev *dev);
++
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/controller.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/controller.c
+--- linux-2.6.18.8/drivers/xen/pciback/controller.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/controller.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,408 @@
++/*
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ * Alex Williamson <alex.williamson@hp.com>
++ *
++ * PCI "Controller" Backend - virtualize PCI bus topology based on PCI
++ * controllers. Devices under the same PCI controller are exposed on the
++ * same virtual domain:bus. Within a bus, device slots are virtualized
++ * to compact the bus.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/acpi.h>
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++#define PCI_MAX_BUSSES 255
++#define PCI_MAX_SLOTS 32
++
++struct controller_dev_entry {
++ struct list_head list;
++ struct pci_dev *dev;
++ unsigned int devfn;
++};
++
++struct controller_list_entry {
++ struct list_head list;
++ struct pci_controller *controller;
++ unsigned int domain;
++ unsigned int bus;
++ unsigned int next_devfn;
++ struct list_head dev_list;
++};
++
++struct controller_dev_data {
++ struct list_head list;
++ unsigned int next_domain;
++ unsigned int next_bus;
++ spinlock_t lock;
++};
++
++struct walk_info {
++ struct pciback_device *pdev;
++ int resource_count;
++ int root_num;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_dev_entry *dev_entry;
++ struct controller_list_entry *cntrl_entry;
++ struct pci_dev *dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ if (cntrl_entry->domain != domain ||
++ cntrl_entry->bus != bus)
++ continue;
++
++ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
++ if (devfn == dev_entry->devfn) {
++ dev = dev_entry->dev;
++ goto found;
++ }
++ }
++ }
++found:
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_dev_entry *dev_entry;
++ struct controller_list_entry *cntrl_entry;
++ struct pci_controller *dev_controller = PCI_CONTROLLER(dev);
++ unsigned long flags;
++ int ret = 0, found = 0;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ /* Look to see if we already have a domain:bus for this controller */
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ if (cntrl_entry->controller == dev_controller) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found) {
++ cntrl_entry = kmalloc(sizeof(*cntrl_entry), GFP_ATOMIC);
++ if (!cntrl_entry) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ cntrl_entry->controller = dev_controller;
++ cntrl_entry->next_devfn = PCI_DEVFN(0, 0);
++
++ cntrl_entry->domain = dev_data->next_domain;
++ cntrl_entry->bus = dev_data->next_bus++;
++ if (dev_data->next_bus > PCI_MAX_BUSSES) {
++ dev_data->next_domain++;
++ dev_data->next_bus = 0;
++ }
++
++ INIT_LIST_HEAD(&cntrl_entry->dev_list);
++
++ list_add_tail(&cntrl_entry->list, &dev_data->list);
++ }
++
++ if (PCI_SLOT(cntrl_entry->next_devfn) > PCI_MAX_SLOTS) {
++ /*
++ * While it seems unlikely, this can actually happen if
++ * a controller has P2P bridges under it.
++ */
++ xenbus_dev_fatal(pdev->xdev, -ENOSPC, "Virtual bus %04x:%02x "
++ "is full, no room to export %04x:%02x:%02x.%x",
++ cntrl_entry->domain, cntrl_entry->bus,
++ pci_domain_nr(dev->bus), dev->bus->number,
++ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_ATOMIC);
++ if (!dev_entry) {
++ if (list_empty(&cntrl_entry->dev_list)) {
++ list_del(&cntrl_entry->list);
++ kfree(cntrl_entry);
++ }
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ dev_entry->dev = dev;
++ dev_entry->devfn = cntrl_entry->next_devfn;
++
++ list_add_tail(&dev_entry->list, &cntrl_entry->dev_list);
++
++ cntrl_entry->next_devfn += PCI_DEVFN(1, 0);
++
++out:
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ /* TODO: Publish virtual domain:bus:slot.func here. */
++
++ return ret;
++}
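++
++/*
++ * Editorial example: since PCI_DEVFN(slot, fn) == (slot << 3) | fn,
++ * next_devfn advances 00.0 -> 01.0 -> 02.0 -> ..., so each exported
++ * device lands on function 0 of the next virtual slot, independent of
++ * its real slot/function.
++ */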
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_list_entry *cntrl_entry;
++ struct controller_dev_entry *dev_entry = NULL;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ if (cntrl_entry->controller != PCI_CONTROLLER(dev))
++ continue;
++
++ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
++ if (dev_entry->dev == dev) {
++ found_dev = dev_entry->dev;
++ break;
++ }
++ }
++ }
++
++ if (!found_dev) {
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++ return;
++ }
++
++ list_del(&dev_entry->list);
++ kfree(dev_entry);
++
++ if (list_empty(&cntrl_entry->dev_list)) {
++ list_del(&cntrl_entry->list);
++ kfree(cntrl_entry);
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ struct controller_dev_data *dev_data;
++
++ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++ if (!dev_data)
++ return -ENOMEM;
++
++ spin_lock_init(&dev_data->lock);
++
++ INIT_LIST_HEAD(&dev_data->list);
++
++ /* Starting domain:bus numbers */
++ dev_data->next_domain = 0;
++ dev_data->next_bus = 0;
++
++ pdev->pci_dev_data = dev_data;
++
++ return 0;
++}
++
++static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
++{
++ struct walk_info *info = data;
++ struct acpi_resource_address64 addr;
++ acpi_status status;
++ int i, len, err;
++ char str[32], tmp[3];
++ unsigned char *ptr, *buf;
++
++ status = acpi_resource_to_address64(res, &addr);
++
++ /* Do we care about this range? Let's check. */
++ if (!ACPI_SUCCESS(status) ||
++ !(addr.resource_type == ACPI_MEMORY_RANGE ||
++ addr.resource_type == ACPI_IO_RANGE) ||
++ !addr.address_length || addr.producer_consumer != ACPI_PRODUCER)
++ return AE_OK;
++
++ /*
++ * Furthermore, we really only care to tell the guest about
++ * address ranges that require address translation of some sort.
++ */
++ if (!(addr.resource_type == ACPI_MEMORY_RANGE &&
++ addr.info.mem.translation) &&
++ !(addr.resource_type == ACPI_IO_RANGE &&
++ addr.info.io.translation))
++ return AE_OK;
++
++ /* Store the resource in xenbus for the guest */
++ len = snprintf(str, sizeof(str), "root-%d-resource-%d",
++ info->root_num, info->resource_count);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return AE_OK;
++
++ buf = kzalloc((sizeof(*res) * 2) + 1, GFP_KERNEL);
++ if (!buf)
++ return AE_OK;
++
++ /* Clean out resource_source */
++ res->data.address64.resource_source.index = 0xFF;
++ res->data.address64.resource_source.string_length = 0;
++ res->data.address64.resource_source.string_ptr = NULL;
++
++ ptr = (unsigned char *)res;
++
++ /* Turn the acpi_resource into an ASCII byte stream */
++ for (i = 0; i < sizeof(*res); i++) {
++ snprintf(tmp, sizeof(tmp), "%02x", ptr[i]);
++ strncat(buf, tmp, 2);
++ }
++
++ err = xenbus_printf(XBT_NIL, info->pdev->xdev->nodename,
++ str, "%s", buf);
++
++ if (!err)
++ info->resource_count++;
++
++ kfree(buf);
++
++ return AE_OK;
++}
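++
++/*
++ * Editorial note: every byte of the acpi_resource is emitted as two hex
++ * characters, so the guest can reconstruct the structure byte by byte.
++ * The "root-resource-magic" value written below is this encoded size,
++ * (sizeof(struct acpi_resource) * 2) + 1, letting both ends verify they
++ * agree on the structure's layout.
++ */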
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_root_cb)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_list_entry *cntrl_entry;
++ int i, root_num, len, err = 0;
++ unsigned int domain, bus;
++ char str[64];
++ struct walk_info info;
++
++ spin_lock(&dev_data->lock);
++
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ /* First publish all the domain:bus info */
++ err = publish_root_cb(pdev, cntrl_entry->domain,
++ cntrl_entry->bus);
++ if (err)
++ goto out;
++
++ /*
++ * Now figure out which root-%d this belongs to
++ * so we can associate resources with it.
++ */
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", &root_num);
++
++ if (err != 1)
++ goto out;
++
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ str, "%x:%x", &domain, &bus);
++ if (err != 2)
++ goto out;
++
++ /* Is this the one we just published? */
++ if (domain == cntrl_entry->domain &&
++ bus == cntrl_entry->bus)
++ break;
++ }
++
++ if (i == root_num)
++ goto out;
++
++ info.pdev = pdev;
++ info.resource_count = 0;
++ info.root_num = i;
++
++ /* Let ACPI do the heavy lifting on decoding resources */
++ acpi_walk_resources(cntrl_entry->controller->acpi_handle,
++ METHOD_NAME__CRS, write_xenbus_resource,
++ &info);
++
++			/* No resources. OK, on to the next one. */
++ if (!info.resource_count)
++ continue;
++
++ /* Store the number of resources we wrote for this root-%d */
++ len = snprintf(str, sizeof(str), "root-%d-resources", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%d", info.resource_count);
++ if (err)
++ goto out;
++ }
++
++ /* Finally, write some magic to synchronize with the guest. */
++ len = snprintf(str, sizeof(str), "root-resource-magic");
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%lx", (sizeof(struct acpi_resource) * 2) + 1);
++
++out:
++ spin_unlock(&dev_data->lock);
++
++ return err;
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_list_entry *cntrl_entry, *c;
++ struct controller_dev_entry *dev_entry, *d;
++
++ list_for_each_entry_safe(cntrl_entry, c, &dev_data->list, list) {
++ list_for_each_entry_safe(dev_entry, d,
++ &cntrl_entry->dev_list, list) {
++ list_del(&dev_entry->list);
++ pcistub_put_pci_dev(dev_entry->dev);
++ kfree(dev_entry);
++ }
++ list_del(&cntrl_entry->list);
++ kfree(cntrl_entry);
++ }
++
++ kfree(dev_data);
++ pdev->pci_dev_data = NULL;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/pciback/Makefile
+--- linux-2.6.18.8/drivers/xen/pciback/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,17 @@
++obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
++
++pciback-y := pci_stub.o pciback_ops.o xenbus.o
++pciback-y += conf_space.o conf_space_header.o \
++ conf_space_capability.o \
++ conf_space_capability_vpd.o \
++ conf_space_capability_pm.o \
++ conf_space_quirks.o
++pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
++
++ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/passthrough.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/passthrough.c
+--- linux-2.6.18.8/drivers/xen/pciback/passthrough.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/passthrough.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,166 @@
++/*
++ * PCI Backend - Provides restricted access to the real PCI bus topology
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++struct passthrough_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct list_head dev_list;
++ spinlock_t lock;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry;
++ struct pci_dev *dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
++ && bus == (unsigned int)dev_entry->dev->bus->number
++ && devfn == dev_entry->dev->devfn) {
++ dev = dev_entry->dev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry;
++ unsigned long flags;
++ unsigned int domain, bus, devfn;
++ int err;
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++ if (!dev_entry)
++ return -ENOMEM;
++ dev_entry->dev = dev;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++ list_add_tail(&dev_entry->list, &dev_data->dev_list);
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ /* Publish this device. */
++ domain = (unsigned int)pci_domain_nr(dev->bus);
++ bus = (unsigned int)dev->bus->number;
++ devfn = dev->devfn;
++ err = publish_cb(pdev, domain, bus, devfn, devid);
++
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *t;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++ if (dev_entry->dev == dev) {
++ list_del(&dev_entry->list);
++ found_dev = dev_entry->dev;
++ kfree(dev_entry);
++ }
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ struct passthrough_dev_data *dev_data;
++
++ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++ if (!dev_data)
++ return -ENOMEM;
++
++ spin_lock_init(&dev_data->lock);
++
++ INIT_LIST_HEAD(&dev_data->dev_list);
++
++ pdev->pci_dev_data = dev_data;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_root_cb)
++{
++ int err = 0;
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *e;
++ struct pci_dev *dev;
++ int found;
++ unsigned int domain, bus;
++
++ spin_lock(&dev_data->lock);
++
++ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++ /* Only publish this device as a root if none of its
++ * parent bridges are exported
++ */
++ found = 0;
++ dev = dev_entry->dev->bus->self;
++ for (; !found && dev != NULL; dev = dev->bus->self) {
++ list_for_each_entry(e, &dev_data->dev_list, list) {
++ if (dev == e->dev) {
++ found = 1;
++ break;
++ }
++ }
++ }
++
++ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
++ bus = (unsigned int)dev_entry->dev->bus->number;
++
++ if (!found) {
++ err = publish_root_cb(pdev, domain, bus);
++ if (err)
++ break;
++ }
++ }
++
++ spin_unlock(&dev_data->lock);
++
++ return err;
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *t;
++
++ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++ list_del(&dev_entry->list);
++ pcistub_put_pci_dev(dev_entry->dev);
++ kfree(dev_entry);
++ }
++
++ kfree(dev_data);
++ pdev->pci_dev_data = NULL;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/pciback.h linux-2.6.18-xen-3.3.0/drivers/xen/pciback/pciback.h
+--- linux-2.6.18.8/drivers/xen/pciback/pciback.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/pciback.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,111 @@
++/*
++ * PCI Backend Common Data Structures & Function Declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIBACK_H__
++#define __XEN_PCIBACK_H__
++
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <xen/xenbus.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <asm/atomic.h>
++#include <xen/interface/io/pciif.h>
++
++struct pci_dev_entry {
++ struct list_head list;
++ struct pci_dev *dev;
++};
++
++#define _PDEVF_op_active (0)
++#define PDEVF_op_active (1<<(_PDEVF_op_active))
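++
++/* _PDEVF_op_active is the bit number used with the atomic bit operations
++ * on pdev->flags (test_and_set_bit/clear_bit); PDEVF_op_active is the
++ * corresponding mask form.
++ */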
++
++struct pciback_device {
++ void *pci_dev_data;
++ spinlock_t dev_lock;
++
++ struct xenbus_device *xdev;
++
++ struct xenbus_watch be_watch;
++ u8 be_watching;
++
++ int evtchn_irq;
++
++ struct vm_struct *sh_area;
++ struct xen_pci_sharedinfo *sh_info;
++
++ unsigned long flags;
++
++ struct work_struct op_work;
++};
++
++struct pciback_dev_data {
++ struct list_head config_fields;
++ int permissive;
++ int warned_on_write;
++};
++
++/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++ int domain, int bus,
++ int slot, int func);
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++ struct pci_dev *dev);
++void pcistub_put_pci_dev(struct pci_dev *dev);
++
++/* Ensure a device is turned off or reset */
++void pciback_reset_device(struct pci_dev *pdev);
++
++/* Access a virtual configuration space for a PCI device */
++int pciback_config_init(void);
++int pciback_config_init_dev(struct pci_dev *dev);
++void pciback_config_free_dyn_fields(struct pci_dev *dev);
++void pciback_config_reset_dev(struct pci_dev *dev);
++void pciback_config_free_dev(struct pci_dev *dev);
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++ u32 *ret_val);
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
++
++/* Handle requests for specific devices from the frontend */
++typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn, unsigned int devid);
++typedef int (*publish_pci_root_cb) (struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus);
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb);
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn);
++int pciback_init_devices(struct pciback_device *pdev);
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb cb);
++void pciback_release_devices(struct pciback_device *pdev);
++
++/* Handles events from front-end */
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
++void pciback_do_op(void *data);
++
++int pciback_xenbus_register(void);
++void pciback_xenbus_unregister(void);
++
++#ifdef CONFIG_PCI_MSI
++int pciback_enable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++
++int pciback_disable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++
++int pciback_enable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++
++int pciback_disable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++#endif
++extern int verbose_request;
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/pciback_ops.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/pciback_ops.c
+--- linux-2.6.18.8/drivers/xen/pciback/pciback_ops.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/pciback_ops.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,117 @@
++/*
++ * PCI Backend Operations - respond to PCI requests from Frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <asm/bitops.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
++
++int verbose_request = 0;
++module_param(verbose_request, int, 0644);
++
++/* Ensure a device is "turned off" and ready to be exported.
++ * (Also see pciback_config_reset to ensure virtual configuration space is
++ * ready to be re-exported)
++ */
++void pciback_reset_device(struct pci_dev *dev)
++{
++ u16 cmd;
++
++ /* Disable devices (but not bridges) */
++ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
++ pci_disable_device(dev);
++
++ pci_write_config_word(dev, PCI_COMMAND, 0);
++
++ dev->is_enabled = 0;
++ dev->is_busmaster = 0;
++ } else {
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (cmd & (PCI_COMMAND_INVALIDATE)) {
++ cmd &= ~(PCI_COMMAND_INVALIDATE);
++ pci_write_config_word(dev, PCI_COMMAND, cmd);
++
++ dev->is_busmaster = 0;
++ }
++ }
++}
++
++static inline void test_and_schedule_op(struct pciback_device *pdev)
++{
++ /* Check that frontend is requesting an operation and that we are not
++ * already processing a request */
++ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
++ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
++ schedule_work(&pdev->op_work);
++}
++
++/* Performing the configuration space reads/writes must not be done in atomic
++ * context because some of the pci_* functions can sleep (mostly due to ACPI
++ * use of semaphores). This function is intended to be called from a work
++ * queue in process context taking a struct pciback_device as a parameter */
++void pciback_do_op(void *data)
++{
++ struct pciback_device *pdev = data;
++ struct pci_dev *dev;
++ struct xen_pci_op *op = &pdev->sh_info->op;
++
++ dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
++
++ if (dev == NULL)
++ op->err = XEN_PCI_ERR_dev_not_found;
++ else
++ {
++ switch (op->cmd)
++ {
++ case XEN_PCI_OP_conf_read:
++ op->err = pciback_config_read(dev,
++ op->offset, op->size, &op->value);
++ break;
++ case XEN_PCI_OP_conf_write:
++ op->err = pciback_config_write(dev,
++ op->offset, op->size, op->value);
++ break;
++#ifdef CONFIG_PCI_MSI
++ case XEN_PCI_OP_enable_msi:
++ op->err = pciback_enable_msi(pdev, dev, op);
++ break;
++ case XEN_PCI_OP_disable_msi:
++ op->err = pciback_disable_msi(pdev, dev, op);
++ break;
++ case XEN_PCI_OP_enable_msix:
++ op->err = pciback_enable_msix(pdev, dev, op);
++ break;
++ case XEN_PCI_OP_disable_msix:
++ op->err = pciback_disable_msix(pdev, dev, op);
++ break;
++#endif
++ default:
++ op->err = XEN_PCI_ERR_not_implemented;
++ break;
++ }
++ }
++ /* Tell the driver domain that we're done. */
++ wmb();
++ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++ notify_remote_via_irq(pdev->evtchn_irq);
++
++ /* Mark that we're done. */
++ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
++ clear_bit(_PDEVF_op_active, &pdev->flags);
++ smp_mb__after_clear_bit(); /* /before/ final check for work */
++
++ /* Check to see if the driver domain tried to start another request in
++ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
++ test_and_schedule_op(pdev);
++}
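++
++/* The request/response handshake implemented above is, roughly:
++ *
++ *   frontend: fills in sh_info->op, sets _XEN_PCIF_active, notifies the
++ *             event channel
++ *   backend:  pciback_handle_event -> test_and_schedule_op sets
++ *             _PDEVF_op_active and schedules pciback_do_op
++ *   backend:  performs the op, writes op->err, clears _XEN_PCIF_active,
++ *             notifies the frontend, clears _PDEVF_op_active and re-checks
++ *             for a request that raced in between the two clears
++ */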
++
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
++{
++ struct pciback_device *pdev = dev_id;
++
++ test_and_schedule_op(pdev);
++
++ return IRQ_HANDLED;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/pci_stub.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/pci_stub.c
+--- linux-2.6.18.8/drivers/xen/pciback/pci_stub.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/pci_stub.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,948 @@
++/*
++ * PCI Stub Driver - Grabs devices in backend to be exported later
++ *
++ * Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/kref.h>
++#include <asm/atomic.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++static char *pci_devs_to_hide = NULL;
++module_param_named(hide, pci_devs_to_hide, charp, 0444);
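++
++/* For illustration, devices can be hidden from this domain's PCI drivers
++ * either on the kernel command line when pciback is built in:
++ *   pciback.hide=(0000:00:01.2)(03:00.0)
++ * or when loaded as a module:
++ *   modprobe pciback hide=(03:00.0)
++ * (see the parser in pcistub_init below)
++ */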
++
++struct pcistub_device_id {
++ struct list_head slot_list;
++ int domain;
++ unsigned char bus;
++ unsigned int devfn;
++};
++static LIST_HEAD(pcistub_device_ids);
++static DEFINE_SPINLOCK(device_ids_lock);
++
++struct pcistub_device {
++ struct kref kref;
++ struct list_head dev_list;
++ spinlock_t lock;
++
++ struct pci_dev *dev;
++ struct pciback_device *pdev; /* non-NULL if struct pci_dev is in use */
++};
++
++/* Access to pcistub_devices & seized_devices lists and the initialize_devices
++ * flag must be locked with pcistub_devices_lock
++ */
++static DEFINE_SPINLOCK(pcistub_devices_lock);
++static LIST_HEAD(pcistub_devices);
++
++/* wait for device_initcall before initializing our devices
++ * (see pcistub_init_devices_late)
++ */
++static int initialize_devices = 0;
++static LIST_HEAD(seized_devices);
++
++static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++
++ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
++
++ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
++ if (!psdev)
++ return NULL;
++
++ psdev->dev = pci_dev_get(dev);
++ if (!psdev->dev) {
++ kfree(psdev);
++ return NULL;
++ }
++
++ kref_init(&psdev->kref);
++ spin_lock_init(&psdev->lock);
++
++ return psdev;
++}
++
++/* Don't call this directly as it's called by pcistub_device_put */
++static void pcistub_device_release(struct kref *kref)
++{
++ struct pcistub_device *psdev;
++
++ psdev = container_of(kref, struct pcistub_device, kref);
++
++ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
++
++ /* Clean-up the device */
++ pciback_reset_device(psdev->dev);
++ pciback_config_free_dyn_fields(psdev->dev);
++ pciback_config_free_dev(psdev->dev);
++ kfree(pci_get_drvdata(psdev->dev));
++ pci_set_drvdata(psdev->dev, NULL);
++
++ pci_dev_put(psdev->dev);
++
++ kfree(psdev);
++}
++
++static inline void pcistub_device_get(struct pcistub_device *psdev)
++{
++ kref_get(&psdev->kref);
++}
++
++static inline void pcistub_device_put(struct pcistub_device *psdev)
++{
++ kref_put(&psdev->kref, pcistub_device_release);
++}
++
++static struct pcistub_device *pcistub_device_find(int domain, int bus,
++ int slot, int func)
++{
++ struct pcistub_device *psdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev != NULL
++ && domain == pci_domain_nr(psdev->dev->bus)
++ && bus == psdev->dev->bus->number
++ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++ pcistub_device_get(psdev);
++ goto out;
++ }
++ }
++
++ /* didn't find it */
++ psdev = NULL;
++
++ out:
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return psdev;
++}
++
++static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
++ struct pcistub_device *psdev)
++{
++ struct pci_dev *pci_dev = NULL;
++ unsigned long flags;
++
++ pcistub_device_get(psdev);
++
++ spin_lock_irqsave(&psdev->lock, flags);
++ if (!psdev->pdev) {
++ psdev->pdev = pdev;
++ pci_dev = psdev->dev;
++ }
++ spin_unlock_irqrestore(&psdev->lock, flags);
++
++ if (!pci_dev)
++ pcistub_device_put(psdev);
++
++ return pci_dev;
++}
++
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++ int domain, int bus,
++ int slot, int func)
++{
++ struct pcistub_device *psdev;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev != NULL
++ && domain == pci_domain_nr(psdev->dev->bus)
++ && bus == psdev->dev->bus->number
++ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return found_dev;
++}
++
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++ struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return found_dev;
++}
++
++void pcistub_put_pci_dev(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev, *found_psdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_psdev = psdev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* A device released here must have been obtained from pcistub first */
++ BUG_ON(!found_psdev);
++
++ /* Clean up the device so it's ready for the next domain */
++ pciback_reset_device(found_psdev->dev);
++ pciback_config_free_dyn_fields(found_psdev->dev);
++ pciback_config_reset_dev(found_psdev->dev);
++
++ spin_lock_irqsave(&found_psdev->lock, flags);
++ found_psdev->pdev = NULL;
++ spin_unlock_irqrestore(&found_psdev->lock, flags);
++
++ pcistub_device_put(found_psdev);
++}
++
++static int __devinit pcistub_match_one(struct pci_dev *dev,
++ struct pcistub_device_id *pdev_id)
++{
++ /* Match the specified device by domain, bus, slot and func, or by
++ * any of the device's parent bridges.
++ */
++ for (; dev != NULL; dev = dev->bus->self) {
++ if (pci_domain_nr(dev->bus) == pdev_id->domain
++ && dev->bus->number == pdev_id->bus
++ && dev->devfn == pdev_id->devfn)
++ return 1;
++
++ /* Sometimes topmost bridge links to itself. */
++ if (dev == dev->bus->self)
++ break;
++ }
++
++ return 0;
++}
++
++static int __devinit pcistub_match(struct pci_dev *dev)
++{
++ struct pcistub_device_id *pdev_id;
++ unsigned long flags;
++ int found = 0;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
++ if (pcistub_match_one(dev, pdev_id)) {
++ found = 1;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return found;
++}
++
++static int __devinit pcistub_init_device(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data;
++ int err = 0;
++
++ dev_dbg(&dev->dev, "initializing...\n");
++
++ /* The PCI backend is not intended to be a module (or to work with
++ * removable PCI devices) yet. If it were, pciback_config_free()
++ * would need to be called somewhere to free the memory allocated
++ * here and then to call kfree(pci_get_drvdata(psdev->dev)).
++ */
++ dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
++ if (!dev_data) {
++ err = -ENOMEM;
++ goto out;
++ }
++ pci_set_drvdata(dev, dev_data);
++
++ dev_dbg(&dev->dev, "initializing config\n");
++ err = pciback_config_init_dev(dev);
++ if (err)
++ goto out;
++
++ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
++ * must do this here because pcibios_enable_device may specify
++ * the pci device's true irq (and possibly its other resources)
++ * if they differ from what's in the configuration space.
++ * This makes the assumption that the device's resources won't
++ * change after this point (otherwise this code may break!)
++ */
++ dev_dbg(&dev->dev, "enabling device\n");
++ err = pci_enable_device(dev);
++ if (err)
++ goto config_release;
++
++ /* Now disable the device (this also ensures some private device
++ * data is setup before we export)
++ */
++ dev_dbg(&dev->dev, "reset device\n");
++ pciback_reset_device(dev);
++
++ return 0;
++
++ config_release:
++ pciback_config_free_dev(dev);
++
++ out:
++ pci_set_drvdata(dev, NULL);
++ kfree(dev_data);
++ return err;
++}
++
++/*
++ * Because some initialization still happens on
++ * devices during fs_initcall, we need to defer
++ * full initialization of our devices until
++ * device_initcall.
++ */
++static int __init pcistub_init_devices_late(void)
++{
++ struct pcistub_device *psdev;
++ unsigned long flags;
++ int err = 0;
++
++ pr_debug("pciback: pcistub_init_devices_late\n");
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ while (!list_empty(&seized_devices)) {
++ psdev = container_of(seized_devices.next,
++ struct pcistub_device, dev_list);
++ list_del(&psdev->dev_list);
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ err = pcistub_init_device(psdev->dev);
++ if (err) {
++ dev_err(&psdev->dev->dev,
++ "error %d initializing device\n", err);
++ kfree(psdev);
++ psdev = NULL;
++ }
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (psdev)
++ list_add_tail(&psdev->dev_list, &pcistub_devices);
++ }
++
++ initialize_devices = 1;
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ return 0;
++}
++
++static int __devinit pcistub_seize(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++ unsigned long flags;
++ int err = 0;
++
++ psdev = pcistub_device_alloc(dev);
++ if (!psdev)
++ return -ENOMEM;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (initialize_devices) {
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* don't want irqs disabled when calling pcistub_init_device */
++ err = pcistub_init_device(psdev->dev);
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (!err)
++ list_add(&psdev->dev_list, &pcistub_devices);
++ } else {
++ dev_dbg(&dev->dev, "deferring initialization\n");
++ list_add(&psdev->dev_list, &seized_devices);
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ if (err)
++ pcistub_device_put(psdev);
++
++ return err;
++}
++
++static int __devinit pcistub_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ int err = 0;
++
++ dev_dbg(&dev->dev, "probing...\n");
++
++ if (pcistub_match(dev)) {
++
++ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
++ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++ dev_err(&dev->dev, "can't export pci devices that "
++ "don't have a normal (0) or bridge (1) "
++ "header type!\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ dev_info(&dev->dev, "seizing device\n");
++ err = pcistub_seize(dev);
++ } else
++ /* Device not on our list of devices to seize */
++ err = -ENODEV;
++
++ out:
++ return err;
++}
++
++static void pcistub_remove(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev, *found_psdev = NULL;
++ unsigned long flags;
++
++ dev_dbg(&dev->dev, "removing\n");
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ pciback_config_quirk_release(dev);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_psdev = psdev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ if (found_psdev) {
++ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
++ found_psdev->pdev);
++
++ if (found_psdev->pdev) {
++ printk(KERN_WARNING "pciback: ****** removing device "
++ "%s while still in-use! ******\n",
++ pci_name(found_psdev->dev));
++ printk(KERN_WARNING "pciback: ****** driver domain may "
++ "still access this device's i/o resources!\n");
++ printk(KERN_WARNING "pciback: ****** shutdown driver "
++ "domain before binding device\n");
++ printk(KERN_WARNING "pciback: ****** to other drivers "
++ "or domains\n");
++
++ pciback_release_pci_dev(found_psdev->pdev,
++ found_psdev->dev);
++ }
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ list_del(&found_psdev->dev_list);
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* the final put for releasing from the list */
++ pcistub_device_put(found_psdev);
++ }
++}
++
++static struct pci_device_id pcistub_ids[] = {
++ {
++ .vendor = PCI_ANY_ID,
++ .device = PCI_ANY_ID,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ {0,},
++};
++
++/*
++ * Note: There is no MODULE_DEVICE_TABLE entry here because this driver
++ * isn't for one particular class of device; we don't want it to be
++ * loaded automatically.
++ */
++
++static struct pci_driver pciback_pci_driver = {
++ .name = "pciback",
++ .id_table = pcistub_ids,
++ .probe = pcistub_probe,
++ .remove = pcistub_remove,
++};
++
++static inline int str_to_slot(const char *buf, int *domain, int *bus,
++ int *slot, int *func)
++{
++ int err;
++
++ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
++ if (err == 4)
++ return 0;
++ else if (err < 0)
++ return -EINVAL;
++
++ /* try again without domain */
++ *domain = 0;
++ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
++ if (err == 3)
++ return 0;
++
++ return -EINVAL;
++}
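++
++/* For example, str_to_slot accepts "0000:00:01.2", or "00:01.2" with the
++ * domain omitted and defaulted to 0. */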
++
++static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
++ *slot, int *func, int *reg, int *size, int *mask)
++{
++ int err;
++
++ err =
++ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
++ func, reg, size, mask);
++ if (err == 7)
++ return 0;
++ return -EINVAL;
++}
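++
++/* For example, "0000:00:01.2-00000040:2:0000ffff" names the device at
++ * 0000:00:01.2 and describes the 2-byte field at config offset 0x40 with
++ * write-mask 0xffff. */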
++
++static int pcistub_device_id_add(int domain, int bus, int slot, int func)
++{
++ struct pcistub_device_id *pci_dev_id;
++ unsigned long flags;
++
++ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
++ if (!pci_dev_id)
++ return -ENOMEM;
++
++ pci_dev_id->domain = domain;
++ pci_dev_id->bus = bus;
++ pci_dev_id->devfn = PCI_DEVFN(slot, func);
++
++ pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
++ domain, bus, slot, func);
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return 0;
++}
++
++static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
++{
++ struct pcistub_device_id *pci_dev_id, *t;
++ int devfn = PCI_DEVFN(slot, func);
++ int err = -ENOENT;
++ unsigned long flags;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
++
++ if (pci_dev_id->domain == domain
++ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
++ /* Don't break out of the loop here, because the same
++ * slot could appear in the list more than once
++ */
++ list_del(&pci_dev_id->slot_list);
++ kfree(pci_dev_id);
++
++ err = 0;
++
++ pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
++ "seize list\n", domain, bus, slot, func);
++ }
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return err;
++}
++
++static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
++ int size, int mask)
++{
++ int err = 0;
++ struct pcistub_device *psdev;
++ struct pci_dev *dev;
++ struct config_field *field;
++
++ psdev = pcistub_device_find(domain, bus, slot, func);
++ if (!psdev || !psdev->dev) {
++ err = -ENODEV;
++ goto out;
++ }
++ dev = psdev->dev;
++
++ field = kzalloc(sizeof(*field), GFP_ATOMIC);
++ if (!field) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ field->offset = reg;
++ field->size = size;
++ field->mask = mask;
++ field->init = NULL;
++ field->reset = NULL;
++ field->release = NULL;
++ field->clean = pciback_config_field_free;
++
++ err = pciback_config_quirks_add_field(dev, field);
++ if (err)
++ kfree(field);
++ out:
++ return err;
++}
++
++static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++
++ err = pcistub_device_id_add(domain, bus, slot, func);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
++
++static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++
++ err = pcistub_device_id_remove(domain, bus, slot, func);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
++
++static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
++{
++ struct pcistub_device_id *pci_dev_id;
++ size_t count = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
++ if (count >= PAGE_SIZE)
++ break;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "%04x:%02x:%02x.%01x\n",
++ pci_dev_id->domain, pci_dev_id->bus,
++ PCI_SLOT(pci_dev_id->devfn),
++ PCI_FUNC(pci_dev_id->devfn));
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return count;
++}
++
++DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
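++
++/* Illustrative sysfs usage, assuming the standard sysfs layout for PCI
++ * drivers (this driver registers as "pciback"):
++ *   echo 0000:00:01.2 > /sys/bus/pci/drivers/pciback/new_slot
++ *   echo 0000:00:01.2 > /sys/bus/pci/drivers/pciback/remove_slot
++ *   cat /sys/bus/pci/drivers/pciback/slots
++ */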
++
++static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func, reg, size, mask;
++ int err;
++
++ err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
++ &mask);
++ if (err)
++ goto out;
++
++ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
++{
++ int count = 0;
++ unsigned long flags;
++ extern struct list_head pciback_quirks;
++ struct pciback_config_quirk *quirk;
++ struct pciback_dev_data *dev_data;
++ struct config_field *field;
++ struct config_field_entry *cfg_entry;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
++ if (count >= PAGE_SIZE)
++ goto out;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
++ quirk->pdev->bus->number,
++ PCI_SLOT(quirk->pdev->devfn),
++ PCI_FUNC(quirk->pdev->devfn),
++ quirk->devid.vendor, quirk->devid.device,
++ quirk->devid.subvendor,
++ quirk->devid.subdevice);
++
++ dev_data = pci_get_drvdata(quirk->pdev);
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++ if (count >= PAGE_SIZE)
++ goto out;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "\t\t%08x:%01x:%08x\n",
++ cfg_entry->base_offset + field->offset,
++ field->size, field->mask);
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return count;
++}
++
++DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
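++
++/* Illustrative usage: writing a quirk string (see str_to_quirk above)
++ * exposes an extra writable config-space field; reading dumps the
++ * currently active quirks:
++ *   echo 0000:00:01.2-00000040:2:0000ffff > /sys/bus/pci/drivers/pciback/quirks
++ */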
++
++static ssize_t permissive_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++ struct pcistub_device *psdev;
++ struct pciback_dev_data *dev_data;
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++ psdev = pcistub_device_find(domain, bus, slot, func);
++ if (!psdev) {
++ err = -ENODEV;
++ goto out;
++ }
++ if (!psdev->dev) {
++ err = -ENODEV;
++ goto release;
++ }
++ dev_data = pci_get_drvdata(psdev->dev);
++ /* the driver data for a device should never be null at this point */
++ if (!dev_data) {
++ err = -ENXIO;
++ goto release;
++ }
++ if (!dev_data->permissive) {
++ dev_data->permissive = 1;
++ /* Let user know that what they're doing could be unsafe */
++ dev_warn(&psdev->dev->dev,
++ "enabling permissive mode configuration space accesses!\n");
++ dev_warn(&psdev->dev->dev,
++ "permissive mode is potentially unsafe!\n");
++ }
++ release:
++ pcistub_device_put(psdev);
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++static ssize_t permissive_show(struct device_driver *drv, char *buf)
++{
++ struct pcistub_device *psdev;
++ struct pciback_dev_data *dev_data;
++ size_t count = 0;
++ unsigned long flags;
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (count >= PAGE_SIZE)
++ break;
++ if (!psdev->dev)
++ continue;
++ dev_data = pci_get_drvdata(psdev->dev);
++ if (!dev_data || !dev_data->permissive)
++ continue;
++ count +=
++ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
++ pci_name(psdev->dev));
++ }
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return count;
++}
++
++DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
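++
++/* Illustrative usage: writing a slot name enables permissive mode for
++ * that device; reading lists the devices currently in permissive mode:
++ *   echo 0000:00:01.2 > /sys/bus/pci/drivers/pciback/permissive
++ */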
++
++#ifdef CONFIG_PCI_MSI
++
++int pciback_get_owner(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++
++ psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number,
++ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
++
++ if (!psdev || !psdev->pdev)
++ return -1;
++
++ return psdev->pdev->xdev->otherend_id;
++}
++#endif
++
++static void pcistub_exit(void)
++{
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
++ driver_remove_file(&pciback_pci_driver.driver,
++ &driver_attr_remove_slot);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
++
++ pci_unregister_driver(&pciback_pci_driver);
++ WARN_ON(unregister_msi_get_owner(pciback_get_owner));
++}
++
++static int __init pcistub_init(void)
++{
++ int pos = 0;
++ int err = 0;
++ int domain, bus, slot, func;
++ int parsed;
++
++ if (pci_devs_to_hide && *pci_devs_to_hide) {
++ do {
++ parsed = 0;
++
++ err = sscanf(pci_devs_to_hide + pos,
++ " (%x:%x:%x.%x) %n",
++ &domain, &bus, &slot, &func, &parsed);
++ if (err != 4) {
++ domain = 0;
++ err = sscanf(pci_devs_to_hide + pos,
++ " (%x:%x.%x) %n",
++ &bus, &slot, &func, &parsed);
++ if (err != 3)
++ goto parse_error;
++ }
++
++ err = pcistub_device_id_add(domain, bus, slot, func);
++ if (err)
++ goto out;
++
++ /* if parsed<=0, we've reached the end of the string */
++ pos += parsed;
++ } while (parsed > 0 && pci_devs_to_hide[pos]);
++ }
++
++ /* If we're the first PCI Device Driver to register, we're the
++ * first one to get offered PCI devices as they become
++ * available (and thus we can be the first to grab them)
++ */
++ err = pci_register_driver(&pciback_pci_driver);
++ if (err < 0)
++ goto out;
++
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_new_slot);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_remove_slot);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_slots);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_quirks);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_permissive);
++
++ if (!err)
++ err = register_msi_get_owner(pciback_get_owner);
++ if (err)
++ pcistub_exit();
++
++ out:
++ return err;
++
++ parse_error:
++ printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
++ pci_devs_to_hide + pos);
++ return -EINVAL;
++}
++
++#ifndef MODULE
++/*
++ * fs_initcall happens before device_initcall
++ * so pciback *should* get called first (because we
++ * want to grab any device before other drivers get
++ * a chance, by being the first PCI device driver
++ * to register)
++ */
++fs_initcall(pcistub_init);
++#endif
++
++static int __init pciback_init(void)
++{
++ int err;
++
++ err = pciback_config_init();
++ if (err)
++ return err;
++
++#ifdef MODULE
++ err = pcistub_init();
++ if (err < 0)
++ return err;
++#endif
++
++ pcistub_init_devices_late();
++ err = pciback_xenbus_register();
++ if (err)
++ pcistub_exit();
++
++ return err;
++}
++
++static void __exit pciback_cleanup(void)
++{
++ pciback_xenbus_unregister();
++ pcistub_exit();
++}
++
++module_init(pciback_init);
++module_exit(pciback_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/slot.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/slot.c
+--- linux-2.6.18.8/drivers/xen/pciback/slot.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/slot.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,157 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil> (vpci.c)
++ * Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++/* There are at most 32 slots in a pci bus. */
++#define PCI_SLOT_MAX 32
++
++#define PCI_BUS_NBR 2
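++
++/* This backend therefore exposes PCI_BUS_NBR virtual buses with
++ * PCI_SLOT_MAX slots each, and only function 0 of a slot is addressable
++ * (see pciback_get_pci_dev below).
++ */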
++
++struct slot_dev_data {
++ /* Access to the slots array must be protected by lock */
++ struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
++ spinlock_t lock;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct pci_dev *dev = NULL;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if (domain != 0 || PCI_FUNC(devfn) != 0)
++ return NULL;
++
++ if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
++ return NULL;
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++ dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ int err = 0, slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++ err = -EFAULT;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Can't export bridges on the virtual PCI bus");
++ goto out;
++ }
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++
++ /* Assign to a new slot on the virtual PCI bus */
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (slot_dev->slots[bus][slot] == NULL) {
++ printk(KERN_INFO
++ "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
++ pci_name(dev), slot, bus);
++ slot_dev->slots[bus][slot] = dev;
++ goto unlock;
++ }
++ }
++
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "No more space on root virtual PCI bus");
++
++ unlock:
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ /* Publish this device. */
++ if (!err)
++ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
++
++ out:
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (slot_dev->slots[bus][slot] == dev) {
++ slot_dev->slots[bus][slot] = NULL;
++ found_dev = dev;
++ goto out;
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev;
++
++ slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
++ if (!slot_dev)
++ return -ENOMEM;
++
++ spin_lock_init(&slot_dev->lock);
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
++ slot_dev->slots[bus][slot] = NULL;
++
++ pdev->pci_dev_data = slot_dev;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_cb)
++{
++ /* The Virtual PCI bus has only one root */
++ return publish_cb(pdev, 0, 0);
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ struct pci_dev *dev;
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ dev = slot_dev->slots[bus][slot];
++ if (dev != NULL)
++ pcistub_put_pci_dev(dev);
++ }
++
++ kfree(slot_dev);
++ pdev->pci_dev_data = NULL;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/vpci.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/vpci.c
+--- linux-2.6.18.8/drivers/xen/pciback/vpci.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/vpci.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,212 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++#define PCI_SLOT_MAX 32
++
++struct vpci_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct list_head dev_list[PCI_SLOT_MAX];
++ spinlock_t lock;
++};
++
++static inline struct list_head *list_first(struct list_head *head)
++{
++ return head->next;
++}
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct pci_dev_entry *entry;
++ struct pci_dev *dev = NULL;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if (domain != 0 || bus != 0)
++ return NULL;
++
++ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ list_for_each_entry(entry,
++ &vpci_dev->dev_list[PCI_SLOT(devfn)],
++ list) {
++ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
++ dev = entry->dev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++ }
++ return dev;
++}
++
++static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
++{
++ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
++ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
++ return 1;
++
++ return 0;
++}
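++
++/* For example, functions 0000:00:1d.0 and 0000:00:1d.7 of one physical
++ * device match here and therefore end up sharing a virtual slot. */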
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ int err = 0, slot, func;
++ struct pci_dev_entry *t, *dev_entry;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++ err = -EFAULT;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Can't export bridges on the virtual PCI bus");
++ goto out;
++ }
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++ if (!dev_entry) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error adding entry to virtual PCI bus");
++ goto out;
++ }
++
++ dev_entry->dev = dev;
++
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ /* Keep multi-function devices together on the virtual PCI bus */
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (!list_empty(&vpci_dev->dev_list[slot])) {
++ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
++ struct pci_dev_entry, list);
++
++ if (match_slot(dev, t->dev)) {
++ pr_info("pciback: vpci: %s: "
++ "assign to virtual slot %d func %d\n",
++ pci_name(dev), slot,
++ PCI_FUNC(dev->devfn));
++ list_add_tail(&dev_entry->list,
++ &vpci_dev->dev_list[slot]);
++ func = PCI_FUNC(dev->devfn);
++ goto unlock;
++ }
++ }
++ }
++
++ /* Assign to a new slot on the virtual PCI bus */
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (list_empty(&vpci_dev->dev_list[slot])) {
++ printk(KERN_INFO
++ "pciback: vpci: %s: assign to virtual slot %d\n",
++ pci_name(dev), slot);
++ list_add_tail(&dev_entry->list,
++ &vpci_dev->dev_list[slot]);
++ func = PCI_FUNC(dev->devfn);
++ goto unlock;
++ }
++ }
++
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "No more space on root virtual PCI bus");
++
++ unlock:
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++
++ /* Publish this device. */
++ if (!err)
++ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
++
++ out:
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ struct pci_dev_entry *e, *tmp;
++ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++ list) {
++ if (e->dev == dev) {
++ list_del(&e->list);
++ found_dev = e->dev;
++ kfree(e);
++ goto out;
++ }
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev;
++
++ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
++ if (!vpci_dev)
++ return -ENOMEM;
++
++ spin_lock_init(&vpci_dev->lock);
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
++ }
++
++ pdev->pci_dev_data = vpci_dev;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_cb)
++{
++ /* The Virtual PCI bus has only one root */
++ return publish_cb(pdev, 0, 0);
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ struct pci_dev_entry *e, *tmp;
++ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++ list) {
++ list_del(&e->list);
++ pcistub_put_pci_dev(e->dev);
++ kfree(e);
++ }
++ }
++
++ kfree(vpci_dev);
++ pdev->pci_dev_data = NULL;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pciback/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/pciback/xenbus.c
+--- linux-2.6.18.8/drivers/xen/pciback/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pciback/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,704 @@
++/*
++ * PCI Backend Xenbus Setup - handles setup with frontend and xend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
++
++#define INVALID_EVTCHN_IRQ (-1)
++
++static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
++{
++ struct pciback_device *pdev;
++
++ pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
++ if (pdev == NULL)
++ goto out;
++ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
++
++ pdev->xdev = xdev;
++ xdev->dev.driver_data = pdev;
++
++ spin_lock_init(&pdev->dev_lock);
++
++ pdev->sh_area = NULL;
++ pdev->sh_info = NULL;
++ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++ pdev->be_watching = 0;
++
++ INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
++
++ if (pciback_init_devices(pdev)) {
++ kfree(pdev);
++ pdev = NULL;
++ }
++ out:
++ return pdev;
++}
++
++static void pciback_disconnect(struct pciback_device *pdev)
++{
++ spin_lock(&pdev->dev_lock);
++
++ /* Ensure the guest can't trigger our handler before removing devices */
++ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
++ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
++ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++ }
++
++ /* If the driver domain started an op, make sure we complete it or
++ * delete it before releasing the shared memory */
++ cancel_delayed_work(&pdev->op_work);
++ flush_scheduled_work();
++
++ if (pdev->sh_info != NULL) {
++ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
++ pdev->sh_info = NULL;
++ }
++
++ spin_unlock(&pdev->dev_lock);
++}
++
++static void free_pdev(struct pciback_device *pdev)
++{
++ if (pdev->be_watching)
++ unregister_xenbus_watch(&pdev->be_watch);
++
++ pciback_disconnect(pdev);
++
++ pciback_release_devices(pdev);
++
++ pdev->xdev->dev.driver_data = NULL;
++ pdev->xdev = NULL;
++
++ kfree(pdev);
++}
++
++static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
++ int remote_evtchn)
++{
++ int err = 0;
++ struct vm_struct *area;
++
++ dev_dbg(&pdev->xdev->dev,
++ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
++ gnt_ref, remote_evtchn);
++
++ area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
++ if (IS_ERR(area)) {
++ err = PTR_ERR(area);
++ goto out;
++ }
++ pdev->sh_area = area;
++ pdev->sh_info = area->addr;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
++ SA_SAMPLE_RANDOM, "pciback", pdev);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error binding event channel to IRQ");
++ goto out;
++ }
++ pdev->evtchn_irq = err;
++ err = 0;
++
++ dev_dbg(&pdev->xdev->dev, "Attached!\n");
++ out:
++ return err;
++}
++
++static int pciback_attach(struct pciback_device *pdev)
++{
++ int err = 0;
++ int gnt_ref, remote_evtchn;
++ char *magic = NULL;
++
++ spin_lock(&pdev->dev_lock);
++
++ /* Make sure we only do this setup once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitialised)
++ goto out;
++
++ /* Wait for frontend to state that it has published the configuration */
++ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
++ XenbusStateInitialised)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
++
++ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
++ "pci-op-ref", "%u", &gnt_ref,
++ "event-channel", "%u", &remote_evtchn,
++ "magic", NULL, &magic, NULL);
++ if (err) {
++ /* If configuration didn't get read correctly, wait longer */
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading configuration from frontend");
++ goto out;
++ }
++
++ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
++ xenbus_dev_fatal(pdev->xdev, -EFAULT,
++ "version mismatch (%s/%s) with pcifront - "
++ "halting pciback",
++ magic, XEN_PCI_MAGIC);
++ goto out;
++ }
++
++ err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
++ if (err)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to connected state!");
++
++ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ kfree(magic); /* kfree(NULL) is a safe no-op */
++
++ return err;
++}
++
++static int pciback_publish_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn, unsigned int devid)
++{
++ int err;
++ int len;
++ char str[64];
++
++ len = snprintf(str, sizeof(str), "vdev-%d", devid);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%04x:%02x:%02x.%02x", domain, bus,
++ PCI_SLOT(devfn), PCI_FUNC(devfn));
++
++ out:
++ return err;
++}
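++
++/* For example, publishing devid 0 for the device at 0000:00:01.2 writes,
++ * relative to this backend's xenstore nodename (typically
++ * backend/pci/<domid>/<id>):
++ *   vdev-0 = "0000:00:01.02"
++ * (the function is printed with two hex digits by the format above)
++ */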
++
++static int pciback_export_device(struct pciback_device *pdev,
++ int domain, int bus, int slot, int func,
++ int devid)
++{
++ struct pci_dev *dev;
++ int err = 0;
++
++ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
++ domain, bus, slot, func);
++
++ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
++ if (!dev) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Couldn't locate PCI device "
++ "(%04x:%02x:%02x.%01x)! "
++ "perhaps already in-use?",
++ domain, bus, slot, func);
++ goto out;
++ }
++
++ err = pciback_add_pci_dev(pdev, dev, devid, pciback_publish_pci_dev);
++ if (err)
++ goto out;
++
++ /* TODO: It'd be nice to export a bridge and have all of its children
++ * get exported with it. This may be best done in xend (which will
++ * have to calculate resource usage anyway) but we probably want to
++ * put something in here to ensure that if a bridge gets given to a
++ * driver domain, that all devices under that bridge are not given
++ * to other driver domains (as he who controls the bridge can disable
++ * it and stop the other devices from working).
++ */
++ out:
++ return err;
++}
++
++static int pciback_remove_device(struct pciback_device *pdev,
++ int domain, int bus, int slot, int func)
++{
++ int err = 0;
++ struct pci_dev *dev;
++
++ dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
++ domain, bus, slot, func);
++
++ dev = pciback_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
++ if (!dev) {
++ err = -EINVAL;
++ dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
++ "(%04x:%02x:%02x.%01x)! not owned by this domain\n",
++ domain, bus, slot, func);
++ goto out;
++ }
++
++ pciback_release_pci_dev(pdev, dev);
++
++ out:
++ return err;
++}
++
++static int pciback_publish_pci_root(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ unsigned int d, b;
++ int i, root_num, len, err;
++ char str[64];
++
++ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", &root_num);
++ if (err == 0 || err == -ENOENT)
++ root_num = 0;
++ else if (err < 0)
++ goto out;
++
++ /* Verify that we haven't already published this pci root */
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ str, "%x:%x", &d, &b);
++ if (err < 0)
++ goto out;
++ if (err != 2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ if (d == domain && b == bus) {
++ err = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(str, sizeof(str), "root-%d", root_num);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
++ root_num, domain, bus);
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%04x:%02x", domain, bus);
++ if (err)
++ goto out;
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", (root_num + 1));
++
++ out:
++ return err;
++}
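++
++/* For example, after publishing the root for domain 0, bus 0, the
++ * backend's xenstore nodename contains:
++ *   root-0   = "0000:00"
++ *   root_num = "1"
++ */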
++
++static int pciback_reconfigure(struct pciback_device *pdev)
++{
++ int err = 0;
++ int num_devs;
++ int domain, bus, slot, func;
++ int substate;
++ int i, len;
++ char state_str[64];
++ char dev_str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
++
++ /* Make sure we only reconfigure once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateReconfiguring)
++ goto out;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of devices");
++ goto out;
++ }
++
++ for (i = 0; i < num_devs; i++) {
++ len = snprintf(state_str, sizeof(state_str), "state-%d", i);
++ if (unlikely(len >= (sizeof(state_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
++ "%d", &substate);
++ if (err != 1)
++ substate = XenbusStateUnknown;
++
++ switch (substate) {
++ case XenbusStateInitialising:
++ dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);
++
++ len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++ if (unlikely(len >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while "
++ "reading configuration");
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ dev_str, "%x:%x:%x.%x",
++ &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device "
++ "configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_export_device(pdev, domain, bus, slot,
++ func, i);
++ if (err)
++ goto out;
++
++ /* Publish pci roots. */
++ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error while publish PCI root"
++ "buses for frontend");
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++ state_str, "%d",
++ XenbusStateInitialised);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching substate of "
++ "dev-%d\n", i);
++ goto out;
++ }
++ break;
++
++ case XenbusStateClosing:
++ dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);
++
++ len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
++ if (unlikely(len >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while "
++ "reading configuration");
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ dev_str, "%x:%x:%x.%x",
++ &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device "
++ "configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_remove_device(pdev, domain, bus, slot,
++ func);
++ if (err)
++ goto out;
++
++ /* TODO: If at some point we implement support for pci
++ * root hot-remove on pcifront side, we'll need to
++ * remove unnecessary xenstore nodes of pci roots here.
++ */
++
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to reconfigured state!");
++ goto out;
++ }
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ return 0;
++}
++
++static void pciback_frontend_changed(struct xenbus_device *xdev,
++ enum xenbus_state fe_state)
++{
++ struct pciback_device *pdev = xdev->dev.driver_data;
++
++ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
++
++ switch (fe_state) {
++ case XenbusStateInitialised:
++ pciback_attach(pdev);
++ break;
++
++ case XenbusStateReconfiguring:
++ pciback_reconfigure(pdev);
++ break;
++
++ case XenbusStateConnected:
++ /* pcifront switched its state from Reconfiguring back to
++ * Connected, so mirror that by switching to Connected here as
++ * well.
++ */
++ xenbus_switch_state(xdev, XenbusStateConnected);
++ break;
++
++ case XenbusStateClosing:
++ pciback_disconnect(pdev);
++ xenbus_switch_state(xdev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ pciback_disconnect(pdev);
++ xenbus_switch_state(xdev, XenbusStateClosed);
++ if (xenbus_dev_is_online(xdev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
++ device_unregister(&xdev->dev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pciback_setup_backend(struct pciback_device *pdev)
++{
++ /* Get configuration from xend (if available now) */
++ int domain, bus, slot, func;
++ int err = 0;
++ int i, num_devs;
++ char dev_str[64];
++ char state_str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ /* It's possible we could get the call to setup twice, so make sure
++ * we're not already connected.
++ */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitWait)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of devices");
++ goto out;
++ }
++
++ for (i = 0; i < num_devs; i++) {
++ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++ if (unlikely(l >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
++ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_export_device(pdev, domain, bus, slot, func, i);
++ if (err)
++ goto out;
++
++ /* Switch substate of this device. */
++ l = snprintf(state_str, sizeof(state_str), "state-%d", i);
++ if (unlikely(l >= (sizeof(state_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
++ "%d", XenbusStateInitialised);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err, "Error switching "
++ "substate of dev-%d\n", i);
++ goto out;
++ }
++ }
++
++ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error while publish PCI root buses "
++ "for frontend");
++ goto out;
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++ if (err)
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to initialised state!");
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ if (!err)
++ /* see if pcifront is already configured (if not, we'll wait) */
++ pciback_attach(pdev);
++
++ return err;
++}
++
++static void pciback_be_watch(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct pciback_device *pdev =
++ container_of(watch, struct pciback_device, be_watch);
++
++ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
++ case XenbusStateInitWait:
++ pciback_setup_backend(pdev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pciback_xenbus_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err = 0;
++ struct pciback_device *pdev = alloc_pdev(dev);
++
++ if (pdev == NULL) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err,
++ "Error allocating pciback_device struct");
++ goto out;
++ }
++
++ /* wait for xend to configure us */
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto out;
++
++ /* watch the backend node for backend configuration information */
++ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
++ pciback_be_watch);
++ if (err)
++ goto out;
++ pdev->be_watching = 1;
++
++ /* We need to force a call to our callback here in case
++ * xend already configured us!
++ */
++ pciback_be_watch(&pdev->be_watch, NULL, 0);
++
++ out:
++ return err;
++}
++
++static int pciback_xenbus_remove(struct xenbus_device *dev)
++{
++ struct pciback_device *pdev = dev->dev.driver_data;
++
++ if (pdev != NULL)
++ free_pdev(pdev);
++
++ return 0;
++}
++
++static const struct xenbus_device_id xenpci_ids[] = {
++ {"pci"},
++ {{0}},
++};
++
++static struct xenbus_driver xenbus_pciback_driver = {
++ .name = "pciback",
++ .owner = THIS_MODULE,
++ .ids = xenpci_ids,
++ .probe = pciback_xenbus_probe,
++ .remove = pciback_xenbus_remove,
++ .otherend_changed = pciback_frontend_changed,
++};
++
++int __init pciback_xenbus_register(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_backend(&xenbus_pciback_driver);
++}
++
++void __exit pciback_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&xenbus_pciback_driver);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pcifront/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/Makefile
+--- linux-2.6.18.8/drivers/xen/pcifront/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,7 @@
++obj-y += pcifront.o
++
++pcifront-y := pci_op.o xenbus.o pci.o
++
++ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -rpuN linux-2.6.18.8/drivers/xen/pcifront/pci.c linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/pci.c
+--- linux-2.6.18.8/drivers/xen/pcifront/pci.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/pci.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,46 @@
++/*
++ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pcifront.h"
++
++DEFINE_SPINLOCK(pcifront_dev_lock);
++static struct pcifront_device *pcifront_dev = NULL;
++
++int pcifront_connect(struct pcifront_device *pdev)
++{
++ int err = 0;
++
++ spin_lock(&pcifront_dev_lock);
++
++ if (!pcifront_dev) {
++ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
++ pcifront_dev = pdev;
++ }
++ else {
++ dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
++ err = -EEXIST;
++ }
++
++ spin_unlock(&pcifront_dev_lock);
++
++ return err;
++}
++
++void pcifront_disconnect(struct pcifront_device *pdev)
++{
++ spin_lock(&pcifront_dev_lock);
++
++ if (pdev == pcifront_dev) {
++ dev_info(&pdev->xdev->dev,
++ "Disconnecting PCI Frontend Buses\n");
++ pcifront_dev = NULL;
++ }
++
++ spin_unlock(&pcifront_dev_lock);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pcifront/pcifront.h linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/pcifront.h
+--- linux-2.6.18.8/drivers/xen/pcifront/pcifront.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/pcifront.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,42 @@
++/*
++ * PCI Frontend - Common data structures & function declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIFRONT_H__
++#define __XEN_PCIFRONT_H__
++
++#include <linux/spinlock.h>
++#include <linux/pci.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/pciif.h>
++#include <xen/pcifront.h>
++
++struct pci_bus_entry {
++ struct list_head list;
++ struct pci_bus *bus;
++};
++
++struct pcifront_device {
++ struct xenbus_device *xdev;
++ struct list_head root_buses;
++ spinlock_t dev_lock;
++
++ int evtchn;
++ int gnt_ref;
++
++ /* Lock this when doing any operations in sh_info */
++ spinlock_t sh_info_lock;
++ struct xen_pci_sharedinfo *sh_info;
++};
++
++int pcifront_connect(struct pcifront_device *pdev);
++void pcifront_disconnect(struct pcifront_device *pdev);
++
++int pcifront_scan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus);
++int pcifront_rescan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus);
++void pcifront_free_roots(struct pcifront_device *pdev);
++
++#endif /* __XEN_PCIFRONT_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/pcifront/pci_op.c linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/pci_op.c
+--- linux-2.6.18.8/drivers/xen/pcifront/pci_op.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/pci_op.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,551 @@
++/*
++ * PCI Frontend Operations - Communicates with the backend (pciback)
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <xen/evtchn.h>
++#include "pcifront.h"
++
++static int verbose_request = 0;
++module_param(verbose_request, int, 0644);
++
++#ifdef __ia64__
++static void pcifront_init_sd(struct pcifront_sd *sd,
++ unsigned int domain, unsigned int bus,
++ struct pcifront_device *pdev)
++{
++ int err, i, j, k, len, root_num, res_count;
++ struct acpi_resource res;
++ unsigned int d, b, byte;
++ unsigned long magic;
++ char str[64], tmp[3];
++ unsigned char *buf, *bufp;
++ u8 *ptr;
++
++ memset(sd, 0, sizeof(*sd));
++
++ sd->segment = domain;
++ sd->node = -1; /* Revisit for NUMA */
++ sd->platform_data = pdev;
++
++ /* Look for resources for this controller in xenbus. */
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "root_num",
++ "%d", &root_num);
++ if (err != 1)
++ return;
++
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ str, "%x:%x", &d, &b);
++ if (err != 2)
++ return;
++
++ if (d == domain && b == bus)
++ break;
++ }
++
++ if (i == root_num)
++ return;
++
++ len = snprintf(str, sizeof(str), "root-resource-magic");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ str, "%lx", &magic);
++
++ if (err != 1)
++ return; /* No resources, nothing to do */
++
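++ /* The backend publishes each resource as a hex string: two ASCII
++ * characters per byte of struct acpi_resource plus a terminating
++ * NUL, which is exactly the value magic must equal.
++ */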
++ if (magic != (sizeof(res) * 2) + 1) {
++ printk(KERN_WARNING "pcifront: resource magic mismatch\n");
++ return;
++ }
++
++ len = snprintf(str, sizeof(str), "root-%d-resources", i);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ str, "%d", &res_count);
++
++ if (err != 1)
++ return; /* No resources, nothing to do */
++
++ sd->window = kzalloc(sizeof(*sd->window) * res_count, GFP_KERNEL);
++ if (!sd->window)
++ return;
++
++ /* magic is also the size of the byte stream in xenbus */
++ buf = kmalloc(magic, GFP_KERNEL);
++ if (!buf) {
++ kfree(sd->window);
++ sd->window = NULL;
++ return;
++ }
++
++ /* Read the resources out of xenbus */
++ for (j = 0; j < res_count; j++) {
++ memset(&res, 0, sizeof(res));
++ memset(buf, 0, magic);
++
++ len = snprintf(str, sizeof(str), "root-%d-resource-%d", i, j);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%s", buf);
++ if (err != 1) {
++ printk(KERN_WARNING "pcifront: error reading "
++ "resource %d on bus %04x:%02x\n",
++ j, domain, bus);
++ continue;
++ }
++
++ bufp = buf;
++ ptr = (u8 *)&res;
++ memset(tmp, 0, sizeof(tmp));
++
++ /* Copy ASCII byte stream into structure */
++ for (k = 0; k < magic - 1; k += 2) {
++ memcpy(tmp, bufp, 2);
++ bufp += 2;
++
++ sscanf(tmp, "%02x", &byte);
++ *ptr = byte;
++ ptr++;
++ }
++
++ xen_add_resource(sd, domain, bus, &res);
++ sd->windows++;
++ }
++ kfree(buf);
++}
++#endif
++
++static int errno_to_pcibios_err(int errno)
++{
++ switch (errno) {
++ case XEN_PCI_ERR_success:
++ return PCIBIOS_SUCCESSFUL;
++
++ case XEN_PCI_ERR_dev_not_found:
++ return PCIBIOS_DEVICE_NOT_FOUND;
++
++ case XEN_PCI_ERR_invalid_offset:
++ case XEN_PCI_ERR_op_failed:
++ return PCIBIOS_BAD_REGISTER_NUMBER;
++
++ case XEN_PCI_ERR_not_implemented:
++ return PCIBIOS_FUNC_NOT_SUPPORTED;
++
++ case XEN_PCI_ERR_access_denied:
++ return PCIBIOS_SET_FAILED;
++ }
++ return errno;
++}
++
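++/* Ship one request to pciback through the shared info page: copy the
++ * op into the page, raise the active flag, kick the event channel,
++ * then poll until the backend clears the flag and the reply
++ * (including op->err) can be copied back out.
++ */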
++static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
++{
++ int err = 0;
++ struct xen_pci_op *active_op = &pdev->sh_info->op;
++ unsigned long irq_flags;
++ evtchn_port_t port = pdev->evtchn;
++ s64 ns, ns_timeout;
++ struct timeval tv;
++
++ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
++
++ memcpy(active_op, op, sizeof(struct xen_pci_op));
++
++ /* Go */
++ wmb();
++ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++ notify_remote_via_evtchn(port);
++
++ /*
++ * We set a poll timeout of 3 seconds but give up on return after
++ * 2 seconds. It is better to time out too late rather than too early
++ * (in the latter case we end up continually re-executing poll() with a
++ * timeout in the past). 1s difference gives plenty of slack for error.
++ */
++ do_gettimeofday(&tv);
++ ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
++
++ clear_evtchn(port);
++
++ while (test_bit(_XEN_PCIF_active,
++ (unsigned long *)&pdev->sh_info->flags)) {
++ if (HYPERVISOR_poll(&port, 1, jiffies + 3*HZ))
++ BUG();
++ clear_evtchn(port);
++ do_gettimeofday(&tv);
++ ns = timeval_to_ns(&tv);
++ if (ns > ns_timeout) {
++ dev_err(&pdev->xdev->dev,
++ "pciback not responding!!!\n");
++ clear_bit(_XEN_PCIF_active,
++ (unsigned long *)&pdev->sh_info->flags);
++ err = XEN_PCI_ERR_dev_not_found;
++ goto out;
++ }
++ }
++
++ memcpy(op, active_op, sizeof(struct xen_pci_op));
++
++ err = op->err;
++ out:
++ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
++ return err;
++}
++
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 * val)
++{
++ int err = 0;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_conf_read,
++ .domain = pci_domain_nr(bus),
++ .bus = bus->number,
++ .devfn = devfn,
++ .offset = where,
++ .size = size,
++ };
++ struct pcifront_sd *sd = bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev,
++ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
++ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), where, size);
++
++ err = do_pci_op(pdev, &op);
++
++ if (likely(!err)) {
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev, "read got back value %x\n",
++ op.value);
++
++ *val = op.value;
++ } else if (err == -ENODEV) {
++ /* No device here, pretend that it just returned 0 */
++ err = 0;
++ *val = 0;
++ }
++
++ return errno_to_pcibios_err(err);
++}
++
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_conf_write,
++ .domain = pci_domain_nr(bus),
++ .bus = bus->number,
++ .devfn = devfn,
++ .offset = where,
++ .size = size,
++ .value = val,
++ };
++ struct pcifront_sd *sd = bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev,
++ "write dev=%04x:%02x:%02x.%01x - "
++ "offset %x size %d val %x\n",
++ pci_domain_nr(bus), bus->number,
++ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
++
++ return errno_to_pcibios_err(do_pci_op(pdev, &op));
++}
++
++struct pci_ops pcifront_bus_ops = {
++ .read = pcifront_bus_read,
++ .write = pcifront_bus_write,
++};
++
++#ifdef CONFIG_PCI_MSI
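++/* MSI/MSI-X (de)configuration is proxied to pciback: the request goes
++ * through the shared xen_pci_op and the backend hands the assigned
++ * vectors back in op.value / op.msix_entries.
++ */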
++int pci_frontend_enable_msix(struct pci_dev *dev,
++ struct msix_entry *entries,
++ int nvec)
++{
++ int err;
++ int i;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_enable_msix,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ .value = nvec,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (nvec > SH_INFO_MAX_VEC) {
++ printk("too many vectors for pci frontend: %x\n", nvec);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < nvec; i++) {
++ op.msix_entries[i].entry = entries[i].entry;
++ op.msix_entries[i].vector = entries[i].vector;
++ }
++
++ err = do_pci_op(pdev, &op);
++
++ if (!err) {
++ if (!op.value) {
++ /* success: copy back the vectors assigned by the backend */
++ for (i = 0; i < nvec; i++)
++ entries[i].vector = op.msix_entries[i].vector;
++ return 0;
++ } else {
++ printk("enable msix returned value %x\n", op.value);
++ return op.value;
++ }
++ } else {
++ printk("enable msix returned err %x\n", err);
++ return err;
++ }
++}
++
++void pci_frontend_disable_msix(struct pci_dev* dev)
++{
++ int err;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_disable_msix,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ err = do_pci_op(pdev, &op);
++
++ /* What should we do on error? */
++ if (err)
++ printk("pci_disable_msix returned err %x\n", err);
++}
++
++int pci_frontend_enable_msi(struct pci_dev *dev)
++{
++ int err;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_enable_msi,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ err = do_pci_op(pdev, &op);
++ if (likely(!err)) {
++ dev->irq = op.value;
++ } else {
++ printk("pci frontend enable msi failed for dev %x:%x\n",
++ op.bus, op.devfn);
++ err = -EINVAL;
++ }
++ return err;
++}
++
++void pci_frontend_disable_msi(struct pci_dev* dev)
++{
++ int err;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_disable_msi,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ err = do_pci_op(pdev, &op);
++ if (err == XEN_PCI_ERR_dev_not_found) {
++ /* XXX No response from backend, what shall we do? */
++ printk("no response from backend for disable MSI\n");
++ return;
++ }
++ if (likely(!err))
++ dev->irq = op.value;
++ else
++ /* how can pciback notify us of failure? */
++ printk("got bogus response from backend\n");
++}
++#endif /* CONFIG_PCI_MSI */
++
++/* Claim resources for the PCI frontend as-is, backend won't allow changes */
++static void pcifront_claim_resource(struct pci_dev *dev, void *data)
++{
++ struct pcifront_device *pdev = data;
++ int i;
++ struct resource *r;
++
++ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++ r = &dev->resource[i];
++
++ if (!r->parent && r->start && r->flags) {
++ dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
++ pci_name(dev), i);
++ pci_claim_resource(dev, i);
++ }
++ }
++}
++
++int __devinit pcifront_scan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ struct pci_bus *b;
++ struct pcifront_sd *sd = NULL;
++ struct pci_bus_entry *bus_entry = NULL;
++ int err = 0;
++
++#ifndef CONFIG_PCI_DOMAINS
++ if (domain != 0) {
++ dev_err(&pdev->xdev->dev,
++ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++ dev_err(&pdev->xdev->dev,
++ "Please compile with CONFIG_PCI_DOMAINS\n");
++ err = -EINVAL;
++ goto err_out;
++ }
++#endif
++
++ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
++ domain, bus);
++
++ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
++ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++ if (!bus_entry || !sd) {
++ err = -ENOMEM;
++ goto err_out;
++ }
++ pcifront_init_sd(sd, domain, bus, pdev);
++
++ b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
++ &pcifront_bus_ops, sd);
++ if (!b) {
++ dev_err(&pdev->xdev->dev,
++ "Error creating PCI Frontend Bus!\n");
++ err = -ENOMEM;
++ goto err_out;
++ }
++
++ pcifront_setup_root_resources(b, sd);
++ bus_entry->bus = b;
++
++ list_add(&bus_entry->list, &pdev->root_buses);
++
++ /* Claim resources before going "live" with our devices */
++ pci_walk_bus(b, pcifront_claim_resource, pdev);
++
++ pci_bus_add_devices(b);
++
++ return 0;
++
++ err_out:
++ kfree(bus_entry);
++ kfree(sd);
++
++ return err;
++}
++
++int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ struct pci_bus *b;
++ struct pci_dev *d;
++ unsigned int devfn;
++
++#ifndef CONFIG_PCI_DOMAINS
++ if (domain != 0) {
++ dev_err(&pdev->xdev->dev,
++ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++ dev_err(&pdev->xdev->dev,
++ "Please compile with CONFIG_PCI_DOMAINS\n");
++ return -EINVAL;
++ }
++#endif
++
++ dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
++ domain, bus);
++
++ b = pci_find_bus(domain, bus);
++ if (!b)
++ /* If the bus is unknown, create it. */
++ return pcifront_scan_root(pdev, domain, bus);
++
++ /* Rescan the bus for newly attached functions and add.
++ * We omit handling of PCI bridge attachment because pciback prevents
++ * bridges from being exported.
++ */
++ for (devfn = 0; devfn < 0x100; devfn++) {
++ d = pci_get_slot(b, devfn);
++ if (d) {
++ /* Device is already known. */
++ pci_dev_put(d);
++ continue;
++ }
++
++ d = pci_scan_single_device(b, devfn);
++ if (d) {
++ dev_info(&pdev->xdev->dev, "New device on "
++ "%04x:%02x:%02x.%02x found.\n", domain, bus,
++ PCI_SLOT(devfn), PCI_FUNC(devfn));
++ pci_bus_add_device(d);
++ }
++ }
++
++ return 0;
++}
++
++static void free_root_bus_devs(struct pci_bus *bus)
++{
++ struct pci_dev *dev;
++
++ while (!list_empty(&bus->devices)) {
++ dev = container_of(bus->devices.next, struct pci_dev,
++ bus_list);
++ dev_dbg(&dev->dev, "removing device\n");
++ pci_remove_bus_device(dev);
++ }
++}
++
++void pcifront_free_roots(struct pcifront_device *pdev)
++{
++ struct pci_bus_entry *bus_entry, *t;
++
++ dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
++
++ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
++ list_del(&bus_entry->list);
++
++ free_root_bus_devs(bus_entry->bus);
++
++ kfree(bus_entry->bus->sysdata);
++
++ device_unregister(bus_entry->bus->bridge);
++ pci_remove_bus(bus_entry->bus);
++
++ kfree(bus_entry);
++ }
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/pcifront/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/xenbus.c
+--- linux-2.6.18.8/drivers/xen/pcifront/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/pcifront/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,455 @@
++/*
++ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include "pcifront.h"
++
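++/* Older kernels do not define __init_refok; make it a no-op so the
++ * annotation used below still compiles there.
++ */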
++#ifndef __init_refok
++#define __init_refok
++#endif
++
++#define INVALID_GRANT_REF (0)
++#define INVALID_EVTCHN (-1)
++
++static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
++{
++ struct pcifront_device *pdev;
++
++ pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
++ if (pdev == NULL)
++ goto out;
++
++ pdev->sh_info =
++ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
++ if (pdev->sh_info == NULL) {
++ kfree(pdev);
++ pdev = NULL;
++ goto out;
++ }
++ pdev->sh_info->flags = 0;
++
++ xdev->dev.driver_data = pdev;
++ pdev->xdev = xdev;
++
++ INIT_LIST_HEAD(&pdev->root_buses);
++
++ spin_lock_init(&pdev->dev_lock);
++ spin_lock_init(&pdev->sh_info_lock);
++
++ pdev->evtchn = INVALID_EVTCHN;
++ pdev->gnt_ref = INVALID_GRANT_REF;
++
++ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
++ pdev, pdev->sh_info);
++ out:
++ return pdev;
++}
++
++static void free_pdev(struct pcifront_device *pdev)
++{
++ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
++
++ pcifront_free_roots(pdev);
++
++ if (pdev->evtchn != INVALID_EVTCHN)
++ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
++
++ if (pdev->gnt_ref != INVALID_GRANT_REF)
++ gnttab_end_foreign_access(pdev->gnt_ref,
++ (unsigned long)pdev->sh_info);
++
++ pdev->xdev->dev.driver_data = NULL;
++
++ kfree(pdev);
++}
++
++static int pcifront_publish_info(struct pcifront_device *pdev)
++{
++ int err = 0;
++ struct xenbus_transaction trans;
++
++ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
++ if (err < 0)
++ goto out;
++
++ pdev->gnt_ref = err;
++
++ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
++ if (err)
++ goto out;
++
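++ /* A xenbus transaction can race with other writers and fail with
++ * -EAGAIN, in which case the whole transaction is retried from
++ * this label.
++ */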
++ do_publish:
++ err = xenbus_transaction_start(&trans);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error writing configuration for backend "
++ "(start transaction)");
++ goto out;
++ }
++
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "pci-op-ref", "%u", pdev->gnt_ref);
++ if (!err)
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "event-channel", "%u", pdev->evtchn);
++ if (!err)
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "magic", XEN_PCI_MAGIC);
++
++ if (err) {
++ xenbus_transaction_end(trans, 1);
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error writing configuration for backend");
++ goto out;
++ } else {
++ err = xenbus_transaction_end(trans, 0);
++ if (err == -EAGAIN)
++ goto do_publish;
++ else if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error completing transaction "
++ "for backend");
++ goto out;
++ }
++ }
++
++ xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++
++ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
++
++ out:
++ return err;
++}
++
++static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
++{
++ int err = -EFAULT;
++ int i, num_roots, len;
++ char str[64];
++ unsigned int domain, bus;
++
++ spin_lock(&pdev->dev_lock);
++
++ /* Only connect once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitialised)
++ goto out;
++
++ err = pcifront_connect(pdev);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error connecting PCI Frontend");
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ "root_num", "%d", &num_roots);
++ if (err == -ENOENT) {
++ xenbus_dev_error(pdev->xdev, err,
++ "No PCI Roots found, trying 0000:00");
++ err = pcifront_scan_root(pdev, 0, 0);
++ num_roots = 0;
++ } else if (err != 1) {
++ if (err == 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI roots");
++ goto out;
++ }
++
++ for (i = 0; i < num_roots; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x", &domain, &bus);
++ if (err != 2) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI root %d", i);
++ goto out;
++ }
++
++ err = pcifront_scan_root(pdev, domain, bus);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error scanning PCI root %04x:%02x",
++ domain, bus);
++ goto out;
++ }
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++ if (err)
++ goto out;
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
++static int pcifront_try_disconnect(struct pcifront_device *pdev)
++{
++ int err = 0;
++ enum xenbus_state prev_state;
++
++ spin_lock(&pdev->dev_lock);
++
++ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
++
++ if (prev_state >= XenbusStateClosing)
++ goto out;
++
++ if (prev_state == XenbusStateConnected) {
++ pcifront_free_roots(pdev);
++ pcifront_disconnect(pdev);
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateClosed);
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ return err;
++}
++
++static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
++{
++ int err = -EFAULT;
++ int i, num_roots, len;
++ unsigned int domain, bus;
++ char str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateReconfiguring)
++ goto out;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ "root_num", "%d", &num_roots);
++ if (err == -ENOENT) {
++ xenbus_dev_error(pdev->xdev, err,
++ "No PCI Roots found, trying 0000:00");
++ err = pcifront_rescan_root(pdev, 0, 0);
++ num_roots = 0;
++ } else if (err != 1) {
++ if (err == 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI roots");
++ goto out;
++ }
++
++ for (i = 0; i < num_roots; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x", &domain, &bus);
++ if (err != 2) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI root %d", i);
++ goto out;
++ }
++
++ err = pcifront_rescan_root(pdev, domain, bus);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error scanning PCI root %04x:%02x",
++ domain, bus);
++ goto out;
++ }
++ }
++
++ xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
++static int pcifront_detach_devices(struct pcifront_device *pdev)
++{
++ int err = 0;
++ int i, num_devs;
++ unsigned int domain, bus, slot, func;
++ struct pci_bus *pci_bus;
++ struct pci_dev *pci_dev;
++ char str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateConnected)
++ goto out;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI devices");
++ goto out;
++ }
++
++ /* Find devices being detached and remove them. */
++ for (i = 0; i < num_devs; i++) {
++ int l, state;
++ l = snprintf(str, sizeof(str), "state-%d", i);
++ if (unlikely(l >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
++ &state);
++ if (err != 1)
++ state = XenbusStateUnknown;
++
++ if (state != XenbusStateClosing)
++ continue;
++
++ /* Remove device. */
++ l = snprintf(str, sizeof(str), "vdev-%d", i);
++ if (unlikely(l >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++ if (err != 4) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI device %d", i);
++ goto out;
++ }
++
++ pci_bus = pci_find_bus(domain, bus);
++ if (!pci_bus) {
++ dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
++ domain, bus);
++ continue;
++ }
++ pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
++ if (!pci_dev) {
++ dev_dbg(&pdev->xdev->dev,
++ "Cannot get PCI device %04x:%02x:%02x.%02x\n",
++ domain, bus, slot, func);
++ continue;
++ }
++ pci_remove_bus_device(pci_dev);
++ pci_dev_put(pci_dev);
++
++ dev_dbg(&pdev->xdev->dev,
++ "PCI device %04x:%02x:%02x.%02x removed.\n",
++ domain, bus, slot, func);
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
++static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
++ enum xenbus_state be_state)
++{
++ struct pcifront_device *pdev = xdev->dev.driver_data;
++
++ switch (be_state) {
++ case XenbusStateUnknown:
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateConnected:
++ pcifront_try_connect(pdev);
++ break;
++
++ case XenbusStateClosing:
++ dev_warn(&xdev->dev, "backend going away!\n");
++ pcifront_try_disconnect(pdev);
++ break;
++
++ case XenbusStateReconfiguring:
++ pcifront_detach_devices(pdev);
++ break;
++
++ case XenbusStateReconfigured:
++ pcifront_attach_devices(pdev);
++ break;
++ }
++}
++
++static int pcifront_xenbus_probe(struct xenbus_device *xdev,
++ const struct xenbus_device_id *id)
++{
++ int err = 0;
++ struct pcifront_device *pdev = alloc_pdev(xdev);
++
++ if (pdev == NULL) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(xdev, err,
++ "Error allocating pcifront_device struct");
++ goto out;
++ }
++
++ err = pcifront_publish_info(pdev);
++
++ out:
++ return err;
++}
++
++static int pcifront_xenbus_remove(struct xenbus_device *xdev)
++{
++ if (xdev->dev.driver_data)
++ free_pdev(xdev->dev.driver_data);
++
++ return 0;
++}
++
++static const struct xenbus_device_id xenpci_ids[] = {
++ {"pci"},
++ {{0}},
++};
++MODULE_ALIAS("xen:pci");
++
++static struct xenbus_driver xenbus_pcifront_driver = {
++ .name = "pcifront",
++ .owner = THIS_MODULE,
++ .ids = xenpci_ids,
++ .probe = pcifront_xenbus_probe,
++ .remove = pcifront_xenbus_remove,
++ .otherend_changed = pcifront_backend_changed,
++};
++
++static int __init pcifront_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenbus_pcifront_driver);
++}
++
++/* Initialize after the Xen PCI Frontend Stub is initialized */
++subsys_initcall(pcifront_init);
+diff -rpuN linux-2.6.18.8/drivers/xen/privcmd/compat_privcmd.c linux-2.6.18-xen-3.3.0/drivers/xen/privcmd/compat_privcmd.c
+--- linux-2.6.18.8/drivers/xen/privcmd/compat_privcmd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/privcmd/compat_privcmd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,73 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/compat.h>
++#include <linux/ioctl.h>
++#include <linux/syscalls.h>
++#include <asm/hypervisor.h>
++#include <asm/uaccess.h>
++#include <xen/public/privcmd.h>
++#include <xen/compat_ioctl.h>
++
++int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg)
++{
++ int ret;
++
++ switch (cmd) {
++ case IOCTL_PRIVCMD_MMAP_32: {
++ struct privcmd_mmap *p;
++ struct privcmd_mmap_32 *p32;
++ struct privcmd_mmap_32 n32;
++
++ p32 = compat_ptr(arg);
++ p = compat_alloc_user_space(sizeof(*p));
++ if (copy_from_user(&n32, p32, sizeof(n32)) ||
++ put_user(n32.num, &p->num) ||
++ put_user(n32.dom, &p->dom) ||
++ put_user(compat_ptr(n32.entry), &p->entry))
++ return -EFAULT;
++
++ ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAP, (unsigned long)p);
++ }
++ break;
++ case IOCTL_PRIVCMD_MMAPBATCH_32: {
++ struct privcmd_mmapbatch *p;
++ struct privcmd_mmapbatch_32 *p32;
++ struct privcmd_mmapbatch_32 n32;
++
++ p32 = compat_ptr(arg);
++ p = compat_alloc_user_space(sizeof(*p));
++ if (copy_from_user(&n32, p32, sizeof(n32)) ||
++ put_user(n32.num, &p->num) ||
++ put_user(n32.dom, &p->dom) ||
++ put_user(n32.addr, &p->addr) ||
++ put_user(compat_ptr(n32.arr), &p->arr))
++ return -EFAULT;
++
++ ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, (unsigned long)p);
++ }
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/privcmd/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/privcmd/Makefile
+--- linux-2.6.18.8/drivers/xen/privcmd/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/privcmd/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3 @@
++
++obj-y += privcmd.o
++obj-$(CONFIG_COMPAT) += compat_privcmd.o
+diff -rpuN linux-2.6.18.8/drivers/xen/privcmd/privcmd.c linux-2.6.18-xen-3.3.0/drivers/xen/privcmd/privcmd.c
+--- linux-2.6.18.8/drivers/xen/privcmd/privcmd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/privcmd/privcmd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,356 @@
++/******************************************************************************
++ * privcmd.c
++ *
++ * Interface to privileged domain-0 commands.
++ *
++ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/smp_lock.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <asm/hypervisor.h>
++
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <xen/public/privcmd.h>
++#include <xen/interface/xen.h>
++#include <xen/xen_proc.h>
++#include <xen/features.h>
++
++static struct proc_dir_entry *privcmd_intf;
++static struct proc_dir_entry *capabilities_intf;
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
++#endif
++
++static long privcmd_ioctl(struct file *file,
++ unsigned int cmd, unsigned long data)
++{
++ int ret = -ENOSYS;
++ void __user *udata = (void __user *) data;
++
++ switch (cmd) {
++ case IOCTL_PRIVCMD_HYPERCALL: {
++ privcmd_hypercall_t hypercall;
++
++ if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
++ return -EFAULT;
++
++#if defined(__i386__)
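++ /* Each stub in hypercall_page is 32 bytes, so the page holds
++ * PAGE_SIZE >> 5 entries; reject out-of-range op numbers before
++ * computing the call target below.
++ */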
++ if (hypercall.op >= (PAGE_SIZE >> 5))
++ break;
++ __asm__ __volatile__ (
++ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
++ "pushl %%esi; pushl %%edi; "
++ "movl 8(%%eax),%%ebx ;"
++ "movl 16(%%eax),%%ecx ;"
++ "movl 24(%%eax),%%edx ;"
++ "movl 32(%%eax),%%esi ;"
++ "movl 40(%%eax),%%edi ;"
++ "movl (%%eax),%%eax ;"
++ "shll $5,%%eax ;"
++ "addl $hypercall_page,%%eax ;"
++ "call *%%eax ;"
++ "popl %%edi; popl %%esi; popl %%edx; "
++ "popl %%ecx; popl %%ebx"
++ : "=a" (ret) : "0" (&hypercall) : "memory" );
++#elif defined (__x86_64__)
++ if (hypercall.op < (PAGE_SIZE >> 5)) {
++ long ign1, ign2, ign3;
++ __asm__ __volatile__ (
++ "movq %8,%%r10; movq %9,%%r8;"
++ "shll $5,%%eax ;"
++ "addq $hypercall_page,%%rax ;"
++ "call *%%rax"
++ : "=a" (ret), "=D" (ign1),
++ "=S" (ign2), "=d" (ign3)
++ : "0" ((unsigned int)hypercall.op),
++ "1" (hypercall.arg[0]),
++ "2" (hypercall.arg[1]),
++ "3" (hypercall.arg[2]),
++ "g" (hypercall.arg[3]),
++ "g" (hypercall.arg[4])
++ : "r8", "r10", "memory" );
++ }
++#else
++ ret = privcmd_hypercall(&hypercall);
++#endif
++ }
++ break;
++
++ case IOCTL_PRIVCMD_MMAP: {
++#define MMAP_NR_PER_PAGE (int)((PAGE_SIZE-sizeof(struct list_head))/sizeof(privcmd_mmap_entry_t))
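++ /* The user-supplied entries are staged in scratch pages; each page
++ * starts with a list_head and is followed by as many
++ * privcmd_mmap_entry_t records as fit in the remainder.
++ */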
++ privcmd_mmap_t mmapcmd;
++ privcmd_mmap_entry_t *msg;
++ privcmd_mmap_entry_t __user *p;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long va;
++ int i, rc;
++ LIST_HEAD(pagelist);
++ struct list_head *l,*l2;
++
++ if (!is_initial_xendomain())
++ return -EPERM;
++
++ if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
++ return -EFAULT;
++
++ p = mmapcmd.entry;
++ for (i = 0; i < mmapcmd.num;) {
++ int nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
++
++ rc = -ENOMEM;
++ l = (struct list_head *) __get_free_page(GFP_KERNEL);
++ if (l == NULL)
++ goto mmap_out;
++
++ INIT_LIST_HEAD(l);
++ list_add_tail(l, &pagelist);
++ msg = (privcmd_mmap_entry_t*)(l + 1);
++
++ rc = -EFAULT;
++ if (copy_from_user(msg, p, nr*sizeof(*msg)))
++ goto mmap_out;
++ i += nr;
++ p += nr;
++ }
++
++ l = pagelist.next;
++ msg = (privcmd_mmap_entry_t*)(l + 1);
++
++ down_write(&mm->mmap_sem);
++
++ vma = find_vma(mm, msg->va);
++ rc = -EINVAL;
++ if (!vma || (msg->va != vma->vm_start) ||
++ !privcmd_enforce_singleshot_mapping(vma))
++ goto mmap_out;
++
++ va = vma->vm_start;
++
++ i = 0;
++ list_for_each(l, &pagelist) {
++ int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
++
++ msg = (privcmd_mmap_entry_t*)(l + 1);
++ while (i<nr) {
++
++ /* Do not allow range to wrap the address space. */
++ rc = -EINVAL;
++ if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
++ ((unsigned long)(msg->npages << PAGE_SHIFT) >= -va))
++ goto mmap_out;
++
++ /* Range chunks must be contiguous in va space. */
++ if ((msg->va != va) ||
++ ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
++ goto mmap_out;
++
++ if ((rc = direct_remap_pfn_range(
++ vma,
++ msg->va & PAGE_MASK,
++ msg->mfn,
++ msg->npages << PAGE_SHIFT,
++ vma->vm_page_prot,
++ mmapcmd.dom)) < 0)
++ goto mmap_out;
++
++ va += msg->npages << PAGE_SHIFT;
++ msg++;
++ i++;
++ }
++ }
++
++ rc = 0;
++
++ mmap_out:
++ up_write(&mm->mmap_sem);
++ list_for_each_safe(l,l2,&pagelist)
++ free_page((unsigned long)l);
++ ret = rc;
++ }
++#undef MMAP_NR_PER_PAGE
++ break;
++
++ case IOCTL_PRIVCMD_MMAPBATCH: {
++#define MMAPBATCH_NR_PER_PAGE (unsigned long)((PAGE_SIZE-sizeof(struct list_head))/sizeof(unsigned long))
++ privcmd_mmapbatch_t m;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ xen_pfn_t __user *p;
++ unsigned long addr, *mfn, nr_pages;
++ int i;
++ LIST_HEAD(pagelist);
++ struct list_head *l, *l2;
++
++ if (!is_initial_xendomain())
++ return -EPERM;
++
++ if (copy_from_user(&m, udata, sizeof(m)))
++ return -EFAULT;
++
++ nr_pages = m.num;
++ if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
++ return -EINVAL;
++
++ p = m.arr;
++ for (i=0; i<nr_pages; ) {
++ int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++
++ ret = -ENOMEM;
++ l = (struct list_head *)__get_free_page(GFP_KERNEL);
++ if (l == NULL)
++ goto mmapbatch_out;
++
++ INIT_LIST_HEAD(l);
++ list_add_tail(l, &pagelist);
++
++ mfn = (unsigned long*)(l + 1);
++ ret = -EFAULT;
++ if (copy_from_user(mfn, p, nr*sizeof(*mfn)))
++ goto mmapbatch_out;
++
++ i += nr; p+= nr;
++ }
++
++ down_write(&mm->mmap_sem);
++
++ vma = find_vma(mm, m.addr);
++ ret = -EINVAL;
++ if (!vma ||
++ (m.addr != vma->vm_start) ||
++ ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
++ !privcmd_enforce_singleshot_mapping(vma)) {
++ up_write(&mm->mmap_sem);
++ goto mmapbatch_out;
++ }
++
++ p = m.arr;
++ addr = m.addr;
++ i = 0;
++ ret = 0;
++ list_for_each(l, &pagelist) {
++ int nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++ mfn = (unsigned long *)(l + 1);
++
++ while (i<nr) {
++ if (direct_remap_pfn_range(vma, addr & PAGE_MASK,
++ *mfn, PAGE_SIZE,
++ vma->vm_page_prot, m.dom) < 0) {
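++ /* Flag the frame that failed to map so the caller can
++ * spot it when the array is copied back out below.
++ */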
++ *mfn |= 0xf0000000U;
++ ret++;
++ }
++ mfn++; i++; addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&mm->mmap_sem);
++ if (ret > 0) {
++ p = m.arr;
++ i = 0;
++ ret = 0;
++ list_for_each(l, &pagelist) {
++ int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++ mfn = (unsigned long *)(l + 1);
++ if (copy_to_user(p, mfn, nr*sizeof(*mfn)))
++ ret = -EFAULT;
++ i += nr; p += nr;
++ }
++ }
++ mmapbatch_out:
++ list_for_each_safe(l,l2,&pagelist)
++ free_page((unsigned long)l);
++#undef MMAPBATCH_NR_PER_PAGE
++ }
++ break;
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static struct page *privcmd_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ return NOPAGE_SIGBUS;
++}
++
++static struct vm_operations_struct privcmd_vm_ops = {
++ .nopage = privcmd_nopage
++};
++
++static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++ /* Unsupported for auto-translate guests. */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return -ENOSYS;
++
++ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++ vma->vm_ops = &privcmd_vm_ops;
++ vma->vm_private_data = NULL;
++
++ return 0;
++}
++
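++/* Atomically claim the VMA's private data: the first caller sees NULL
++ * and wins; any later mapping attempt on the same VMA is refused.
++ */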
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
++{
++ return (xchg(&vma->vm_private_data, (void *)1) == NULL);
++}
++#endif
++
++static const struct file_operations privcmd_file_ops = {
++ .unlocked_ioctl = privcmd_ioctl,
++ .mmap = privcmd_mmap,
++};
++
++static int capabilities_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len = 0;
++ *page = 0;
++
++ if (is_initial_xendomain())
++ len = sprintf(page, "control_d\n");
++
++ *eof = 1;
++ return len;
++}
++
++static int __init privcmd_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
++ if (privcmd_intf != NULL)
++ privcmd_intf->proc_fops = &privcmd_file_ops;
++
++ capabilities_intf = create_xen_proc_entry("capabilities", 0400);
++ if (capabilities_intf != NULL)
++ capabilities_intf->read_proc = capabilities_read;
++
++ return 0;
++}
++
++__initcall(privcmd_init);
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/common.h linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/common.h
+--- linux-2.6.18.8/drivers/xen/scsiback/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,181 @@
++/*
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __SCSIIF__BACKEND__COMMON_H__
++#define __SCSIIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/kthread.h>
++#include <linux/blkdev.h>
++#include <linux/list.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_dbg.h>
++#include <scsi/scsi_eh.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <asm/delay.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/ring.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/vscsiif.h>
++
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct ids_tuple {
++ unsigned int hst; /* host */
++ unsigned int chn; /* channel */
++ unsigned int tgt; /* target */
++ unsigned int lun; /* LUN */
++};
++
++struct v2p_entry {
++ struct ids_tuple v; /* translate from */
++ struct scsi_device *sdev; /* translate to */
++ struct list_head l;
++};
++
++struct vscsibk_info {
++ struct xenbus_device *dev;
++
++ domid_t domid;
++ unsigned int evtchn;
++ unsigned int irq;
++
++ struct vscsiif_back_ring ring;
++ struct vm_struct *ring_area;
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++
++ spinlock_t ring_lock;
++ atomic_t nr_unreplied_reqs;
++
++ spinlock_t v2p_lock;
++ struct list_head v2p_entry_lists;
++
++ struct task_struct *kthread;
++ wait_queue_head_t waiting_to_free;
++ wait_queue_head_t wq;
++ unsigned int waiting_reqs;
++ struct page **mmap_pages;
++
++};
++
++typedef struct {
++ unsigned char act;
++ struct vscsibk_info *info;
++ struct scsi_device *sdev;
++
++ uint16_t rqid;
++
++ uint8_t nr_segments;
++ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
++ uint8_t cmd_len;
++
++ uint8_t sc_data_direction;
++ uint16_t timeout_per_command;
++
++ uint32_t request_bufflen;
++ struct scatterlist *sgl;
++ grant_ref_t gref[VSCSIIF_SG_TABLESIZE];
++
++ int32_t rslt;
++ uint32_t resid;
++ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
++
++ struct list_head free_list;
++} pending_req_t;
++
++
++
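++/* Reference counting of in-flight requests: the final scsiback_put()
++ * wakes up anyone sleeping on waiting_to_free.
++ */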
++#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs))
++#define scsiback_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
++
++#define VSCSIIF_TIMEOUT (900*HZ)
++
++
++irqreturn_t scsiback_intr(int, void *, struct pt_regs *);
++int scsiback_init_sring(struct vscsibk_info *info,
++ unsigned long ring_ref, unsigned int evtchn);
++int scsiback_schedule(void *data);
++
++
++struct vscsibk_info *vscsibk_info_alloc(domid_t domid);
++void scsiback_free(struct vscsibk_info *info);
++void scsiback_disconnect(struct vscsibk_info *info);
++int __init scsiback_interface_init(void);
++void scsiback_interface_exit(void);
++int scsiback_xenbus_init(void);
++void scsiback_xenbus_unregister(void);
++
++void scsiback_init_translation_table(struct vscsibk_info *info);
++
++int scsiback_add_translation_entry(struct vscsibk_info *info,
++ struct scsi_device *sdev, struct ids_tuple *v);
++
++int scsiback_del_translation_entry(struct vscsibk_info *info,
++ struct ids_tuple *v);
++struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
++ struct ids_tuple *v);
++void scsiback_release_translation_entry(struct vscsibk_info *info);
++
++
++void scsiback_cmd_exec(pending_req_t *pending_req);
++void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
++ uint32_t resid, pending_req_t *pending_req);
++void scsiback_fast_flush_area(pending_req_t *req);
++
++void scsiback_rsp_emulation(pending_req_t *pending_req);
++void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req);
++void scsiback_emulation_init(void);
++
++
++#endif /* __SCSIIF__BACKEND__COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/emulate.c linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/emulate.c
+--- linux-2.6.18.8/drivers/xen/scsiback/emulate.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/emulate.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,454 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include "common.h"
++
++/* Following SCSI commands are not defined in scsi/scsi.h */
++#define EXTENDED_COPY 0x83 /* EXTENDED COPY command */
++#define REPORT_ALIASES 0xa3 /* REPORT ALIASES command */
++#define CHANGE_ALIASES 0xa4 /* CHANGE ALIASES command */
++#define SET_PRIORITY 0xa4 /* SET PRIORITY command */
++
++
++/*
++ Bitmap controlling how each SCSI op_code is emulated.
++ (Bits 3 to 7 are reserved for future use.)
++*/
++#define VSCSIIF_NEED_CMD_EXEC 0x01 /* If this bit is set, cmd exec */
++ /* is required. */
++#define VSCSIIF_NEED_EMULATE_REQBUF 0x02 /* If this bit is set, need */
++ /* emulation request buff before */
++ /* cmd exec. */
++#define VSCSIIF_NEED_EMULATE_RSPBUF 0x04 /* If this bit is set, need */
++ /* emulation resp buff after */
++ /* cmd exec. */
++
++/* Additional Sense Code (ASC) used */
++#define NO_ADDITIONAL_SENSE 0x0
++#define LOGICAL_UNIT_NOT_READY 0x4
++#define UNRECOVERED_READ_ERR 0x11
++#define PARAMETER_LIST_LENGTH_ERR 0x1a
++#define INVALID_OPCODE 0x20
++#define ADDR_OUT_OF_RANGE 0x21
++#define INVALID_FIELD_IN_CDB 0x24
++#define INVALID_FIELD_IN_PARAM_LIST 0x26
++#define POWERON_RESET 0x29
++#define SAVING_PARAMS_UNSUP 0x39
++#define THRESHOLD_EXCEEDED 0x5d
++#define LOW_POWER_COND_ON 0x5e
++
++
++
++/* Number of SCSI op_codes */
++#define VSCSI_MAX_SCSI_OP_CODE 256
++static unsigned char bitmap[VSCSI_MAX_SCSI_OP_CODE];
++
++
++
++/*
++ Emulation routines for each SCSI op_code.
++*/
++static void (*pre_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
++static void (*post_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
++
++
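++/* SCSI result word as used by the Linux SCSI midlayer: the driver
++ * byte lives in bits 31:24 and the SAM status in the low byte.
++ */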
++static const int check_condition_result =
++ (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
++
++static void scsiback_mk_sense_buffer(uint8_t *data, uint8_t key,
++ uint8_t asc, uint8_t asq)
++{
++ data[0] = 0x70; /* fixed, current */
++ data[2] = key;
++ data[7] = 0xa; /* implies 18 byte sense buffer */
++ data[12] = asc;
++ data[13] = asq;
++}
++
++static void resp_not_supported_cmd(pending_req_t *pending_req, void *data)
++{
++ scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
++ INVALID_OPCODE, 0);
++ pending_req->resid = 0;
++ pending_req->rslt = check_condition_result;
++}
++
++
++static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg,
++ void *buf, unsigned int buflen)
++{
++ void *from = buf;
++ void *to;
++ unsigned int from_rest = buflen;
++ unsigned int to_capa;
++ unsigned int copy_size = 0;
++ unsigned int i;
++ unsigned long pfn;
++
++ for (i = 0; i < nr_sg; i++) {
++ if (sg->page == NULL) {
++ printk(KERN_WARNING "%s: inconsistent length field in "
++ "scatterlist\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ to_capa = sg->length;
++ copy_size = min_t(unsigned int, to_capa, from_rest);
++
++ pfn = page_to_pfn(sg->page);
++ to = pfn_to_kaddr(pfn) + (sg->offset);
++ memcpy(to, from, copy_size);
++
++ from_rest -= copy_size;
++ if (from_rest == 0) {
++ return 0;
++ }
++
++ sg++;
++ from += copy_size;
++ }
++
++ printk(KERN_WARNING "%s: no space in scatterlist\n",
++ __FUNCTION__);
++ return -ENOMEM;
++}
++
++static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg,
++ void *buf, unsigned int buflen)
++{
++ void *from;
++ void *to = buf;
++ unsigned int from_rest;
++ unsigned int to_capa = buflen;
++ unsigned int copy_size;
++ unsigned int i;
++ unsigned long pfn;
++
++ for (i = 0; i < nr_sg; i++) {
++ if (sg->page == NULL) {
++ printk(KERN_WARNING "%s: inconsistent length field in "
++ "scatterlist\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ from_rest = sg->length;
++ if ((from_rest > 0) && (to_capa < from_rest)) {
++ printk(KERN_WARNING
++ "%s: no space in destination buffer\n",
++ __FUNCTION__);
++ return -ENOMEM;
++ }
++ copy_size = from_rest;
++
++ pfn = page_to_pfn(sg->page);
++ from = pfn_to_kaddr(pfn) + (sg->offset);
++ memcpy(to, from, copy_size);
++
++ to_capa -= copy_size;
++
++ sg++;
++ to += copy_size;
++ }
++
++ return 0;
++}
++
++static int __nr_luns_under_host(struct vscsibk_info *info)
++{
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++ int lun_cnt = 0;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry(entry, head, l) {
++ lun_cnt++;
++ }
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++
++ return (lun_cnt);
++}
++
++
++/* REPORT LUNS Define*/
++#define VSCSI_REPORT_LUNS_HEADER 8
++#define VSCSI_REPORT_LUNS_RETRY 3
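++/* A REPORT LUNS response starts with an 8-byte header (4-byte LUN
++ * list length plus 4 reserved bytes) ahead of the LUN entries.
++ */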
++
++/* quoted scsi_debug.c/resp_report_luns() */
++static void __report_luns(pending_req_t *pending_req, void *data)
++{
++ struct vscsibk_info *info = pending_req->info;
++ unsigned int channel = pending_req->sdev->channel;
++ unsigned int target = pending_req->sdev->id;
++ unsigned int nr_seg = pending_req->nr_segments;
++ unsigned char *cmd = (unsigned char *)pending_req->cmnd;
++
++ unsigned char *buff = NULL;
++ unsigned char alloc_len;
++ unsigned int alloc_luns = 0;
++ unsigned int req_bufflen = 0;
++ unsigned int actual_len = 0;
++ unsigned int retry_cnt = 0;
++ int select_report = (int)cmd[2];
++ int i, lun_cnt = 0, lun, upper, err = 0;
++
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ struct scsi_lun *one_lun;
++
++ req_bufflen = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
++ if ((req_bufflen < 4) || (select_report != 0))
++ goto fail;
++
++ alloc_luns = __nr_luns_under_host(info);
++ alloc_len = sizeof(struct scsi_lun) * alloc_luns
++ + VSCSI_REPORT_LUNS_HEADER;
++retry:
++ if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) {
++ printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__);
++ goto fail;
++ }
++
++ memset(buff, 0, alloc_len);
++
++ one_lun = (struct scsi_lun *) &buff[8];
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == channel) &&
++ (entry->v.tgt == target)) {
++
++ /* check overflow */
++ if (lun_cnt >= alloc_luns) {
++ spin_unlock_irqrestore(&info->v2p_lock,
++ flags);
++
++ if (retry_cnt < VSCSI_REPORT_LUNS_RETRY) {
++ retry_cnt++;
++ if (buff)
++ kfree(buff);
++ goto retry;
++ }
++
++ goto fail;
++ }
++
++ lun = entry->v.lun;
++ upper = (lun >> 8) & 0x3f;
++ if (upper)
++ one_lun[lun_cnt].scsi_lun[0] = upper;
++ one_lun[lun_cnt].scsi_lun[1] = lun & 0xff;
++ lun_cnt++;
++ }
++ }
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++
++ buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff;
++ buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff;
++
++ actual_len = lun_cnt * sizeof(struct scsi_lun)
++ + VSCSI_REPORT_LUNS_HEADER;
++ req_bufflen = 0;
++ for (i = 0; i < nr_seg; i++)
++ req_bufflen += pending_req->sgl[i].length;
++
++ err = __copy_to_sg(pending_req->sgl, nr_seg, buff,
++ min(req_bufflen, actual_len));
++ if (err)
++ goto fail;
++
++ memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
++ pending_req->rslt = 0x00;
++ pending_req->resid = req_bufflen - min(req_bufflen, actual_len);
++
++ kfree(buff);
++ return;
++
++fail:
++ scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
++ INVALID_FIELD_IN_CDB, 0);
++ pending_req->rslt = check_condition_result;
++ pending_req->resid = 0;
++ if (buff)
++ kfree(buff);
++ return;
++}
++
++
++
++int __pre_do_emulation(pending_req_t *pending_req, void *data)
++{
++ uint8_t op_code = pending_req->cmnd[0];
++
++ if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_REQBUF) &&
++ pre_function[op_code] != NULL) {
++ pre_function[op_code](pending_req, data);
++ }
++
++ /*
++	  0: no need to call the native driver, so return immediately.
++	  1: no emulation needed, or the native driver should be called
++	     after modifying the request buffer.
++ */
++ return !!(bitmap[op_code] & VSCSIIF_NEED_CMD_EXEC);
++}
++
++void scsiback_rsp_emulation(pending_req_t *pending_req)
++{
++ uint8_t op_code = pending_req->cmnd[0];
++
++ if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_RSPBUF) &&
++ post_function[op_code] != NULL) {
++ post_function[op_code](pending_req, NULL);
++ }
++
++ return;
++}
++
++
++void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req)
++{
++ if (__pre_do_emulation(pending_req, NULL)) {
++ scsiback_cmd_exec(pending_req);
++ }
++ else {
++ scsiback_fast_flush_area(pending_req);
++ scsiback_do_resp_with_sense(pending_req->sense_buffer,
++ pending_req->rslt, pending_req->resid, pending_req);
++ }
++}
++
++
++/*
++ Following are not customizable functions.
++*/
++void scsiback_emulation_init(void)
++{
++ int i;
++
++ /* Initialize to default state */
++ for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
++ bitmap[i] = (VSCSIIF_NEED_EMULATE_REQBUF |
++ VSCSIIF_NEED_EMULATE_RSPBUF);
++ pre_function[i] = resp_not_supported_cmd;
++ post_function[i] = NULL;
++		/* Default: emulate both the request and the response,
++		   and reply "not supported" instead of calling the
++		   native driver. */
++ }
++
++ /*
++ Register appropriate functions below as you need.
++ (See scsi/scsi.h for definition of SCSI op_code.)
++ */
++
++ /*
++	  These commands need no emulation and are passed straight to the native driver.
++ */
++ bitmap[TEST_UNIT_READY] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[TEST_UNIT_READY] = NULL;
++ post_function[TEST_UNIT_READY] = NULL;
++
++ bitmap[REZERO_UNIT] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[REZERO_UNIT] = NULL;
++ post_function[REZERO_UNIT] = NULL;
++
++ bitmap[REQUEST_SENSE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[REQUEST_SENSE] = NULL;
++ post_function[REQUEST_SENSE] = NULL;
++
++ bitmap[FORMAT_UNIT] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[FORMAT_UNIT] = NULL;
++ post_function[FORMAT_UNIT] = NULL;
++
++ bitmap[READ_BLOCK_LIMITS] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_BLOCK_LIMITS] = NULL;
++ post_function[READ_BLOCK_LIMITS] = NULL;
++
++ bitmap[READ_6] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_6] = NULL;
++ post_function[READ_6] = NULL;
++
++ bitmap[WRITE_6] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[WRITE_6] = NULL;
++ post_function[WRITE_6] = NULL;
++
++ bitmap[WRITE_FILEMARKS] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[WRITE_FILEMARKS] = NULL;
++ post_function[WRITE_FILEMARKS] = NULL;
++
++ bitmap[SPACE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[SPACE] = NULL;
++ post_function[SPACE] = NULL;
++
++ bitmap[INQUIRY] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[INQUIRY] = NULL;
++ post_function[INQUIRY] = NULL;
++
++ bitmap[ERASE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[ERASE] = NULL;
++ post_function[ERASE] = NULL;
++
++ bitmap[MODE_SENSE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[MODE_SENSE] = NULL;
++ post_function[MODE_SENSE] = NULL;
++
++ bitmap[SEND_DIAGNOSTIC] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[SEND_DIAGNOSTIC] = NULL;
++ post_function[SEND_DIAGNOSTIC] = NULL;
++
++ bitmap[READ_CAPACITY] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_CAPACITY] = NULL;
++ post_function[READ_CAPACITY] = NULL;
++
++ bitmap[READ_10] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_10] = NULL;
++ post_function[READ_10] = NULL;
++
++ bitmap[WRITE_10] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[WRITE_10] = NULL;
++ post_function[WRITE_10] = NULL;
++
++ /*
++	  This command is fully emulated and never calls the native driver.
++ */
++ pre_function[REPORT_LUNS] = __report_luns;
++ bitmap[REPORT_LUNS] = (VSCSIIF_NEED_EMULATE_REQBUF |
++ VSCSIIF_NEED_EMULATE_RSPBUF);
++
++ return;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/interface.c linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/interface.c
+--- linux-2.6.18.8/drivers/xen/scsiback/interface.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/interface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,182 @@
++/*
++ * interface management.
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include "common.h"
++
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
++
++
++static kmem_cache_t *scsiback_cachep;
++
++struct vscsibk_info *vscsibk_info_alloc(domid_t domid)
++{
++ struct vscsibk_info *info;
++
++ info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
++ if (!info)
++ return ERR_PTR(-ENOMEM);
++
++ memset(info, 0, sizeof(*info));
++ info->domid = domid;
++ spin_lock_init(&info->ring_lock);
++ atomic_set(&info->nr_unreplied_reqs, 0);
++ init_waitqueue_head(&info->wq);
++ init_waitqueue_head(&info->waiting_to_free);
++
++ return info;
++}
++
++static int map_frontend_page( struct vscsibk_info *info,
++ unsigned long ring_ref)
++{
++ struct gnttab_map_grant_ref op;
++ int err;
++
++ gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
++ GNTMAP_host_map, ring_ref,
++ info->domid);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++ BUG_ON(err);
++
++ if (op.status) {
++ printk(KERN_ERR "scsiback: Grant table operation failure !\n");
++ return op.status;
++ }
++
++ info->shmem_ref = ring_ref;
++ info->shmem_handle = op.handle;
++
++ return (GNTST_okay);
++}
++
++static void unmap_frontend_page(struct vscsibk_info *info)
++{
++ struct gnttab_unmap_grant_ref op;
++ int err;
++
++ gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr,
++ GNTMAP_host_map, info->shmem_handle);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++ BUG_ON(err);
++
++}
++
++int scsiback_init_sring(struct vscsibk_info *info,
++ unsigned long ring_ref, unsigned int evtchn)
++{
++ struct vscsiif_sring *sring;
++ int err;
++
++ if (info->irq) {
++ printk(KERN_ERR "scsiback: Already connected through?\n");
++ return -1;
++ }
++
++ info->ring_area = alloc_vm_area(PAGE_SIZE);
++	if (!info->ring_area)
++ return -ENOMEM;
++
++ err = map_frontend_page(info, ring_ref);
++ if (err)
++ goto free_vm;
++
++ sring = (struct vscsiif_sring *) info->ring_area->addr;
++ BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ info->domid, evtchn,
++ scsiback_intr, 0, "vscsiif-backend", info);
++
++ if (err < 0)
++ goto unmap_page;
++
++ info->irq = err;
++
++ return 0;
++
++unmap_page:
++ unmap_frontend_page(info);
++free_vm:
++ free_vm_area(info->ring_area);
++
++ return err;
++}
++
++void scsiback_disconnect(struct vscsibk_info *info)
++{
++ if (info->kthread) {
++ kthread_stop(info->kthread);
++ info->kthread = NULL;
++ }
++
++ wait_event(info->waiting_to_free,
++ atomic_read(&info->nr_unreplied_reqs) == 0);
++
++ if (info->irq) {
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++ }
++
++ if (info->ring.sring) {
++ unmap_frontend_page(info);
++ free_vm_area(info->ring_area);
++ info->ring.sring = NULL;
++ }
++}
++
++void scsiback_free(struct vscsibk_info *info)
++{
++ kmem_cache_free(scsiback_cachep, info);
++}
++
++int __init scsiback_interface_init(void)
++{
++ scsiback_cachep = kmem_cache_create("vscsiif_cache",
++ sizeof(struct vscsibk_info), 0, 0, NULL, NULL);
++ if (!scsiback_cachep) {
++ printk(KERN_ERR "scsiback: can't init scsi cache\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++void scsiback_interface_exit(void)
++{
++ kmem_cache_destroy(scsiback_cachep);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/Makefile
+--- linux-2.6.18.8/drivers/xen/scsiback/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_XEN_SCSI_BACKEND) := xen-scsibk.o
++
++xen-scsibk-y := interface.o scsiback.o xenbus.o translate.o emulate.o
++
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/scsiback.c linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/scsiback.c
+--- linux-2.6.18.8/drivers/xen/scsiback/scsiback.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/scsiback.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,717 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_dbg.h>
++#include <scsi/scsi_eh.h>
++
++#include "common.h"
++
++
++struct list_head pending_free;
++DEFINE_SPINLOCK(pending_free_lock);
++DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
++module_param_named(reqs, vscsiif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
++
++static unsigned int log_print_stat = 0;
++module_param(log_print_stat, int, 0644);
++
++#define SCSIBACK_INVALID_HANDLE (~0)
++
++static pending_req_t *pending_reqs;
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
++
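++/* Each pending request owns VSCSIIF_SG_TABLESIZE consecutive slots in
++   pending_pages and pending_grant_handles, indexed by request and segment. */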
++static int vaddr_pagenr(pending_req_t *req, int seg)
++{
++ return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
++}
++
++static unsigned long vaddr(pending_req_t *req, int seg)
++{
++ unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
++
++#define pending_handle(_req, _seg) \
++ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
++
++
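++/* Unmap every grant-mapped segment of a request and free its scatterlist. */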
++void scsiback_fast_flush_area(pending_req_t *req)
++{
++ struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
++ unsigned int i, invcount = 0;
++ grant_handle_t handle;
++ int err;
++
++ if (req->nr_segments) {
++ for (i = 0; i < req->nr_segments; i++) {
++ handle = pending_handle(req, i);
++ if (handle == SCSIBACK_INVALID_HANDLE)
++ continue;
++ gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
++ GNTMAP_host_map, handle);
++ pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
++ invcount++;
++ }
++
++ err = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(err);
++ kfree(req->sgl);
++ }
++
++ return;
++}
++
++
++static pending_req_t * alloc_req(struct vscsibk_info *info)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return req;
++}
++
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++
++static void scsiback_notify_work(struct vscsibk_info *info)
++{
++ info->waiting_reqs = 1;
++ wake_up(&info->wq);
++}
++
++void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
++ uint32_t resid, pending_req_t *pending_req)
++{
++ vscsiif_response_t *ring_res;
++ struct vscsibk_info *info = pending_req->info;
++ int notify;
++ int more_to_do = 1;
++ unsigned long flags;
++
++ DPRINTK("%s\n",__FUNCTION__);
++
++ spin_lock_irqsave(&info->ring_lock, flags);
++
++ ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
++ info->ring.rsp_prod_pvt++;
++
++ ring_res->rslt = result;
++ ring_res->rqid = pending_req->rqid;
++
++ if (sense_buffer != NULL) {
++ memcpy(ring_res->sense_buffer, sense_buffer,
++ VSCSIIF_SENSE_BUFFERSIZE);
++ ring_res->sense_len = VSCSIIF_SENSE_BUFFERSIZE;
++ } else {
++ ring_res->sense_len = 0;
++ }
++
++ ring_res->residual_len = resid;
++
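++	/* push the response, then look for requests that raced in meanwhile */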
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
++ if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
++ RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&info->ring_lock, flags);
++
++ if (more_to_do)
++ scsiback_notify_work(info);
++
++ if (notify)
++ notify_remote_via_irq(info->irq);
++
++ free_req(pending_req);
++}
++
++static void scsiback_print_status(char *sense_buffer, int errors,
++ pending_req_t *pending_req)
++{
++ struct scsi_device *sdev = pending_req->sdev;
++
++ printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
++ sdev->channel, sdev->id, sdev->lun);
++ printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
++ status_byte(errors), msg_byte(errors),
++ host_byte(errors), driver_byte(errors));
++
++ printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
++ pending_req->cmnd[0]);
++
++ if (CHECK_CONDITION & status_byte(errors))
++ __scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
++}
++
++
++static void scsiback_cmd_done(struct request *req, int errors)
++{
++ pending_req_t *pending_req = req->end_io_data;
++ unsigned char *sense_buffer;
++ unsigned int resid;
++
++ sense_buffer = req->sense;
++ resid = req->data_len;
++
++ if (errors != 0) {
++ if (log_print_stat)
++ scsiback_print_status(sense_buffer, errors, pending_req);
++ }
++
++ scsiback_rsp_emulation(pending_req);
++
++ scsiback_fast_flush_area(pending_req);
++ scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
++ scsiback_put(pending_req->info);
++
++ __blk_put_request(req->q, req);
++}
++
++
++static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
++ pending_req_t *pending_req)
++{
++ u32 flags;
++ int write;
++ int i, err = 0;
++ unsigned int data_len = 0;
++ struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
++ struct vscsibk_info *info = pending_req->info;
++
++ int data_dir = (int)pending_req->sc_data_direction;
++ unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
++
++ write = (data_dir == DMA_TO_DEVICE);
++
++ if (nr_segments) {
++		/* sgl is freed in scsiback_fast_flush_area() */
++ pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
++ GFP_KERNEL);
++ if (!pending_req->sgl) {
++ printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
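++		/* data headed to the device is only read from guest
++		   memory, so those pages are mapped read-only */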
++ for (i = 0; i < nr_segments; i++) {
++ flags = GNTMAP_host_map;
++ if (write)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++ ring_req->seg[i].gref,
++ info->domid);
++ }
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
++ BUG_ON(err);
++
++ for (i = 0; i < nr_segments; i++) {
++ if (unlikely(map[i].status != 0)) {
++ printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
++ map[i].handle = SCSIBACK_INVALID_HANDLE;
++ err |= 1;
++ }
++
++ pending_handle(pending_req, i) = map[i].handle;
++
++ if (err)
++ continue;
++
++ set_phys_to_machine(__pa(vaddr(
++ pending_req, i)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++
++ pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
++ pending_req->sgl[i].offset = ring_req->seg[i].offset;
++ pending_req->sgl[i].length = ring_req->seg[i].length;
++ data_len += pending_req->sgl[i].length;
++
++ barrier();
++ if (pending_req->sgl[i].offset >= PAGE_SIZE ||
++ pending_req->sgl[i].length > PAGE_SIZE ||
++ pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
++ err |= 1;
++
++ }
++
++ if (err)
++ goto fail_flush;
++ }
++
++ pending_req->request_bufflen = data_len;
++
++ return 0;
++
++fail_flush:
++ scsiback_fast_flush_area(pending_req);
++ return -ENOMEM;
++}
++
++/* quoted scsi_lib.c/scsi_merge_bio */
++static int scsiback_merge_bio(struct request *rq, struct bio *bio)
++{
++ struct request_queue *q = rq->q;
++
++ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
++ if (rq_data_dir(rq) == WRITE)
++ bio->bi_rw |= (1 << BIO_RW);
++
++ blk_queue_bounce(q, &bio);
++
++ if (!rq->bio)
++ blk_rq_bio_prep(q, rq, bio);
++ else if (!q->back_merge_fn(q, rq, bio))
++ return -EINVAL;
++ else {
++ rq->biotail->bi_next = bio;
++ rq->biotail = bio;
++ rq->hard_nr_sectors += bio_sectors(bio);
++ rq->nr_sectors = rq->hard_nr_sectors;
++ }
++
++ return 0;
++}
++
++
++/* quoted scsi_lib.c/scsi_bi_endio */
++static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
++{
++ if (bio->bi_size)
++ return 1;
++
++ bio_put(bio);
++ return 0;
++}
++
++
++
++/* quoted scsi_lib.c/scsi_req_map_sg . */
++static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
++{
++ struct request_queue *q = rq->q;
++ int nr_pages;
++ unsigned int nsegs = count;
++
++ unsigned int data_len = 0, len, bytes, off;
++ struct page *page;
++ struct bio *bio = NULL;
++ int i, err, nr_vecs = 0;
++
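++	/* pack the grant-mapped pages into bios and merge each full
++	   bio into the request */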
++ for (i = 0; i < nsegs; i++) {
++ page = pending_req->sgl[i].page;
++ off = (unsigned int)pending_req->sgl[i].offset;
++ len = (unsigned int)pending_req->sgl[i].length;
++ data_len += len;
++
++ nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ while (len > 0) {
++ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++
++ if (!bio) {
++ nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
++ nr_pages -= nr_vecs;
++ bio = bio_alloc(GFP_KERNEL, nr_vecs);
++ if (!bio) {
++ err = -ENOMEM;
++ goto free_bios;
++ }
++ bio->bi_end_io = scsiback_bi_endio;
++ }
++
++ if (bio_add_pc_page(q, bio, page, bytes, off) !=
++ bytes) {
++ bio_put(bio);
++ err = -EINVAL;
++ goto free_bios;
++ }
++
++ if (bio->bi_vcnt >= nr_vecs) {
++ err = scsiback_merge_bio(rq, bio);
++ if (err) {
++ bio_endio(bio, bio->bi_size, 0);
++ goto free_bios;
++ }
++ bio = NULL;
++ }
++
++ page++;
++ len -= bytes;
++ off = 0;
++ }
++ }
++
++ rq->buffer = rq->data = NULL;
++ rq->data_len = data_len;
++
++ return 0;
++
++free_bios:
++ while ((bio = rq->bio) != NULL) {
++ rq->bio = bio->bi_next;
++ /*
++		 * call bio_endio() instead of bio_put() in case the bio was bounced
++ */
++ bio_endio(bio, bio->bi_size, 0);
++ }
++
++ return err;
++}
++
++
++void scsiback_cmd_exec(pending_req_t *pending_req)
++{
++ int cmd_len = (int)pending_req->cmd_len;
++ int data_dir = (int)pending_req->sc_data_direction;
++ unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
++ unsigned int timeout;
++ struct request *rq;
++ int write;
++
++ DPRINTK("%s\n",__FUNCTION__);
++
++	/* make sure the backend does not time out earlier than the frontend */
++ if (pending_req->timeout_per_command)
++ timeout = pending_req->timeout_per_command * HZ;
++ else
++ timeout = VSCSIIF_TIMEOUT;
++
++ write = (data_dir == DMA_TO_DEVICE);
++ rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);
++
++ rq->flags |= REQ_BLOCK_PC;
++ rq->cmd_len = cmd_len;
++ memcpy(rq->cmd, pending_req->cmnd, cmd_len);
++
++ memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
++ rq->sense = pending_req->sense_buffer;
++ rq->sense_len = 0;
++
++ /* not allowed to retry in backend. */
++ rq->retries = 0;
++ rq->timeout = timeout;
++ rq->end_io_data = pending_req;
++
++	if (nr_segments) {
++		if (request_map_sg(rq, pending_req, nr_segments)) {
++			printk(KERN_ERR "scsiback: SG request map error\n");
++			blk_put_request(rq);
++			return;
++		}
++	}
++
++ scsiback_get(pending_req->info);
++ blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
++
++	return;
++}
++
++
++static void scsiback_device_reset_exec(pending_req_t *pending_req)
++{
++ struct vscsibk_info *info = pending_req->info;
++ int err;
++ struct scsi_device *sdev = pending_req->sdev;
++
++ scsiback_get(info);
++ err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
++
++ scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
++ scsiback_put(info);
++
++ return;
++}
++
++
++irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
++{
++ scsiback_notify_work((struct vscsibk_info *)dev_id);
++ return IRQ_HANDLED;
++}
++
++static int prepare_pending_reqs(struct vscsibk_info *info,
++ vscsiif_request_t *ring_req, pending_req_t *pending_req)
++{
++ struct scsi_device *sdev;
++ struct ids_tuple vir;
++ int err = -EINVAL;
++
++ DPRINTK("%s\n",__FUNCTION__);
++
++ pending_req->rqid = ring_req->rqid;
++ pending_req->act = ring_req->act;
++
++ pending_req->info = info;
++
++ vir.chn = ring_req->channel;
++ vir.tgt = ring_req->id;
++ vir.lun = ring_req->lun;
++
++ rmb();
++ sdev = scsiback_do_translation(info, &vir);
++ if (!sdev) {
++ pending_req->sdev = NULL;
++ DPRINTK("scsiback: doesn't exist.\n");
++ err = -ENODEV;
++ goto invalid_value;
++ }
++ pending_req->sdev = sdev;
++
++ /* request range check from frontend */
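++	/* each field is copied out of the shared ring before it is
++	   validated; barrier() keeps the check on the private copy */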
++ pending_req->sc_data_direction = ring_req->sc_data_direction;
++ barrier();
++ if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
++ (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
++ (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
++ (pending_req->sc_data_direction != DMA_NONE)) {
++ DPRINTK("scsiback: invalid parameter data_dir = %d\n",
++ pending_req->sc_data_direction);
++ err = -EINVAL;
++ goto invalid_value;
++ }
++
++ pending_req->nr_segments = ring_req->nr_segments;
++ barrier();
++ if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
++ DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
++ pending_req->nr_segments);
++ err = -EINVAL;
++ goto invalid_value;
++ }
++
++ pending_req->cmd_len = ring_req->cmd_len;
++ barrier();
++ if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
++ DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
++ pending_req->cmd_len);
++ err = -EINVAL;
++ goto invalid_value;
++ }
++ memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
++
++ pending_req->timeout_per_command = ring_req->timeout_per_command;
++
++	if (scsiback_gnttab_data_map(ring_req, pending_req)) {
++ DPRINTK("scsiback: invalid buffer\n");
++ err = -EINVAL;
++ goto invalid_value;
++ }
++
++ return 0;
++
++invalid_value:
++ return err;
++}
++
++
++static int scsiback_do_cmd_fn(struct vscsibk_info *info)
++{
++ struct vscsiif_back_ring *ring = &info->ring;
++ vscsiif_request_t *ring_req;
++
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int err, more_to_do = 0;
++
++ DPRINTK("%s\n",__FUNCTION__);
++
++ rc = ring->req_cons;
++ rp = ring->sring->req_prod;
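++	/* see all requests up to req_prod before reading their contents */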
++ rmb();
++
++ while ((rc != rp)) {
++ if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
++ break;
++ pending_req = alloc_req(info);
++ if (NULL == pending_req) {
++ more_to_do = 1;
++ break;
++ }
++
++ ring_req = RING_GET_REQUEST(ring, rc);
++ ring->req_cons = ++rc;
++
++ err = prepare_pending_reqs(info, ring_req,
++ pending_req);
++ if (err == -EINVAL) {
++ scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
++ 0, pending_req);
++ continue;
++ } else if (err == -ENODEV) {
++ scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
++ 0, pending_req);
++ continue;
++ }
++
++ if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
++ scsiback_req_emulation_or_cmdexec(pending_req);
++ } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
++ scsiback_device_reset_exec(pending_req);
++ } else {
++ printk(KERN_ERR "scsiback: invalid parameter for request\n");
++ scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
++ 0, pending_req);
++ continue;
++ }
++ }
++
++ if (RING_HAS_UNCONSUMED_REQUESTS(ring))
++ more_to_do = 1;
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++
++ return more_to_do;
++}
++
++
++int scsiback_schedule(void *data)
++{
++ struct vscsibk_info *info = (struct vscsibk_info *)data;
++
++ DPRINTK("%s\n",__FUNCTION__);
++
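++	/* each pass waits for posted work and for a free pending_req */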
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(
++ info->wq,
++ info->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ info->waiting_reqs = 0;
++ smp_mb();
++
++ if (scsiback_do_cmd_fn(info))
++ info->waiting_reqs = 1;
++ }
++
++ return 0;
++}
++
++
++static int __init scsiback_init(void)
++{
++ int i, mmap_pages;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
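++	/* one pending_req per in-flight request, plus one preallocated
++	   page slot per possible scatter-gather segment */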
++ mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
++
++ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
++ vscsiif_reqs, GFP_KERNEL);
++ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++ mmap_pages, GFP_KERNEL);
++ pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs || !pending_grant_handles || !pending_pages)
++ goto out_of_memory;
++
++ for (i = 0; i < mmap_pages; i++)
++ pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
++
++ if (scsiback_interface_init() < 0)
++ goto out_of_kmem;
++
++	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * vscsiif_reqs);
++ INIT_LIST_HEAD(&pending_free);
++
++ for (i = 0; i < vscsiif_reqs; i++)
++ list_add_tail(&pending_reqs[i].free_list, &pending_free);
++
++ if (scsiback_xenbus_init())
++ goto out_of_xenbus;
++
++ scsiback_emulation_init();
++
++ return 0;
++
++out_of_xenbus:
++ scsiback_xenbus_unregister();
++out_of_kmem:
++ scsiback_interface_exit();
++out_of_memory:
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++ printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++static void __exit scsiback_exit(void)
++{
++ scsiback_xenbus_unregister();
++ scsiback_interface_exit();
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
++
++}
++
++module_init(scsiback_init);
++module_exit(scsiback_exit);
++
++MODULE_DESCRIPTION("Xen SCSI backend driver");
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/translate.c linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/translate.c
+--- linux-2.6.18.8/drivers/xen/scsiback/translate.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/translate.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,168 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/list.h>
++#include <linux/gfp.h>
++
++#include "common.h"
++
++/*
++ Initialize the translation entry list
++*/
++void scsiback_init_translation_table(struct vscsibk_info *info)
++{
++ INIT_LIST_HEAD(&info->v2p_entry_lists);
++ spin_lock_init(&info->v2p_lock);
++}
++
++
++/*
++ Add a new translation entry
++*/
++int scsiback_add_translation_entry(struct vscsibk_info *info,
++ struct scsi_device *sdev, struct ids_tuple *v)
++{
++ int err = 0;
++ struct v2p_entry *entry;
++ struct v2p_entry *new;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++
++ /* Check double assignment to identical virtual ID */
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == v->chn) &&
++ (entry->v.tgt == v->tgt) &&
++ (entry->v.lun == v->lun)) {
++ printk(KERN_WARNING "scsiback: Virtual ID is already used. "
++ "Assignment was not performed.\n");
++ err = -EEXIST;
++ goto out;
++ }
++
++ }
++
++ /* Create a new translation entry and add to the list */
++ if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) {
++ printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
++ err = -ENOMEM;
++ goto out;
++ }
++ new->v = *v;
++ new->sdev = sdev;
++ list_add_tail(&new->l, head);
++
++out:
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return err;
++}
++
++
++/*
++  Delete the specified translation entry
++*/
++int scsiback_del_translation_entry(struct vscsibk_info *info,
++ struct ids_tuple *v)
++{
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ /* Find out the translation entry specified */
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == v->chn) &&
++ (entry->v.tgt == v->tgt) &&
++ (entry->v.lun == v->lun)) {
++ goto found;
++ }
++ }
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return 1;
++
++found:
++	/* Delete the specified translation entry */
++ scsi_device_put(entry->sdev);
++ list_del(&entry->l);
++ kfree(entry);
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return 0;
++}
++
++
++/*
++ Perform virtual to physical translation
++*/
++struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
++ struct ids_tuple *v)
++{
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ struct scsi_device *sdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == v->chn) &&
++ (entry->v.tgt == v->tgt) &&
++ (entry->v.lun == v->lun)) {
++ sdev = entry->sdev;
++ goto out;
++ }
++ }
++out:
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return sdev;
++}
++
++
++/*
++  Release all translation entries
++*/
++void scsiback_release_translation_entry(struct vscsibk_info *info)
++{
++ struct v2p_entry *entry, *tmp;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry_safe(entry, tmp, head, l) {
++ scsi_device_put(entry->sdev);
++ list_del(&entry->l);
++ kfree(entry);
++ }
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/scsiback/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/xenbus.c
+--- linux-2.6.18.8/drivers/xen/scsiback/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsiback/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,368 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++
++#include "common.h"
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ struct vscsibk_info *info;
++};
++
++
++static int __vscsiif_name(struct backend_info *be, char *buf)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned int domid, id;
++
++ sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id);
++ snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", be->info->domid, id);
++
++ return 0;
++}
++
++static int scsiback_map(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ int err;
++ char name[TASK_COMM_LEN];
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
++ return err;
++ }
++
++ err = scsiback_init_sring(be->info, ring_ref, evtchn);
++ if (err)
++ return err;
++
++ err = __vscsiif_name(be, name);
++ if (err) {
++ xenbus_dev_error(dev, err, "get scsiback dev name");
++ return err;
++ }
++
++ be->info->kthread = kthread_run(scsiback_schedule, be->info, name);
++ if (IS_ERR(be->info->kthread)) {
++ err = PTR_ERR(be->info->kthread);
++ be->info->kthread = NULL;
++ xenbus_dev_error(be->dev, err, "start vscsiif");
++ return err;
++ }
++
++ return 0;
++}
++
++
++struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
++{
++ struct Scsi_Host *shost;
++ struct scsi_device *sdev = NULL;
++
++ shost = scsi_host_lookup(phy->hst);
++ if (IS_ERR(shost)) {
++ printk(KERN_ERR "scsiback: host%d doesn't exist.\n",
++ phy->hst);
++ return NULL;
++ }
++ sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
++ if (!sdev) {
++ printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn't exist.\n",
++ phy->hst, phy->chn, phy->tgt, phy->lun);
++ scsi_host_put(shost);
++ return NULL;
++ }
++
++ scsi_host_put(shost);
++ return (sdev);
++}
++
++#define VSCSIBACK_OP_ADD_OR_DEL_LUN 1
++#define VSCSIBACK_OP_UPDATEDEV_STATE 2
++
++
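++/* Walk the vscsi-devs/dev-N nodes under this backend in xenstore and
++   add or remove LUN translations according to each device state. */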
++static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
++{
++ int i, err = 0;
++ struct ids_tuple phy, vir;
++ int device_state;
++ char str[64], state_str[64];
++ char **dir;
++ unsigned int dir_n = 0;
++ struct xenbus_device *dev = be->dev;
++ struct scsi_device *sdev;
++
++ dir = xenbus_directory(XBT_NIL, dev->nodename, "vscsi-devs", &dir_n);
++ if (IS_ERR(dir))
++ return;
++
++ for (i = 0; i < dir_n; i++) {
++
++ /* read status */
++ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->nodename, state_str, "%u",
++ &device_state);
++ if (XENBUS_EXIST_ERR(err))
++ continue;
++
++ /* physical SCSI device */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->nodename, str,
++ "%u:%u:%u:%u", &phy.hst, &phy.chn, &phy.tgt, &phy.lun);
++ if (XENBUS_EXIST_ERR(err)) {
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ continue;
++ }
++
++ /* virtual SCSI device */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->nodename, str,
++ "%u:%u:%u:%u", &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
++ if (XENBUS_EXIST_ERR(err)) {
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ continue;
++ }
++
++ switch (op) {
++ case VSCSIBACK_OP_ADD_OR_DEL_LUN:
++ if (device_state == XenbusStateInitialising) {
++ sdev = scsiback_get_scsi_device(&phy);
++ if (!sdev)
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ else {
++ err = scsiback_add_translation_entry(be->info, sdev, &vir);
++ if (!err) {
++ if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateInitialised)) {
++ printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++ scsiback_del_translation_entry(be->info, &vir);
++ }
++ } else {
++ scsi_device_put(sdev);
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ }
++ }
++ }
++
++ if (device_state == XenbusStateClosing) {
++ if (!scsiback_del_translation_entry(be->info, &vir)) {
++ if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed))
++ printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++ }
++ }
++ break;
++
++ case VSCSIBACK_OP_UPDATEDEV_STATE:
++ if (device_state == XenbusStateInitialised) {
++ /* modify vscsi-devs/dev-x/state */
++ if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateConnected)) {
++ printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++ scsiback_del_translation_entry(be->info, &vir);
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ }
++ }
++ break;
++		/* Add further processing here when necessary. */
++ default:
++ break;
++ }
++ }
++
++ kfree(dir);
++	return;
++}
++
++
++static void scsiback_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ break;
++ case XenbusStateInitialised:
++ err = scsiback_map(be);
++ if (err)
++ break;
++
++ scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ break;
++ case XenbusStateConnected:
++
++ scsiback_do_lun_hotplug(be, VSCSIBACK_OP_UPDATEDEV_STATE);
++
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ break;
++
++ case XenbusStateClosing:
++ scsiback_disconnect(be->info);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ case XenbusStateReconfiguring:
++ scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
++
++ xenbus_switch_state(dev, XenbusStateReconfigured);
++
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++static int scsiback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (be->info) {
++ scsiback_disconnect(be->info);
++ scsiback_release_translation_entry(be->info);
++ scsiback_free(be->info);
++ be->info = NULL;
++ }
++
++ kfree(be);
++ dev->dev.driver_data = NULL;
++
++ return 0;
++}
++
++
++static int scsiback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++
++ DPRINTK("%p %d\n", dev, dev->otherend_id);
++
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ be->info = vscsibk_info_alloc(dev->otherend_id);
++ if (IS_ERR(be->info)) {
++ err = PTR_ERR(be->info);
++ be->info = NULL;
++ xenbus_dev_fatal(dev, err, "creating scsihost interface");
++ goto fail;
++ }
++
++ be->info->dev = dev;
++ be->info->irq = 0;
++
++ scsiback_init_translation_table(be->info);
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ return 0;
++
++
++fail:
++ printk(KERN_WARNING "scsiback: %s failed\n",__FUNCTION__);
++ scsiback_remove(dev);
++
++ return err;
++}
++
++
++static struct xenbus_device_id scsiback_ids[] = {
++ { "vscsi" },
++ { "" }
++};
++
++static struct xenbus_driver scsiback = {
++ .name = "vscsi",
++ .owner = THIS_MODULE,
++ .ids = scsiback_ids,
++ .probe = scsiback_probe,
++ .remove = scsiback_remove,
++ .otherend_changed = scsiback_frontend_changed
++};
++
++int scsiback_xenbus_init(void)
++{
++ return xenbus_register_backend(&scsiback);
++}
++
++void scsiback_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&scsiback);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/scsifront/common.h linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/common.h
+--- linux-2.6.18.8/drivers/xen/scsifront/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,129 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_SCSIFRONT_H__
++#define __XEN_DRIVERS_SCSIFRONT_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/kthread.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/blkdev.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/evtchn.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/vscsiif.h>
++#include <asm/delay.h>
++
++
++#define GRANT_INVALID_REF 0
++#define VSCSI_IN_ABORT 1
++#define VSCSI_IN_RESET 2
++
++/* tuning points */
++#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
++#define VSCSIIF_MAX_TARGET 64
++#define VSCSIIF_MAX_LUN 255
++
++#define VSCSIIF_RING_SIZE \
++ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE)
++#define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE
++
++struct vscsifrnt_shadow {
++ uint16_t next_free;
++
++ /* command between backend and frontend
++ * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */
++ unsigned char act;
++
++ /* do reset function */
++ wait_queue_head_t wq_reset; /* reset work queue */
++ int wait_reset; /* reset work queue condition */
++ int32_t rslt_reset; /* reset response status */
++					/* (SUCCESS or FAILED) */
++
++ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3)
++ requests */
++ unsigned int sc_data_direction;
++
++ /* Number of pieces of scatter-gather */
++ unsigned int nr_segments;
++
++ /* requested struct scsi_cmnd is stored from kernel */
++ unsigned long req_scsi_cmnd;
++ int gref[VSCSIIF_SG_TABLESIZE];
++};
++
++struct vscsifrnt_info {
++ struct xenbus_device *dev;
++
++ struct Scsi_Host *host;
++
++ spinlock_t io_lock;
++ spinlock_t shadow_lock;
++ unsigned int evtchn;
++ unsigned int irq;
++
++ grant_ref_t ring_ref;
++ struct vscsiif_front_ring ring;
++ struct vscsiif_response ring_res;
++
++ struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS];
++ uint32_t shadow_free;
++
++ struct task_struct *kthread;
++ wait_queue_head_t wq;
++ unsigned int waiting_resp;
++
++};
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++int scsifront_xenbus_init(void);
++void scsifront_xenbus_unregister(void);
++int scsifront_schedule(void *data);
++irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs);
++int scsifront_cmd_done(struct vscsifrnt_info *info);
++
++
++#endif /* __XEN_DRIVERS_SCSIFRONT_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/scsifront/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/Makefile
+--- linux-2.6.18.8/drivers/xen/scsifront/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3 @@
++
++obj-$(CONFIG_XEN_SCSI_FRONTEND) := xenscsi.o
++xenscsi-objs := scsifront.o xenbus.o
+diff -rpuN linux-2.6.18.8/drivers/xen/scsifront/scsifront.c linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/scsifront.c
+--- linux-2.6.18.8/drivers/xen/scsifront/scsifront.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/scsifront.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,511 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++
++#include <linux/version.h>
++#include "common.h"
++
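++/* The shadow array doubles as a free list: shadow_free heads a chain
++   linked through each entry's next_free index. */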
++static int get_id_from_freelist(struct vscsifrnt_info *info)
++{
++ unsigned long flags;
++ uint32_t free;
++
++ spin_lock_irqsave(&info->shadow_lock, flags);
++
++ free = info->shadow_free;
++	BUG_ON(free >= VSCSIIF_MAX_REQS);
++ info->shadow_free = info->shadow[free].next_free;
++ info->shadow[free].next_free = 0x0fff;
++
++ info->shadow[free].wait_reset = 0;
++
++ spin_unlock_irqrestore(&info->shadow_lock, flags);
++
++ return free;
++}
++
++static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->shadow_lock, flags);
++
++ info->shadow[id].next_free = info->shadow_free;
++ info->shadow[id].req_scsi_cmnd = 0;
++ info->shadow_free = id;
++
++ spin_unlock_irqrestore(&info->shadow_lock, flags);
++}
++
++
++struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info)
++{
++ struct vscsiif_front_ring *ring = &(info->ring);
++ vscsiif_request_t *ring_req;
++ uint32_t id;
++
++ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
++
++ ring->req_prod_pvt++;
++
++	id = get_id_from_freelist(info);	/* the id is echoed back in the response */
++ ring_req->rqid = (uint16_t)id;
++
++ return ring_req;
++}
++
++
++static void scsifront_notify_work(struct vscsifrnt_info *info)
++{
++ info->waiting_resp = 1;
++ wake_up(&info->wq);
++}
++
++
++static void scsifront_do_request(struct vscsifrnt_info *info)
++{
++ struct vscsiif_front_ring *ring = &(info->ring);
++ unsigned int irq = info->irq;
++ int notify;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
++ if (notify)
++ notify_remote_via_irq(irq);
++}
++
++irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ scsifront_notify_work((struct vscsifrnt_info *)dev_id);
++ return IRQ_HANDLED;
++}
++
++
++static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id)
++{
++ int i;
++
++ if (s->sc_data_direction == DMA_NONE)
++ return;
++
++ if (s->nr_segments) {
++ for (i = 0; i < s->nr_segments; i++) {
++ if (unlikely(gnttab_query_foreign_access(
++ s->gref[i]) != 0)) {
++ printk(KERN_ALERT "scsifront: "
++ "grant still in use by backend.\n");
++ BUG();
++ }
++ gnttab_end_foreign_access(s->gref[i], 0UL);
++ }
++ }
++
++ return;
++}
++
++
++static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
++ vscsiif_response_t *ring_res)
++{
++ struct scsi_cmnd *sc;
++ uint32_t id;
++ uint8_t sense_len;
++
++ id = ring_res->rqid;
++ sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd;
++
++	BUG_ON(sc == NULL);
++
++ scsifront_gnttab_done(&info->shadow[id], id);
++ add_id_to_freelist(info, id);
++
++ sc->result = ring_res->rslt;
++ sc->resid = ring_res->residual_len;
++
++ if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
++ sense_len = VSCSIIF_SENSE_BUFFERSIZE;
++ else
++ sense_len = ring_res->sense_len;
++
++ if (sense_len)
++ memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);
++
++ sc->scsi_done(sc);
++
++ return;
++}
++
++
++static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
++ vscsiif_response_t *ring_res)
++{
++ uint16_t id = ring_res->rqid;
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->shadow_lock, flags);
++ info->shadow[id].wait_reset = 1;
++ info->shadow[id].rslt_reset = ring_res->rslt;
++ spin_unlock_irqrestore(&info->shadow_lock, flags);
++
++ wake_up(&(info->shadow[id].wq_reset));
++}
++
++
++int scsifront_cmd_done(struct vscsifrnt_info *info)
++{
++ vscsiif_response_t *ring_res;
++
++ RING_IDX i, rp;
++ int more_to_do = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->io_lock, flags);
++
++ rp = info->ring.sring->rsp_prod;
++ rmb();
++ for (i = info->ring.rsp_cons; i != rp; i++) {
++
++ ring_res = RING_GET_RESPONSE(&info->ring, i);
++
++ if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
++ scsifront_cdb_cmd_done(info, ring_res);
++ else
++ scsifront_sync_cmd_done(info, ring_res);
++ }
++
++ info->ring.rsp_cons = i;
++
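++	/* with requests still outstanding, check once more for responses
++	   that raced in; otherwise just re-arm rsp_event */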
++ if (i != info->ring.req_prod_pvt) {
++ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++ } else {
++ info->ring.sring->rsp_event = i + 1;
++ }
++
++ spin_unlock_irqrestore(&info->io_lock, flags);
++
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++
++ return more_to_do;
++}
++
++
++
++
++int scsifront_schedule(void *data)
++{
++ struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;
++
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(
++ info->wq,
++ info->waiting_resp || kthread_should_stop());
++
++ info->waiting_resp = 0;
++ smp_mb();
++
++ if (scsifront_cmd_done(info))
++ info->waiting_resp = 1;
++ }
++
++ return 0;
++}
++
++
++
++static int map_data_for_request(struct vscsifrnt_info *info,
++ struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id)
++{
++ grant_ref_t gref_head;
++ struct page *page;
++ int err, i, ref, ref_cnt = 0;
++ int write = (sc->sc_data_direction == DMA_TO_DEVICE);
++ int nr_pages, off, len, bytes;
++ unsigned long buffer_pfn;
++ unsigned int data_len = 0;
++
++ if (sc->sc_data_direction == DMA_NONE)
++ return 0;
++
++ err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
++ if (err) {
++ printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n");
++ return -ENOMEM;
++ }
++
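++	/* grant the backend access page by page: one grant reference
++	   and one ring segment per page of the data buffer */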
++ if (sc->use_sg) {
++ /* quoted scsi_lib.c/scsi_req_map_sg . */
++ struct scatterlist *sg = (struct scatterlist *)sc->request_buffer;
++ nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ if (nr_pages > VSCSIIF_SG_TABLESIZE) {
++ printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n");
++ ref_cnt = (-E2BIG);
++ goto big_to_sg;
++ }
++
++ for (i = 0; i < sc->use_sg; i++) {
++ page = sg[i].page;
++ off = sg[i].offset;
++ len = sg[i].length;
++ data_len += len;
++
++ buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
++
++ while (len > 0) {
++ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
++ buffer_pfn, write);
++
++ info->shadow[id].gref[ref_cnt] = ref;
++ ring_req->seg[ref_cnt].gref = ref;
++ ring_req->seg[ref_cnt].offset = (uint16_t)off;
++ ring_req->seg[ref_cnt].length = (uint16_t)bytes;
++
++ buffer_pfn++;
++ len -= bytes;
++ off = 0;
++ ref_cnt++;
++ }
++ }
++ } else if (sc->request_bufflen) {
++ unsigned long end = ((unsigned long)sc->request_buffer
++ + sc->request_bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ unsigned long start = (unsigned long)sc->request_buffer >> PAGE_SHIFT;
++
++ page = virt_to_page(sc->request_buffer);
++ nr_pages = end - start;
++ len = sc->request_bufflen;
++
++ if (nr_pages > VSCSIIF_SG_TABLESIZE) {
++ ref_cnt = (-E2BIG);
++ goto big_to_sg;
++ }
++
++ buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
++
++ off = offset_in_page((unsigned long)sc->request_buffer);
++ for (i = 0; i < nr_pages; i++) {
++ bytes = PAGE_SIZE - off;
++
++ if (bytes > len)
++ bytes = len;
++
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
++ buffer_pfn, write);
++
++ info->shadow[id].gref[i] = ref;
++ ring_req->seg[i].gref = ref;
++ ring_req->seg[i].offset = (uint16_t)off;
++ ring_req->seg[i].length = (uint16_t)bytes;
++
++ buffer_pfn++;
++ len -= bytes;
++ off = 0;
++ ref_cnt++;
++ }
++ }
++
++big_to_sg:
++
++ gnttab_free_grant_references(gref_head);
++
++ return ref_cnt;
++}
++
++static int scsifront_queuecommand(struct scsi_cmnd *sc,
++ void (*done)(struct scsi_cmnd *))
++{
++ struct vscsifrnt_info *info =
++ (struct vscsifrnt_info *) sc->device->host->hostdata;
++ vscsiif_request_t *ring_req;
++ int ref_cnt;
++ uint16_t rqid;
++
++ if (RING_FULL(&info->ring)) {
++ goto out_host_busy;
++ }
++
++ sc->scsi_done = done;
++ sc->result = 0;
++
++ ring_req = scsifront_pre_request(info);
++ rqid = ring_req->rqid;
++ ring_req->act = VSCSIIF_ACT_SCSI_CDB;
++
++ ring_req->id = sc->device->id;
++ ring_req->lun = sc->device->lun;
++ ring_req->channel = sc->device->channel;
++ ring_req->cmd_len = sc->cmd_len;
++
++ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
++
++ if (sc->cmd_len)
++ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
++ else
++ memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
++
++ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
++ ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
++
++ info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc;
++ info->shadow[rqid].sc_data_direction = sc->sc_data_direction;
++ info->shadow[rqid].act = ring_req->act;
++
++ ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
++ if (ref_cnt < 0) {
++ add_id_to_freelist(info, rqid);
++ if (ref_cnt == (-ENOMEM))
++ goto out_host_busy;
++ else {
++ sc->result = (DID_ERROR << 16);
++ goto out_fail_command;
++ }
++ }
++
++ ring_req->nr_segments = (uint8_t)ref_cnt;
++ info->shadow[rqid].nr_segments = ref_cnt;
++
++ scsifront_do_request(info);
++
++ return 0;
++
++out_host_busy:
++ return SCSI_MLQUEUE_HOST_BUSY;
++
++out_fail_command:
++ done(sc);
++ return 0;
++}
++
++
++static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
++{
++ return (FAILED);
++}
++
++/* vscsi supports only device_reset, because each LUN is presented as a separate device */
++static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
++{
++ struct Scsi_Host *host = sc->device->host;
++ struct vscsifrnt_info *info =
++ (struct vscsifrnt_info *) sc->device->host->hostdata;
++
++ vscsiif_request_t *ring_req;
++ uint16_t rqid;
++ int err;
++
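++ /*
++ * Kernels >= 2.6.12 enter the error handler without the host lock,
++ * so take it here; older kernels enter with it already held, hence
++ * the unconditional unlock/relock around the wait below.
++ */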
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++ spin_lock_irq(host->host_lock);
++#endif
++
++ ring_req = scsifront_pre_request(info);
++ ring_req->act = VSCSIIF_ACT_SCSI_RESET;
++
++ rqid = ring_req->rqid;
++ info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
++
++ ring_req->channel = sc->device->channel;
++ ring_req->id = sc->device->id;
++ ring_req->lun = sc->device->lun;
++ ring_req->cmd_len = sc->cmd_len;
++
++ if (sc->cmd_len)
++ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
++ else
++ memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
++
++ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
++ ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
++ ring_req->nr_segments = 0;
++
++ scsifront_do_request(info);
++
++ spin_unlock_irq(host->host_lock);
++ wait_event_interruptible(info->shadow[rqid].wq_reset,
++ info->shadow[rqid].wait_reset);
++ spin_lock_irq(host->host_lock);
++
++ err = info->shadow[rqid].rslt_reset;
++
++ add_id_to_freelist(info, rqid);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++ spin_unlock_irq(host->host_lock);
++#endif
++ return (err);
++}
++
++
++struct scsi_host_template scsifront_sht = {
++ .module = THIS_MODULE,
++ .name = "Xen SCSI frontend driver",
++ .queuecommand = scsifront_queuecommand,
++ .eh_abort_handler = scsifront_eh_abort_handler,
++ .eh_device_reset_handler= scsifront_dev_reset_handler,
++ .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
++ .can_queue = VSCSIIF_MAX_REQS,
++ .this_id = -1,
++ .sg_tablesize = VSCSIIF_SG_TABLESIZE,
++ .use_clustering = DISABLE_CLUSTERING,
++ .proc_name = "scsifront",
++};
++
++
++static int __init scsifront_init(void)
++{
++ int err;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ err = scsifront_xenbus_init();
++
++ return err;
++}
++
++static void __exit scsifront_exit(void)
++{
++ scsifront_xenbus_unregister();
++}
++
++module_init(scsifront_init);
++module_exit(scsifront_exit);
++
++MODULE_DESCRIPTION("Xen SCSI frontend driver");
++MODULE_LICENSE("GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/scsifront/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/xenbus.c
+--- linux-2.6.18.8/drivers/xen/scsifront/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/scsifront/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,421 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++
++#include <linux/version.h>
++#include "common.h"
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++ #define DEFAULT_TASK_COMM_LEN 16
++#else
++ #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN
++#endif
++
++extern struct scsi_host_template scsifront_sht;
++
++static void scsifront_free(struct vscsifrnt_info *info)
++{
++ struct Scsi_Host *host = info->host;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
++ if (host->shost_state != SHOST_DEL) {
++#else
++ if (!test_bit(SHOST_DEL, &host->shost_state)) {
++#endif
++ scsi_remove_host(info->host);
++ }
++
++ if (info->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(info->ring_ref,
++ (unsigned long)info->ring.sring);
++ info->ring_ref = GRANT_INVALID_REF;
++ info->ring.sring = NULL;
++ }
++
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++
++ scsi_host_put(info->host);
++}
++
++
++static int scsifront_alloc_ring(struct vscsifrnt_info *info)
++{
++ struct xenbus_device *dev = info->dev;
++ struct vscsiif_sring *sring;
++ int err = -ENOMEM;
++
++
++ info->ring_ref = GRANT_INVALID_REF;
++
++ /***** Frontend to Backend ring start *****/
++ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
++ if (!sring) {
++ xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)");
++ return err;
++ }
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(sring));
++ if (err < 0) {
++ free_page((unsigned long) sring);
++ info->ring.sring = NULL;
++ xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)");
++ goto free_sring;
++ }
++ info->ring_ref = err;
++
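++ /*
++ * Allocate an event channel for the backend domain to connect to
++ * and bind it to the response interrupt handler.
++ */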
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, scsifront_intr,
++ SA_SAMPLE_RANDOM, "scsifront", info);
++
++ if (err <= 0) {
++ xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
++ goto free_sring;
++ }
++ info->irq = err;
++
++ return 0;
++
++/* free resource */
++free_sring:
++ scsifront_free(info);
++
++ return err;
++}
++
++
++static int scsifront_init_ring(struct vscsifrnt_info *info)
++{
++ struct xenbus_device *dev = info->dev;
++ struct xenbus_transaction xbt;
++ int err;
++
++ DPRINTK("%s\n",__FUNCTION__);
++
++ err = scsifront_alloc_ring(info);
++ if (err)
++ return err;
++ DPRINTK("%u %u\n", info->ring_ref, info->evtchn);
++
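++ /*
++ * Publish ring-ref and event-channel to the backend in a single
++ * xenbus transaction, retrying if the transaction hits -EAGAIN.
++ */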
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto free_sring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
++ info->ring_ref);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
++ goto fail;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++
++ if (err) {
++ xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
++ goto fail;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto free_sring;
++ }
++
++ return 0;
++
++fail:
++ xenbus_transaction_end(xbt, 1);
++free_sring:
++ /* free resource */
++ scsifront_free(info);
++
++ return err;
++}
++
++
++static int scsifront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ struct vscsifrnt_info *info;
++ struct Scsi_Host *host;
++ int i, err = -ENOMEM;
++ char name[DEFAULT_TASK_COMM_LEN];
++
++ host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
++ if (!host) {
++ xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
++ return err;
++ }
++ info = (struct vscsifrnt_info *) host->hostdata;
++ info->host = host;
++
++
++ dev->dev.driver_data = info;
++ info->dev = dev;
++
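++ /*
++ * Chain all shadow entries into a free list; the last entry gets
++ * an out-of-range sentinel (0x0fff) marking the end of the list.
++ */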
++ for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
++ info->shadow[i].next_free = i + 1;
++ init_waitqueue_head(&(info->shadow[i].wq_reset));
++ info->shadow[i].wait_reset = 0;
++ }
++ info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff;
++
++ err = scsifront_init_ring(info);
++ if (err) {
++ scsi_host_put(host);
++ return err;
++ }
++
++ init_waitqueue_head(&info->wq);
++ spin_lock_init(&info->io_lock);
++ spin_lock_init(&info->shadow_lock);
++
++ snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no);
++
++ info->kthread = kthread_run(scsifront_schedule, info, name);
++ if (IS_ERR(info->kthread)) {
++ err = PTR_ERR(info->kthread);
++ info->kthread = NULL;
++ printk(KERN_ERR "scsifront: kthread start err %d\n", err);
++ goto free_sring;
++ }
++
++ host->max_id = VSCSIIF_MAX_TARGET;
++ host->max_channel = 0;
++ host->max_lun = VSCSIIF_MAX_LUN;
++ host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;
++
++ err = scsi_add_host(host, &dev->dev);
++ if (err) {
++ printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err);
++ goto free_sring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++
++ return 0;
++
++free_sring:
++ /* free resource */
++ scsifront_free(info);
++ return err;
++}
++
++static int scsifront_remove(struct xenbus_device *dev)
++{
++ struct vscsifrnt_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename);
++
++ if (info->kthread) {
++ kthread_stop(info->kthread);
++ info->kthread = NULL;
++ }
++
++ scsifront_free(info);
++
++ return 0;
++}
++
++
++static int scsifront_disconnect(struct vscsifrnt_info *info)
++{
++ struct xenbus_device *dev = info->dev;
++ struct Scsi_Host *host = info->host;
++
++ DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename);
++
++ /*
++ By the time this function is called, all of the
++ frontend's devices have been deleted, so there is
++ no need to block I/O before remove_host.
++ */
++
++ scsi_remove_host(host);
++ xenbus_frontend_closed(dev);
++
++ return 0;
++}
++
++#define VSCSIFRONT_OP_ADD_LUN 1
++#define VSCSIFRONT_OP_DEL_LUN 2
++
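++/*
++ * Scan the backend's vscsi-devs/ directory in xenstore and add or
++ * remove LUNs whose state matches the requested operation.
++ */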
++static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
++{
++ struct xenbus_device *dev = info->dev;
++ int i, err = 0;
++ char str[64], state_str[64];
++ char **dir;
++ unsigned int dir_n = 0;
++ unsigned int device_state;
++ unsigned int hst, chn, tgt, lun;
++ struct scsi_device *sdev;
++
++ dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
++ if (IS_ERR(dir))
++ return;
++
++ for (i = 0; i < dir_n; i++) {
++ /* read status */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
++ &device_state);
++ if (XENBUS_EXIST_ERR(err))
++ continue;
++
++ /* virtual SCSI device */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->otherend, str,
++ "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
++ if (XENBUS_EXIST_ERR(err))
++ continue;
++
++ /* front device state path */
++ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
++
++ switch (op) {
++ case VSCSIFRONT_OP_ADD_LUN:
++ if (device_state == XenbusStateInitialised) {
++ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
++ if (sdev) {
++ printk(KERN_ERR "scsifront: Device already in use.\n");
++ scsi_device_put(sdev);
++ xenbus_printf(XBT_NIL, dev->nodename,
++ state_str, "%d", XenbusStateClosed);
++ } else {
++ scsi_add_device(info->host, chn, tgt, lun);
++ xenbus_printf(XBT_NIL, dev->nodename,
++ state_str, "%d", XenbusStateConnected);
++ }
++ }
++ break;
++ case VSCSIFRONT_OP_DEL_LUN:
++ if (device_state == XenbusStateClosing) {
++ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
++ if (sdev) {
++ scsi_remove_device(sdev);
++ scsi_device_put(sdev);
++ xenbus_printf(XBT_NIL, dev->nodename,
++ state_str, "%d", XenbusStateClosed);
++ }
++ }
++ break;
++ default:
++ break;
++ }
++ }
++
++ kfree(dir);
++ return;
++}
++
++
++
++
++static void scsifront_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct vscsifrnt_info *info = dev->dev.driver_data;
++
++ DPRINTK("%p %u %u\n", dev, dev->state, backend_state);
++
++ switch (backend_state) {
++ case XenbusStateUnknown:
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ if (xenbus_read_driver_state(dev->nodename) ==
++ XenbusStateInitialised) {
++ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
++ }
++
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateClosing:
++ scsifront_disconnect(info);
++ break;
++
++ case XenbusStateReconfiguring:
++ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
++ xenbus_switch_state(dev, XenbusStateReconfiguring);
++ break;
++
++ case XenbusStateReconfigured:
++ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++ }
++}
++
++
++static struct xenbus_device_id scsifront_ids[] = {
++ { "vscsi" },
++ { "" }
++};
++
++
++static struct xenbus_driver scsifront_driver = {
++ .name = "vscsi",
++ .owner = THIS_MODULE,
++ .ids = scsifront_ids,
++ .probe = scsifront_probe,
++ .remove = scsifront_remove,
++/* .resume = scsifront_resume, */
++ .otherend_changed = scsifront_backend_changed,
++};
++
++int scsifront_xenbus_init(void)
++{
++ return xenbus_register_frontend(&scsifront_driver);
++}
++
++void scsifront_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&scsifront_driver);
++}
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel.c
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,129 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++#include "accel_solarflare.h"
++
++#include <linux/notifier.h>
++
++#ifdef EFX_GCOV
++#include "gcov.h"
++#endif
++
++static int netback_accel_netdev_event(struct notifier_block *nb,
++ unsigned long event, void *ptr)
++{
++ struct net_device *net_dev = (struct net_device *)ptr;
++ struct netback_accel *bend;
++
++ if ((event == NETDEV_UP) || (event == NETDEV_DOWN)) {
++ mutex_lock(&bend_list_mutex);
++ bend = bend_list;
++ while (bend != NULL) {
++ mutex_lock(&bend->bend_mutex);
++ /*
++ * This happens when the shared pages have
++ * been unmapped, but the bend has not yet been
++ * removed from the list.
++ */
++ if (bend->shared_page == NULL)
++ goto next;
++
++ if (bend->net_dev->ifindex == net_dev->ifindex)
++ netback_accel_set_interface_state
++ (bend, event == NETDEV_UP);
++
++ next:
++ mutex_unlock(&bend->bend_mutex);
++ bend = bend->next_bend;
++ }
++ mutex_unlock(&bend_list_mutex);
++ }
++
++ return NOTIFY_DONE;
++}
++
++
++static struct notifier_block netback_accel_netdev_notifier = {
++ .notifier_call = netback_accel_netdev_event,
++};
++
++
++unsigned sfc_netback_max_pages = NETBACK_ACCEL_DEFAULT_MAX_BUF_PAGES;
++module_param_named(max_pages, sfc_netback_max_pages, uint, 0644);
++MODULE_PARM_DESC(max_pages,
++ "The number of buffer pages to enforce on each guest");
++
++/* Initialise the subsystems needed for the accelerated fast path */
++static int __init netback_accel_init(void)
++{
++ int rc = 0;
++
++#ifdef EFX_GCOV
++ gcov_provider_init(THIS_MODULE);
++#endif
++
++ rc = netback_accel_init_fwd();
++
++ if (rc == 0)
++ netback_accel_debugfs_init();
++
++ if (rc == 0)
++ rc = netback_accel_sf_init();
++
++ if (rc == 0)
++ rc = register_netdevice_notifier
++ (&netback_accel_netdev_notifier);
++
++ /*
++ * TODO: if no device was found, shouldn't we clean up what
++ * we've allocated for the acceleration subsystem?
++ */
++
++ return rc;
++}
++
++module_init(netback_accel_init);
++
++static void __exit netback_accel_exit(void)
++{
++ unregister_netdevice_notifier(&netback_accel_netdev_notifier);
++
++ netback_accel_sf_shutdown();
++
++ netback_accel_shutdown_bends();
++
++ netback_accel_debugfs_fini();
++
++ netback_accel_shutdown_fwd();
++
++#ifdef EFX_GCOV
++ gcov_provider_fini(THIS_MODULE);
++#endif
++}
++
++module_exit(netback_accel_exit);
++
++MODULE_LICENSE("GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel_debugfs.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_debugfs.c
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel_debugfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_debugfs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,148 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/fs.h>
++#include <linux/debugfs.h>
++
++#include "accel.h"
++
++#if defined(CONFIG_DEBUG_FS)
++static struct dentry *sfc_debugfs_root = NULL;
++#endif
++
++#if NETBACK_ACCEL_STATS
++struct netback_accel_global_stats global_stats;
++#if defined(CONFIG_DEBUG_FS)
++static struct netback_accel_global_dbfs global_dbfs;
++#endif
++#endif
++
++void netback_accel_debugfs_init(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ sfc_debugfs_root = debugfs_create_dir("sfc_netback", NULL);
++ if (sfc_debugfs_root == NULL)
++ return;
++
++ global_dbfs.num_fwds = debugfs_create_u32
++ ("num_fwds", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.num_fwds);
++ global_dbfs.dl_tx_packets = debugfs_create_u64
++ ("dl_tx_packets", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.dl_tx_packets);
++ global_dbfs.dl_rx_packets = debugfs_create_u64
++ ("dl_rx_packets", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.dl_rx_packets);
++ global_dbfs.dl_tx_bad_packets = debugfs_create_u64
++ ("dl_tx_bad_packets", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.dl_tx_bad_packets);
++#endif
++}
++
++
++void netback_accel_debugfs_fini(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ debugfs_remove(global_dbfs.num_fwds);
++ debugfs_remove(global_dbfs.dl_tx_packets);
++ debugfs_remove(global_dbfs.dl_rx_packets);
++ debugfs_remove(global_dbfs.dl_tx_bad_packets);
++
++ debugfs_remove(sfc_debugfs_root);
++#endif
++}
++
++
++int netback_accel_debugfs_create(struct netback_accel *bend)
++{
++#if defined(CONFIG_DEBUG_FS)
++ /* Smallest length is 7 ("vif0.0" plus trailing NUL) */
++ int length = 7, temp;
++
++ if (sfc_debugfs_root == NULL)
++ return -ENOENT;
++
++ /* Work out length of string representation of far_end and vif_num */
++ temp = bend->far_end;
++ while (temp > 9) {
++ length++;
++ temp = temp / 10;
++ }
++ temp = bend->vif_num;
++ while (temp > 9) {
++ length++;
++ temp = temp / 10;
++ }
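++ /*
++ * Worked example: far_end 123, vif_num 4 needs 7 + 2 extra
++ * digits = 9 bytes, enough for "vif123.4" plus the NUL.
++ */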
++
++ bend->dbfs_dir_name = kmalloc(length, GFP_KERNEL);
++ if (bend->dbfs_dir_name == NULL)
++ return -ENOMEM;
++ sprintf(bend->dbfs_dir_name, "vif%d.%d", bend->far_end, bend->vif_num);
++
++ bend->dbfs_dir = debugfs_create_dir(bend->dbfs_dir_name,
++ sfc_debugfs_root);
++ if (bend->dbfs_dir == NULL) {
++ kfree(bend->dbfs_dir_name);
++ return -ENOMEM;
++ }
++
++#if NETBACK_ACCEL_STATS
++ bend->dbfs.evq_wakeups = debugfs_create_u64
++ ("evq_wakeups", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.evq_wakeups);
++ bend->dbfs.evq_timeouts = debugfs_create_u64
++ ("evq_timeouts", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.evq_timeouts);
++ bend->dbfs.num_filters = debugfs_create_u32
++ ("num_filters", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.num_filters);
++ bend->dbfs.num_buffer_pages = debugfs_create_u32
++ ("num_buffer_pages", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.num_buffer_pages);
++#endif
++#endif
++ return 0;
++}
++
++
++int netback_accel_debugfs_remove(struct netback_accel *bend)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (bend->dbfs_dir != NULL) {
++#if NETBACK_ACCEL_STATS
++ debugfs_remove(bend->dbfs.evq_wakeups);
++ debugfs_remove(bend->dbfs.evq_timeouts);
++ debugfs_remove(bend->dbfs.num_filters);
++ debugfs_remove(bend->dbfs.num_buffer_pages);
++#endif
++ debugfs_remove(bend->dbfs_dir);
++ }
++
++ if (bend->dbfs_dir_name)
++ kfree(bend->dbfs_dir_name);
++#endif
++ return 0;
++}
++
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel_fwd.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_fwd.c
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel_fwd.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_fwd.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,420 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "accel.h"
++#include "accel_cuckoo_hash.h"
++#include "accel_util.h"
++#include "accel_solarflare.h"
++
++#include "driverlink_api.h"
++
++#include <linux/if_arp.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++
++/* State stored in the forward table */
++struct fwd_struct {
++ struct list_head link; /* Forms list */
++ void * context;
++ __u8 valid;
++ __u8 mac[ETH_ALEN];
++};
++
++/* Max value we support */
++#define NUM_FWDS_BITS 8
++#define NUM_FWDS (1 << NUM_FWDS_BITS)
++#define FWD_MASK (NUM_FWDS - 1)
++
++struct port_fwd {
++ /* Make a list */
++ struct list_head link;
++ /* Hash table to store the fwd_structs */
++ cuckoo_hash_table fwd_hash_table;
++ /* The array of fwd_structs */
++ struct fwd_struct *fwd_array;
++ /* Linked list of entries in use. */
++ struct list_head fwd_list;
++ /* Could do something clever with a reader/writer lock. */
++ spinlock_t fwd_lock;
++ /* Make find_free_entry() a bit faster by caching this */
++ int last_free_index;
++};
++
++/*
++ * This list is unlocked, as it's only touched from dl probe and
++ * remove, which are themselves synchronised. We could get rid of it
++ * entirely as it's never iterated, but it's useful for debugging.
++ */
++static struct list_head port_fwds;
++
++
++/* Search the fwd_array for an unused entry */
++static int fwd_find_free_entry(struct port_fwd *fwd_set)
++{
++ int index = fwd_set->last_free_index;
++
++ do {
++ if (!fwd_set->fwd_array[index].valid) {
++ fwd_set->last_free_index = index;
++ return index;
++ }
++ index++;
++ if (index >= NUM_FWDS)
++ index = 0;
++ } while (index != fwd_set->last_free_index);
++
++ return -ENOMEM;
++}
++
++
++/* Look up a MAC in the hash table. Caller should hold table lock. */
++static inline struct fwd_struct *fwd_find_entry(const __u8 *mac,
++ struct port_fwd *fwd_set)
++{
++ cuckoo_hash_value value;
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++
++ if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key),
++ &value)) {
++ struct fwd_struct *fwd = &fwd_set->fwd_array[value];
++ DPRINTK_ON(memcmp(fwd->mac, mac, ETH_ALEN) != 0);
++ return fwd;
++ }
++
++ return NULL;
++}
++
++
++/* Initialise each NIC port's forwarding table */
++void *netback_accel_init_fwd_port(void)
++{
++ struct port_fwd *fwd_set;
++
++ fwd_set = kzalloc(sizeof(struct port_fwd), GFP_KERNEL);
++ if (fwd_set == NULL) {
++ return NULL;
++ }
++
++ spin_lock_init(&fwd_set->fwd_lock);
++
++ fwd_set->fwd_array = kzalloc(sizeof (struct fwd_struct) * NUM_FWDS,
++ GFP_KERNEL);
++ if (fwd_set->fwd_array == NULL) {
++ kfree(fwd_set);
++ return NULL;
++ }
++
++ if (cuckoo_hash_init(&fwd_set->fwd_hash_table, NUM_FWDS_BITS, 8) != 0) {
++ kfree(fwd_set->fwd_array);
++ kfree(fwd_set);
++ return NULL;
++ }
++
++ INIT_LIST_HEAD(&fwd_set->fwd_list);
++
++ list_add(&fwd_set->link, &port_fwds);
++
++ return fwd_set;
++}
++
++
++void netback_accel_shutdown_fwd_port(void *fwd_priv)
++{
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ BUG_ON(fwd_priv == NULL);
++
++ BUG_ON(list_empty(&port_fwds));
++ list_del(&fwd_set->link);
++
++ BUG_ON(!list_empty(&fwd_set->fwd_list));
++
++ cuckoo_hash_destroy(&fwd_set->fwd_hash_table);
++ kfree(fwd_set->fwd_array);
++ kfree(fwd_set);
++}
++
++
++int netback_accel_init_fwd(void)
++{
++ INIT_LIST_HEAD(&port_fwds);
++ return 0;
++}
++
++
++void netback_accel_shutdown_fwd(void)
++{
++ BUG_ON(!list_empty(&port_fwds));
++}
++
++
++/*
++ * Add an entry to the forwarding table. Returns -ENOMEM if no
++ * space.
++ */
++int netback_accel_fwd_add(const __u8 *mac, void *context, void *fwd_priv)
++{
++ struct fwd_struct *fwd;
++ int rc = 0, index;
++ unsigned long flags;
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ BUG_ON(fwd_priv == NULL);
++
++ DPRINTK("Adding mac " MAC_FMT "\n", MAC_ARG(mac));
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++
++ if ((rc = fwd_find_free_entry(fwd_set)) < 0 ) {
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ return rc;
++ }
++
++ index = rc;
++
++ /* Shouldn't already be in the table */
++ if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key), &rc) != 0) {
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ EPRINTK("MAC address " MAC_FMT " already accelerated.\n",
++ MAC_ARG(mac));
++ return -EEXIST;
++ }
++
++ if ((rc = cuckoo_hash_add(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key), index, 1)) == 0) {
++ fwd = &fwd_set->fwd_array[index];
++ fwd->valid = 1;
++ fwd->context = context;
++ memcpy(fwd->mac, mac, ETH_ALEN);
++ list_add(&fwd->link, &fwd_set->fwd_list);
++ NETBACK_ACCEL_STATS_OP(global_stats.num_fwds++);
++ }
++
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++
++ /*
++ * No need to tell frontend that this mac address is local -
++ * it should auto-discover through packets on fastpath what is
++ * local and what is not, and just being on same server
++ * doesn't make it local (it could be on a different
++ * bridge)
++ */
++
++ return rc;
++}
++
++
++/* remove an entry from the forwarding tables. */
++void netback_accel_fwd_remove(const __u8 *mac, void *fwd_priv)
++{
++ struct fwd_struct *fwd;
++ unsigned long flags;
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ DPRINTK("Removing mac " MAC_FMT "\n", MAC_ARG(mac));
++
++ BUG_ON(fwd_priv == NULL);
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++
++ fwd = fwd_find_entry(mac, fwd_set);
++ if (fwd != NULL) {
++ BUG_ON(list_empty(&fwd_set->fwd_list));
++ list_del(&fwd->link);
++
++ fwd->valid = 0;
++ cuckoo_hash_remove(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key));
++ NETBACK_ACCEL_STATS_OP(global_stats.num_fwds--);
++ }
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++
++ /*
++ * No need to tell frontend that this is no longer present -
++ * the frontend is currently only interested in remote
++ * addresses and it works these out (mostly) by itself
++ */
++}
++
++
++/* Set the context pointer for a hash table entry. */
++int netback_accel_fwd_set_context(const __u8 *mac, void *context,
++ void *fwd_priv)
++{
++ struct fwd_struct *fwd;
++ unsigned long flags;
++ int rc = -ENOENT;
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ BUG_ON(fwd_priv == NULL);
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ fwd = fwd_find_entry(mac, fwd_set);
++ if (fwd != NULL) {
++ fwd->context = context;
++ rc = 0;
++ }
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ return rc;
++}
++
++
++/**************************************************************************
++ * Process a received packet
++ **************************************************************************/
++
++/*
++ * Returns whether or not we have a match in our forward table for
++ * this skb. Must be called with the appropriate fwd_lock already held.
++ */
++static struct netback_accel *for_a_vnic(struct netback_pkt_buf *skb,
++ struct port_fwd *fwd_set)
++{
++ struct fwd_struct *fwd;
++ struct netback_accel *retval = NULL;
++
++ fwd = fwd_find_entry(skb->mac.raw, fwd_set);
++ if (fwd != NULL)
++ retval = fwd->context;
++ return retval;
++}
++
++
++static inline int packet_is_arp_reply(struct sk_buff *skb)
++{
++ return skb->protocol == htons(ETH_P_ARP)
++ && skb->nh.arph->ar_op == htons(ARPOP_REPLY);
++}
++
++
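++/*
++ * Build a filter specification (protocol, destination IP and port,
++ * source MAC) from the packet's Ethernet and IP headers.
++ */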
++static inline void hdr_to_filt(struct ethhdr *ethhdr, struct iphdr *ip,
++ struct netback_accel_filter_spec *spec)
++{
++ spec->proto = ip->protocol;
++ spec->destip_be = ip->daddr;
++ memcpy(spec->mac, ethhdr->h_source, ETH_ALEN);
++
++ if (ip->protocol == IPPROTO_TCP) {
++ struct tcphdr *tcp = (struct tcphdr *)((char *)ip + 4 * ip->ihl);
++ spec->destport_be = tcp->dest;
++ } else {
++ struct udphdr *udp = (struct udphdr *)((char *)ip + 4 * ip->ihl);
++ EPRINTK_ON(ip->protocol != IPPROTO_UDP);
++ spec->destport_be = udp->dest;
++ }
++}
++
++
++static inline int netback_accel_can_filter(struct netback_pkt_buf *skb)
++{
++ return (skb->protocol == htons(ETH_P_IP) &&
++ ((skb->nh.iph->protocol == IPPROTO_TCP) ||
++ (skb->nh.iph->protocol == IPPROTO_UDP)));
++}
++
++
++static inline void netback_accel_filter_packet(struct netback_accel *bend,
++ struct netback_pkt_buf *skb)
++{
++ struct netback_accel_filter_spec fs;
++ struct ethhdr *eh = (struct ethhdr *)(skb->mac.raw);
++
++ hdr_to_filt(eh, skb->nh.iph, &fs);
++
++ netback_accel_filter_check_add(bend, &fs);
++}
++
++
++/*
++ * Receive a packet and do something appropriate with it. Return true
++ * to take exclusive ownership of the packet. This is verging on
++ * solarflare specific
++ */
++void netback_accel_rx_packet(struct netback_pkt_buf *skb, void *fwd_priv)
++{
++ struct netback_accel *bend;
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++ unsigned long flags;
++
++ BUG_ON(fwd_priv == NULL);
++
++ /* Checking for bcast is cheaper so do that first */
++ if (is_broadcast_ether_addr(skb->mac.raw)) {
++ /* pass through the slow path by not claiming ownership */
++ return;
++ } else if (is_multicast_ether_addr(skb->mac.raw)) {
++ /* pass through the slow path by not claiming ownership */
++ return;
++ } else {
++ /* It is unicast */
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ /* We insert filter to pass it off to a VNIC */
++ if ((bend = for_a_vnic(skb, fwd_set)) != NULL)
++ if (netback_accel_can_filter(skb))
++ netback_accel_filter_packet(bend, skb);
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ }
++ return;
++}
++
++
++void netback_accel_tx_packet(struct sk_buff *skb, void *fwd_priv)
++{
++ __u8 *mac;
++ unsigned long flags;
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++ struct fwd_struct *fwd;
++
++ BUG_ON(fwd_priv == NULL);
++
++ if (is_broadcast_ether_addr(skb->mac.raw) && packet_is_arp_reply(skb)) {
++ /*
++ * update our fast path forwarding to reflect this
++ * gratuitous ARP
++ */
++ mac = skb->mac.raw + ETH_ALEN;
++
++ DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n",
++ __FUNCTION__, MAC_ARG(mac));
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ /*
++ * Might not be local, but let's tell them all it is,
++ * and they can restore the fastpath if they continue
++ * to get packets that way
++ */
++ list_for_each_entry(fwd, &fwd_set->fwd_list, link) {
++ struct netback_accel *bend = fwd->context;
++ if (bend != NULL)
++ netback_accel_msg_tx_new_localmac(bend, mac);
++ }
++
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ }
++ return;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,393 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETBACK_ACCEL_H
++#define NETBACK_ACCEL_H
++
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/mutex.h>
++#include <linux/wait.h>
++
++#include <xen/xenbus.h>
++
++#include "accel_shared_fifo.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++
++/**************************************************************************
++ * Datatypes
++ **************************************************************************/
++
++#define NETBACK_ACCEL_DEFAULT_MAX_FILTERS (8)
++#define NETBACK_ACCEL_DEFAULT_MAX_MCASTS (8)
++#define NETBACK_ACCEL_DEFAULT_MAX_BUF_PAGES (384)
++/* Variable to store module parameter for max_buf_pages */
++extern unsigned sfc_netback_max_pages;
++
++#define NETBACK_ACCEL_STATS 1
++
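++/* Wrap statistics updates so they compile away when stats are disabled. */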
++#if NETBACK_ACCEL_STATS
++#define NETBACK_ACCEL_STATS_OP(x) x
++#else
++#define NETBACK_ACCEL_STATS_OP(x)
++#endif
++
++/*! Statistics for a given backend */
++struct netback_accel_stats {
++ /*! Number of eventq wakeup events */
++ u64 evq_wakeups;
++ /*! Number of eventq timeout events */
++ u64 evq_timeouts;
++ /*! Number of filters used */
++ u32 num_filters;
++ /*! Number of buffer pages registered */
++ u32 num_buffer_pages;
++};
++
++
++/* Debug fs nodes for each of the above stats */
++struct netback_accel_dbfs {
++ struct dentry *evq_wakeups;
++ struct dentry *evq_timeouts;
++ struct dentry *num_filters;
++ struct dentry *num_buffer_pages;
++};
++
++
++/*! Resource limits for a given NIC */
++struct netback_accel_limits {
++ int max_filters; /*!< Max. number of filters to use. */
++ int max_mcasts; /*!< Max. number of mcast subscriptions */
++ int max_buf_pages; /*!< Max. number of pages of NIC buffers */
++};
++
++
++/*! The state for an instance of the back end driver. */
++struct netback_accel {
++ /*! mutex to protect this state */
++ struct mutex bend_mutex;
++
++ /*! Watches on xenstore */
++ struct xenbus_watch domu_accel_watch;
++ struct xenbus_watch config_accel_watch;
++
++ /*! Pointer to whatever device cookie ties us in to the hypervisor */
++ void *hdev_data;
++
++ /*! FIFO indices. Next page is msg FIFOs */
++ struct net_accel_shared_page *shared_page;
++
++ /*! Defer control message processing */
++ struct work_struct handle_msg;
++
++ /*! Identifies other end VM and interface.*/
++ int far_end;
++ int vif_num;
++
++ /*!< To unmap the shared pages */
++ void *sh_pages_unmap;
++
++ /* Resource tracking */
++ /*! Limits on H/W & Dom0 resources */
++ struct netback_accel_limits quotas;
++
++ /* Hardware resources */
++ /*! The H/W type of associated NIC */
++ enum net_accel_hw_type hw_type;
++ /*! State of allocation */
++ int hw_state;
++ /*! Index into ci_driver.nics[] for this interface */
++ int nic_index;
++ /*! How to set up the acceleration for this hardware */
++ int (*accel_setup)(struct netback_accel *);
++ /*! And how to stop it. */
++ void (*accel_shutdown)(struct netback_accel *);
++
++ /*! The physical/real net_dev for this interface */
++ struct net_device *net_dev;
++
++ /*! Magic pointer to locate state in the forwarding table */
++ void *fwd_priv;
++
++ /*! Message FIFO */
++ sh_msg_fifo2 to_domU;
++ /*! Message FIFO */
++ sh_msg_fifo2 from_domU;
++
++ /*! General notification channel id */
++ int msg_channel;
++ /*! General notification channel irq */
++ int msg_channel_irq;
++
++ /*! Event channel id dedicated to network packet interrupts. */
++ int net_channel;
++ /*! Event channel irq dedicated to network packet interrupts. */
++ int net_channel_irq;
++
++ /*! The MAC address the frontend goes by. */
++ u8 mac[ETH_ALEN];
++ /*! Driver name of associated NIC */
++ char *nicname;
++
++ /*! Array of pointers to buffer pages mapped */
++ grant_handle_t *buffer_maps;
++ u64 *buffer_addrs;
++ /*! Index into buffer_maps */
++ int buffer_maps_index;
++ /*! Max number of pages that domU is allowed/will request to map */
++ int max_pages;
++
++ /*! Pointer to hardware specific private area */
++ void *accel_hw_priv;
++
++ /*! Wait queue for changes in accelstate. */
++ wait_queue_head_t state_wait_queue;
++
++ /*! Current state of the frontend according to the xenbus
++ * watch. */
++ XenbusState frontend_state;
++
++ /*! Current state of this backend. */
++ XenbusState backend_state;
++
++ /*! Non-zero if the backend is being removed. */
++ int removing;
++
++ /*! Non-zero if the setup_vnic has been called. */
++ int vnic_is_setup;
++
++#if NETBACK_ACCEL_STATS
++ struct netback_accel_stats stats;
++#endif
++#if defined(CONFIG_DEBUG_FS)
++ char *dbfs_dir_name;
++ struct dentry *dbfs_dir;
++ struct netback_accel_dbfs dbfs;
++#endif
++
++ /*! List */
++ struct netback_accel *next_bend;
++};
++
++
++/*
++ * Values for netback_accel.hw_state. States of resource allocation
++ * we can go through
++ */
++/*! No hardware has yet been allocated. */
++#define NETBACK_ACCEL_RES_NONE (0)
++/*! Hardware has been allocated. */
++#define NETBACK_ACCEL_RES_ALLOC (1)
++#define NETBACK_ACCEL_RES_FILTER (2)
++#define NETBACK_ACCEL_RES_HWINFO (3)
++
++/*! Filtering specification. This assumes that for VNIC support we
++ * will always want wildcard entries, so only specifies the
++ * destination IP/port
++ */
++struct netback_accel_filter_spec {
++ /*! Internal, used to access efx_vi API */
++ void *filter_handle;
++
++ /*! Destination IP in network order */
++ u32 destip_be;
++ /*! Destination port in network order */
++ u16 destport_be;
++ /*! Mac address */
++ u8 mac[ETH_ALEN];
++ /*! TCP or UDP */
++ u8 proto;
++};
++
++
++/**************************************************************************
++ * From accel.c
++ **************************************************************************/
++
++/*! \brief Start up all the acceleration plugins
++ *
++ * \return 0 on success, an errno on failure
++ */
++extern int netback_accel_init_accel(void);
++
++/*! \brief Shut down all the acceleration plugins
++ */
++extern void netback_accel_shutdown_accel(void);
++
++
++/**************************************************************************
++ * From accel_fwd.c
++ **************************************************************************/
++
++/*! \brief Init the forwarding infrastructure
++ * \return 0 on success, or -ENOMEM if it couldn't get memory for the
++ * forward table
++ */
++extern int netback_accel_init_fwd(void);
++
++/*! \brief Shut down the forwarding and free memory. */
++extern void netback_accel_shutdown_fwd(void);
++
++/*! Initialise each NIC port's forwarding table */
++extern void *netback_accel_init_fwd_port(void);
++extern void netback_accel_shutdown_fwd_port(void *fwd_priv);
++
++/*! \brief Add an entry to the forwarding table.
++ * \param mac : MAC address, used as hash key
++ * \param ctxt : value to associate with key (can be NULL, see
++ * netback_accel_fwd_set_context)
++ * \return 0 on success, or -ENOMEM if the table was full and could not be grown
++ */
++extern int netback_accel_fwd_add(const __u8 *mac, void *context,
++ void *fwd_priv);
++
++/*! \brief Remove an entry from the forwarding table.
++ * \param mac : the MAC address to remove
++ * \return nothing: it is not an error if the mac was not in the table
++ */
++extern void netback_accel_fwd_remove(const __u8 *mac, void *fwd_priv);
++
++/*! \brief Set the context pointer for an existing fwd table entry.
++ * \param mac : key that is already present in the table
++ * \param context : new value to associate with key
++ * \return 0 on success, -ENOENT if mac not present in table.
++ */
++extern int netback_accel_fwd_set_context(const __u8 *mac, void *context,
++ void *fwd_priv);
++
++/**************************************************************************
++ * From accel_msg.c
++ **************************************************************************/
++
++
++/*! \brief Send the start-of-day message that handshakes with the VNIC
++ * and tells it its MAC address.
++ *
++ * \param bend The back end driver data structure
++ * \param version The version of communication to use, e.g. NET_ACCEL_MSG_VERSION
++ */
++extern void netback_accel_msg_tx_hello(struct netback_accel *bend,
++ unsigned version);
++
++/*! \brief Send a "there's a new local mac address" message
++ *
++ * \param bend The back end driver data structure for the vnic to send
++ * the message to
++ * \param mac Pointer to the new mac address
++ */
++extern void netback_accel_msg_tx_new_localmac(struct netback_accel *bend,
++ const void *mac);
++
++/*! \brief Send a "a mac address that was local has gone away" message
++ *
++ * \param bend The back end driver data structure for the vnic to send
++ * the message to
++ * \param mac Pointer to the old mac address
++ */
++extern void netback_accel_msg_tx_old_localmac(struct netback_accel *bend,
++ const void *mac);
++
++extern void netback_accel_set_interface_state(struct netback_accel *bend,
++ int up);
++
++/*! \brief Process the message queue for a bend that has just
++ * interrupted.
++ *
++ * Demultiplexes an interrupt from the front end driver, taking
++ * messages from the fifo and taking appropriate action.
++ *
++ * \param bend The back end driver data structure
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++extern void netback_accel_msg_rx_handler(struct work_struct *arg);
++#else
++extern void netback_accel_msg_rx_handler(void *bend_void);
++#endif
++
++/**************************************************************************
++ * From accel_xenbus.c
++ **************************************************************************/
++/*! List of all the bends currently in existence. */
++extern struct netback_accel *bend_list;
++extern struct mutex bend_list_mutex;
++
++/*! \brief Probe a new network interface. */
++extern int netback_accel_probe(struct xenbus_device *dev);
++
++/*! \brief Remove a network interface. */
++extern int netback_accel_remove(struct xenbus_device *dev);
++
++/*! \brief Shutdown all accelerator backends */
++extern void netback_accel_shutdown_bends(void);
++
++/*! \brief Initiate the xenbus state teardown handshake */
++extern void netback_accel_set_closing(struct netback_accel *bend);
++
++/**************************************************************************
++ * From accel_debugfs.c
++ **************************************************************************/
++/*! Global statistics */
++struct netback_accel_global_stats {
++ /*! Number of TX packets seen through driverlink */
++ u64 dl_tx_packets;
++ /*! Number of TX packets seen through driverlink we didn't like */
++ u64 dl_tx_bad_packets;
++ /*! Number of RX packets seen through driverlink */
++ u64 dl_rx_packets;
++ /*! Number of mac addresses we are forwarding to */
++ u32 num_fwds;
++};
++
++/*! Debug fs entries for each of the above stats */
++struct netback_accel_global_dbfs {
++ struct dentry *dl_tx_packets;
++ struct dentry *dl_tx_bad_packets;
++ struct dentry *dl_rx_packets;
++ struct dentry *num_fwds;
++};
++
++#if NETBACK_ACCEL_STATS
++extern struct netback_accel_global_stats global_stats;
++#endif
++
++/*! \brief Initialise the debugfs root and populate with global stats */
++extern void netback_accel_debugfs_init(void);
++
++/*! \brief Remove our debugfs root directory */
++extern void netback_accel_debugfs_fini(void);
++
++/*! \brief Add per-bend statistics to debug fs */
++extern int netback_accel_debugfs_create(struct netback_accel *bend);
++/*! \brief Remove per-bend statistics from debug fs */
++extern int netback_accel_debugfs_remove(struct netback_accel *bend);
++
++#endif /* NETBACK_ACCEL_H */
++
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel_msg.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_msg.c
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel_msg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_msg.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,392 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/evtchn.h>
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++#include "accel_solarflare.h"
++
++/* Send a HELLO to front end to start things off */
++void netback_accel_msg_tx_hello(struct netback_accel *bend, unsigned version)
++{
++ unsigned long lock_state;
++ struct net_accel_msg *msg =
++ net_accel_msg_start_send(bend->shared_page,
++ &bend->to_domU, &lock_state);
++ /* The queue _cannot_ be full, we're the first users. */
++ EPRINTK_ON(msg == NULL);
++
++ if (msg != NULL) {
++ net_accel_msg_init(msg, NET_ACCEL_MSG_HELLO);
++ msg->u.hello.version = version;
++ msg->u.hello.max_pages = bend->quotas.max_buf_pages;
++ VPRINTK("Sending hello to channel %d\n", bend->msg_channel);
++ net_accel_msg_complete_send_notify(bend->shared_page,
++ &bend->to_domU,
++ &lock_state,
++ bend->msg_channel_irq);
++ }
++}
++
++/* Send a local mac message to vnic */
++static void netback_accel_msg_tx_localmac(struct netback_accel *bend,
++ int type, const void *mac)
++{
++ unsigned long lock_state;
++ struct net_accel_msg *msg;
++
++ BUG_ON(bend == NULL || mac == NULL);
++
++ VPRINTK("Sending local mac message: " MAC_FMT "\n",
++ MAC_ARG((const char *)mac));
++
++ msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU,
++ &lock_state);
++
++ if (msg != NULL) {
++ net_accel_msg_init(msg, NET_ACCEL_MSG_LOCALMAC);
++ msg->u.localmac.flags = type;
++ memcpy(msg->u.localmac.mac, mac, ETH_ALEN);
++ net_accel_msg_complete_send_notify(bend->shared_page,
++ &bend->to_domU,
++ &lock_state,
++ bend->msg_channel_irq);
++ } else {
++ /*
++ * TODO if this happens we may leave a domU
++ * fastpathing packets when they should be delivered
++ * locally. Solution is get domU to timeout entries
++ * in its fastpath lookup table when it receives no RX
++ * traffic
++ */
++ EPRINTK("%s: saw full queue, may need ARP timer to recover\n",
++ __FUNCTION__);
++ }
++}
++
++/* Send an add local mac message to vnic */
++void netback_accel_msg_tx_new_localmac(struct netback_accel *bend,
++ const void *mac)
++{
++ netback_accel_msg_tx_localmac(bend, NET_ACCEL_MSG_ADD, mac);
++}
++
++
++static int netback_accel_msg_rx_buffer_map(struct netback_accel *bend,
++ struct net_accel_msg *msg)
++{
++ int log2_pages, rc;
++
++ /* Can only allocate in power of two */
++ log2_pages = log2_ge(msg->u.mapbufs.pages, 0);
++ if (msg->u.mapbufs.pages != pow2(log2_pages)) {
++ EPRINTK("%s: Can only alloc bufs in power of 2 sizes (%d)\n",
++ __FUNCTION__, msg->u.mapbufs.pages);
++ rc = -EINVAL;
++ goto err_out;
++ }
++
++ /*
++ * Sanity. Assumes NET_ACCEL_MSG_MAX_PAGE_REQ is same for
++ * both directions/domains
++ */
++ if (msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ) {
++ EPRINTK("%s: too many pages in a single message: %d %d\n",
++ __FUNCTION__, msg->u.mapbufs.pages,
++ NET_ACCEL_MSG_MAX_PAGE_REQ);
++ rc = -EINVAL;
++ goto err_out;
++ }
++
++ if ((rc = netback_accel_add_buffers(bend, msg->u.mapbufs.pages,
++ log2_pages, msg->u.mapbufs.grants,
++ &msg->u.mapbufs.buf)) < 0) {
++ goto err_out;
++ }
++
++ msg->id |= NET_ACCEL_MSG_REPLY;
++
++ return 0;
++
++ err_out:
++ EPRINTK("%s: err_out\n", __FUNCTION__);
++ msg->id |= NET_ACCEL_MSG_ERROR | NET_ACCEL_MSG_REPLY;
++ return rc;
++}
++
++
++/* Hint from frontend that one of our filters is out of date */
++static int netback_accel_process_fastpath(struct netback_accel *bend,
++ struct net_accel_msg *msg)
++{
++ struct netback_accel_filter_spec spec;
++
++ if (msg->u.fastpath.flags & NET_ACCEL_MSG_REMOVE) {
++ /*
++ * Would be nice to BUG() this but would leave us
++ * vulnerable to naughty frontend
++ */
++ EPRINTK_ON(msg->u.fastpath.flags & NET_ACCEL_MSG_ADD);
++
++ memcpy(spec.mac, msg->u.fastpath.mac, ETH_ALEN);
++ spec.destport_be = msg->u.fastpath.port;
++ spec.destip_be = msg->u.fastpath.ip;
++ spec.proto = msg->u.fastpath.proto;
++
++ netback_accel_filter_remove_spec(bend, &spec);
++ }
++
++ return 0;
++}
++
++
++/* Flow control for message queues */
++inline void set_queue_not_full(struct netback_accel *bend)
++{
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
++ (unsigned long *)&bend->shared_page->aflags))
++ notify_remote_via_irq(bend->msg_channel_irq);
++ else
++ VPRINTK("queue not full bit already set, not signalling\n");
++}
++
++
++/* Flow control for message queues */
++inline void set_queue_full(struct netback_accel *bend)
++{
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
++ (unsigned long *)&bend->shared_page->aflags))
++ notify_remote_via_irq(bend->msg_channel_irq);
++ else
++ VPRINTK("queue full bit already set, not signalling\n");
++}
++
++
++void netback_accel_set_interface_state(struct netback_accel *bend, int up)
++{
++ bend->shared_page->net_dev_up = up;
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
++ (unsigned long *)&bend->shared_page->aflags))
++ notify_remote_via_irq(bend->msg_channel_irq);
++ else
++ VPRINTK("interface up/down bit already set, not signalling\n");
++}
++
++
++static int check_rx_hello_version(unsigned version)
++{
++ /* Should only happen if there's been a version mismatch */
++ BUG_ON(version == NET_ACCEL_MSG_VERSION);
++
++ if (version > NET_ACCEL_MSG_VERSION) {
++ /* Newer protocol, we must refuse */
++ return -EPROTO;
++ }
++
++ if (version < NET_ACCEL_MSG_VERSION) {
++ /*
++ * We are newer, so have discretion to accept if we
++ * wish. For now, however, just reject.
++ */
++ return -EPROTO;
++ }
++
++ return -EINVAL;
++}
++
++
++static int process_rx_msg(struct netback_accel *bend,
++ struct net_accel_msg *msg)
++{
++ int err = 0;
++
++ switch (msg->id) {
++ case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO:
++ /* Reply to a HELLO; mark ourselves as connected */
++ DPRINTK("got Hello reply, version %.8x\n",
++ msg->u.hello.version);
++
++ /*
++ * Check that we've not successfully done this
++ * already. NB no check at the moment that this reply
++ * comes after we've actually sent a HELLO as that's
++ * not possible with the current code structure
++ */
++ if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ /* Store max_pages for accel_setup */
++ if (msg->u.hello.max_pages > bend->quotas.max_buf_pages) {
++ EPRINTK("More pages than quota allows (%d > %d)\n",
++ msg->u.hello.max_pages,
++ bend->quotas.max_buf_pages);
++ /* Force it down to the quota */
++ msg->u.hello.max_pages = bend->quotas.max_buf_pages;
++ }
++ bend->max_pages = msg->u.hello.max_pages;
++
++ /* Set up the hardware visible to the other end */
++ err = bend->accel_setup(bend);
++ if (err) {
++ /* This is fatal */
++ DPRINTK("Hello gave accel_setup error %d\n", err);
++ netback_accel_set_closing(bend);
++ } else {
++ /*
++ * Now add the context so that packet
++ * forwarding will commence
++ */
++ netback_accel_fwd_set_context(bend->mac, bend,
++ bend->fwd_priv);
++ }
++ break;
++ case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_ERROR:
++ EPRINTK("got Hello error, versions us:%.8x them:%.8x\n",
++ NET_ACCEL_MSG_VERSION, msg->u.hello.version);
++
++ if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ if (msg->u.hello.version != NET_ACCEL_MSG_VERSION) {
++ /* Error is due to version mismatch */
++ err = check_rx_hello_version(msg->u.hello.version);
++ if (err == 0) {
++ /*
++ * It's OK to be compatible, send
++ * another hello with compatible version
++ */
++ netback_accel_msg_tx_hello
++ (bend, msg->u.hello.version);
++ } else {
++ /*
++ * Tell frontend that we're not going to
++ * send another HELLO by going to Closing.
++ */
++ netback_accel_set_closing(bend);
++ }
++ }
++ break;
++ case NET_ACCEL_MSG_MAPBUF:
++ VPRINTK("Got mapped buffers request %d\n",
++ msg->u.mapbufs.reqid);
++
++ if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ /*
++ * Frontend wants a buffer table entry for the
++ * supplied pages
++ */
++ err = netback_accel_msg_rx_buffer_map(bend, msg);
++ if (net_accel_msg_reply_notify(bend->shared_page,
++ bend->msg_channel_irq,
++ &bend->to_domU, msg)) {
++ /*
++ * This is fatal as we can't tell the frontend
++ * about the problem through the message
++ * queue, and so would otherwise stalemate
++ */
++ netback_accel_set_closing(bend);
++ }
++ break;
++ case NET_ACCEL_MSG_FASTPATH:
++ DPRINTK("Got fastpath request\n");
++
++ if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ err = netback_accel_process_fastpath(bend, msg);
++ break;
++ default:
++ EPRINTK("Huh? Message code is %x\n", msg->id);
++ err = -EPROTO;
++ break;
++ }
++ return err;
++}
++
++
++/* Demultiplex an IRQ from the frontend driver. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++void netback_accel_msg_rx_handler(struct work_struct *arg)
++#else
++void netback_accel_msg_rx_handler(void *bend_void)
++#endif
++{
++ struct net_accel_msg msg;
++ int err, queue_was_full = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ struct netback_accel *bend =
++ container_of(arg, struct netback_accel, handle_msg);
++#else
++ struct netback_accel *bend = (struct netback_accel *)bend_void;
++#endif
++
++ mutex_lock(&bend->bend_mutex);
++
++ /*
++ * This happens when the shared pages have been unmapped, but
++ * the workqueue not flushed yet
++ */
++ if (bend->shared_page == NULL)
++ goto done;
++
++ if ((bend->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_TO_DOM0_MASK) != 0) {
++ if (bend->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL) {
++ /* We've been told there may now be space. */
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
++ (unsigned long *)&bend->shared_page->aflags);
++ }
++
++ if (bend->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUEUFULL) {
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
++ (unsigned long *)&bend->shared_page->aflags);
++ queue_was_full = 1;
++ }
++ }
++
++ while ((err = net_accel_msg_recv(bend->shared_page, &bend->from_domU,
++ &msg)) == 0) {
++ err = process_rx_msg(bend, &msg);
++
++ if (err != 0) {
++ EPRINTK("%s: Error %d\n", __FUNCTION__, err);
++ goto err;
++ }
++ }
++
++ err:
++ /* There will be space now if we can make any. */
++ if (queue_was_full)
++ set_queue_not_full(bend);
++ done:
++ mutex_unlock(&bend->bend_mutex);
++
++ return;
++}
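
On kernels 2.6.20 and later the work handler receives a pointer to the embedded work_struct and recovers its enclosing structure with container_of(), as the handler above does for struct netback_accel. A standalone model of that pointer arithmetic (struct names here are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    struct work_struct_model { int pending; };

    struct bend_model {
        int id;
        struct work_struct_model handle_msg;  /* embedded member */
    };

    /* Subtract the member offset to recover the enclosing struct. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void handler(struct work_struct_model *arg)
    {
        struct bend_model *bend =
            container_of(arg, struct bend_model, handle_msg);
        printf("recovered bend, id %d\n", bend->id);
    }

    int main(void)
    {
        struct bend_model bend = { 42, { 0 } };

        handler(&bend.handle_msg);  /* only the member pointer is passed */
        return 0;
    }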
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel_solarflare.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_solarflare.c
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel_solarflare.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_solarflare.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1253 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "common.h"
++
++#include "accel.h"
++#include "accel_solarflare.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++
++#include "accel_cuckoo_hash.h"
++
++#include "ci/driver/resource/efx_vi.h"
++
++#include "ci/efrm/nic_table.h"
++#include "ci/efhw/public.h"
++
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++
++#include "driverlink_api.h"
++
++#define SF_XEN_RX_USR_BUF_SIZE 2048
++
++struct falcon_bend_accel_priv {
++ struct efx_vi_state *efx_vih;
++
++ /*! Array of pointers to dma_map state, used so VNIC can
++ * request their removal in a single message
++ */
++ struct efx_vi_dma_map_state **dma_maps;
++ /*! Index into dma_maps */
++ int dma_maps_index;
++
++ /*! Serialises access to filters */
++ spinlock_t filter_lock;
++ /*! Bitmap of which filters are free */
++ unsigned long free_filters;
++ /*! Used for index normalisation */
++ u32 filter_idx_mask;
++ struct netback_accel_filter_spec *fspecs;
++ cuckoo_hash_table filter_hash_table;
++
++ u32 txdmaq_gnt;
++ u32 rxdmaq_gnt;
++ u32 doorbell_gnt;
++ u32 evq_rptr_gnt;
++ u32 evq_mem_gnts[EF_HW_FALCON_EVQ_PAGES];
++ u32 evq_npages;
++};
++
++/* Forward declaration */
++static int netback_accel_filter_init(struct netback_accel *);
++static void netback_accel_filter_shutdown(struct netback_accel *);
++
++/**************************************************************************
++ *
++ * Driverlink stuff
++ *
++ **************************************************************************/
++
++struct driverlink_port {
++ struct list_head link;
++ enum net_accel_hw_type type;
++ struct net_device *net_dev;
++ struct efx_dl_device *efx_dl_dev;
++ int nic_index;
++ void *fwd_priv;
++};
++
++static struct list_head dl_ports;
++
++/* This mutex protects global state, such as the dl_ports list */
++DEFINE_MUTEX(accel_mutex);
++
++static int init_done = 0;
++
++/* The DL callbacks */
++
++
++#if defined(EFX_USE_FASTCALL)
++static enum efx_veto fastcall
++#else
++static enum efx_veto
++#endif
++bend_dl_tx_packet(struct efx_dl_device *efx_dl_dev,
++ struct sk_buff *skb)
++{
++ struct driverlink_port *port = efx_dl_dev->priv;
++
++ BUG_ON(port == NULL);
++
++ NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_packets++);
++ if (skb->mac.raw != NULL)
++ netback_accel_tx_packet(skb, port->fwd_priv);
++ else {
++ DPRINTK("Ignoring packet with missing mac address\n");
++ NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_bad_packets++);
++ }
++ return EFX_ALLOW_PACKET;
++}
++
++/* EFX_USE_FASTCALL */
++#if defined(EFX_USE_FASTCALL)
++static enum efx_veto fastcall
++#else
++static enum efx_veto
++#endif
++bend_dl_rx_packet(struct efx_dl_device *efx_dl_dev,
++ const char *pkt_buf, int pkt_len)
++{
++ struct driverlink_port *port = efx_dl_dev->priv;
++ struct netback_pkt_buf pkt;
++ struct ethhdr *eh;
++
++ BUG_ON(port == NULL);
++
++ pkt.mac.raw = (char *)pkt_buf;
++ pkt.nh.raw = (char *)pkt_buf + ETH_HLEN;
++ eh = (struct ethhdr *)pkt_buf;
++ pkt.protocol = eh->h_proto;
++
++ NETBACK_ACCEL_STATS_OP(global_stats.dl_rx_packets++);
++ netback_accel_rx_packet(&pkt, port->fwd_priv);
++ return EFX_ALLOW_PACKET;
++}
++
++
++/* Callbacks we'd like to get from the netdriver through driverlink */
++struct efx_dl_callbacks bend_dl_callbacks =
++ {
++ .tx_packet = bend_dl_tx_packet,
++ .rx_packet = bend_dl_rx_packet,
++ };
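
The struct above is a classic ops table: the backend hands the net driver a structure of function pointers, and the driver invokes them per packet. A minimal sketch of that registration-and-dispatch shape (names are illustrative, not the driverlink API):

    #include <stdio.h>

    struct callbacks_model {
        int (*tx_packet)(const char *buf, int len);
        int (*rx_packet)(const char *buf, int len);
    };

    static int my_tx(const char *buf, int len)
    {
        (void)buf;
        printf("tx hook saw %d bytes\n", len);
        return 0;
    }

    static int my_rx(const char *buf, int len)
    {
        (void)buf;
        printf("rx hook saw %d bytes\n", len);
        return 0;
    }

    /* Registered once; dispatched on every packet thereafter. */
    static struct callbacks_model cb = { my_tx, my_rx };

    int main(void)
    {
        cb.tx_packet("pkt", 3);
        cb.rx_packet("pkt", 3);
        return 0;
    }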
++
++
++static struct netback_accel_hooks accel_hooks = {
++ THIS_MODULE,
++ &netback_accel_probe,
++ &netback_accel_remove
++};
++
++
++/*
++ * Handy helper which given an efx_dl_device works out which
++ * efab_nic_t index into efrm_nic_table.nics[] it corresponds to
++ */
++static int efx_device_to_efab_nic_index(struct efx_dl_device *efx_dl_dev)
++{
++ int i;
++
++ for (i = 0; i < EFHW_MAX_NR_DEVS; i++) {
++ struct efhw_nic *nic = efrm_nic_table.nic[i];
++
++ /*
++ * It's possible for the nic structure to have not
++ * been initialised if the resource driver failed its
++ * driverlink probe
++ */
++ if (nic == NULL || nic->net_driver_dev == NULL)
++ continue;
++
++ /* Work out if these are talking about the same NIC */
++ if (nic->net_driver_dev->pci_dev == efx_dl_dev->pci_dev)
++ return i;
++ }
++
++ return -1;
++}
++
++
++/* Driver link probe - register our callbacks */
++static int bend_dl_probe(struct efx_dl_device *efx_dl_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char* silicon_rev)
++{
++ int rc;
++ enum net_accel_hw_type type;
++ struct driverlink_port *port;
++
++ DPRINTK("%s: %s\n", __FUNCTION__, silicon_rev);
++
++ if (strcmp(silicon_rev, "falcon/a1") == 0)
++ type = NET_ACCEL_MSG_HWTYPE_FALCON_A;
++ else if (strcmp(silicon_rev, "falcon/b0") == 0)
++ type = NET_ACCEL_MSG_HWTYPE_FALCON_B;
++ else {
++ EPRINTK("%s: unsupported silicon %s\n", __FUNCTION__,
++ silicon_rev);
++ rc = -EINVAL;
++ goto fail1;
++ }
++
++ port = kmalloc(sizeof(struct driverlink_port), GFP_KERNEL);
++ if (port == NULL) {
++ EPRINTK("%s: no memory for dl probe\n", __FUNCTION__);
++ rc = -ENOMEM;
++ goto fail1;
++ }
++
++ port->efx_dl_dev = efx_dl_dev;
++ efx_dl_dev->priv = port;
++
++ port->nic_index = efx_device_to_efab_nic_index(efx_dl_dev);
++ if (port->nic_index < 0) {
++ /*
++ * This can happen in theory if the resource driver
++ * failed to initialise properly
++ */
++ EPRINTK("%s: nic structure not found\n", __FUNCTION__);
++ rc = -EINVAL;
++ goto fail2;
++ }
++
++ port->fwd_priv = netback_accel_init_fwd_port();
++ if (port->fwd_priv == NULL) {
++ EPRINTK("%s: failed to set up forwarding for port\n",
++ __FUNCTION__);
++ rc = -ENOMEM;
++ goto fail2;
++ }
++
++ rc = efx_dl_register_callbacks(efx_dl_dev, &bend_dl_callbacks);
++ if (rc != 0) {
++ EPRINTK("%s: register_callbacks failed\n", __FUNCTION__);
++ goto fail3;
++ }
++
++ port->type = type;
++ port->net_dev = (struct net_device *)net_dev;
++
++ mutex_lock(&accel_mutex);
++ list_add(&port->link, &dl_ports);
++ mutex_unlock(&accel_mutex);
++
++ rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
++ port->net_dev->name, &accel_hooks);
++
++ if (rc < 0) {
++ EPRINTK("Xen netback accelerator version mismatch\n");
++ goto fail4;
++ } else if (rc > 0) {
++ /*
++ * In future may want to add backwards compatibility
++ * and accept certain subsets of previous versions
++ */
++ EPRINTK("Xen netback accelerator version mismatch\n");
++ goto fail4;
++ }
++
++ return 0;
++
++ fail4:
++ mutex_lock(&accel_mutex);
++ list_del(&port->link);
++ mutex_unlock(&accel_mutex);
++
++ efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
++ fail3:
++ netback_accel_shutdown_fwd_port(port->fwd_priv);
++ fail2:
++ efx_dl_dev->priv = NULL;
++ kfree(port);
++ fail1:
++ return rc;
++}
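
The fail1..fail4 labels above implement the usual kernel error-unwind ladder: each later failure jumps to a label that undoes every earlier step, in reverse order. A compact standalone model of the pattern:

    #include <stdio.h>

    static int step(int n, int fail_at)
    {
        return n == fail_at ? -1 : 0;  /* simulate a failure at step n */
    }

    static int probe(int fail_at)
    {
        int rc;

        if ((rc = step(1, fail_at)) != 0)
            goto fail1;
        if ((rc = step(2, fail_at)) != 0)
            goto fail2;
        if ((rc = step(3, fail_at)) != 0)
            goto fail3;
        return 0;

     fail3:
        puts("undo step 2");
     fail2:
        puts("undo step 1");
     fail1:
        return rc;
    }

    int main(void)
    {
        printf("probe failing at step 3 returned %d\n", probe(3));
        return 0;
    }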
++
++
++static void bend_dl_remove(struct efx_dl_device *efx_dl_dev)
++{
++ struct driverlink_port *port;
++
++ DPRINTK("Unregistering driverlink callbacks.\n");
++
++ mutex_lock(&accel_mutex);
++
++ port = (struct driverlink_port *)efx_dl_dev->priv;
++
++ BUG_ON(list_empty(&dl_ports));
++ BUG_ON(port == NULL);
++ BUG_ON(port->efx_dl_dev != efx_dl_dev);
++
++ netback_disconnect_accelerator(0, port->net_dev->name);
++
++ list_del(&port->link);
++
++ mutex_unlock(&accel_mutex);
++
++ efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
++ netback_accel_shutdown_fwd_port(port->fwd_priv);
++
++ efx_dl_dev->priv = NULL;
++ kfree(port);
++
++ return;
++}
++
++
++static struct efx_dl_driver bend_dl_driver =
++ {
++ .name = "SFC Xen backend",
++ .probe = bend_dl_probe,
++ .remove = bend_dl_remove,
++ };
++
++
++int netback_accel_sf_init(void)
++{
++ int rc, nic_i;
++ struct efhw_nic *nic;
++
++ INIT_LIST_HEAD(&dl_ports);
++
++ rc = efx_dl_register_driver(&bend_dl_driver);
++ /* If we couldn't find the NET driver, give up */
++ if (rc == -ENOENT)
++ return rc;
++
++ if (rc == 0) {
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ falcon_nic_set_rx_usr_buf_size(nic,
++ SF_XEN_RX_USR_BUF_SIZE);
++ }
++
++ init_done = (rc == 0);
++ return rc;
++}
++
++
++void netback_accel_sf_shutdown(void)
++{
++ if (!init_done)
++ return;
++ DPRINTK("Unregistering driverlink driver\n");
++
++ /*
++ * This will trigger removal callbacks for all the devices, which
++ * will unregister their callbacks, disconnect from netfront, etc.
++ */
++ efx_dl_unregister_driver(&bend_dl_driver);
++}
++
++
++int netback_accel_sf_hwtype(struct netback_accel *bend)
++{
++ struct driverlink_port *port;
++
++ mutex_lock(&accel_mutex);
++
++ list_for_each_entry(port, &dl_ports, link) {
++ if (strcmp(bend->nicname, port->net_dev->name) == 0) {
++ bend->hw_type = port->type;
++ bend->accel_setup = netback_accel_setup_vnic_hw;
++ bend->accel_shutdown = netback_accel_shutdown_vnic_hw;
++ bend->fwd_priv = port->fwd_priv;
++ /* This is just needed to pass to efx_vi_alloc */
++ bend->nic_index = port->nic_index;
++ bend->net_dev = port->net_dev;
++ mutex_unlock(&accel_mutex);
++ return 0;
++ }
++ }
++
++ mutex_unlock(&accel_mutex);
++
++ EPRINTK("Failed to identify backend device '%s' with a NIC\n",
++ bend->nicname);
++
++ return -ENOENT;
++}
++
++
++/****************************************************************************
++ * Resource management code
++ ***************************************************************************/
++
++static int alloc_page_state(struct netback_accel *bend, int max_pages)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv;
++
++ if (max_pages < 0 || max_pages > bend->quotas.max_buf_pages) {
++ EPRINTK("%s: invalid max_pages: %d\n", __FUNCTION__, max_pages);
++ return -EINVAL;
++ }
++
++ accel_hw_priv = kzalloc(sizeof(struct falcon_bend_accel_priv),
++ GFP_KERNEL);
++ if (accel_hw_priv == NULL) {
++ EPRINTK("%s: no memory for accel_hw_priv\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ accel_hw_priv->dma_maps = kzalloc
++ (sizeof(struct efx_vi_dma_map_state **) *
++ (max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ), GFP_KERNEL);
++ if (accel_hw_priv->dma_maps == NULL) {
++ EPRINTK("%s: no memory for dma_maps\n", __FUNCTION__);
++ kfree(accel_hw_priv);
++ return -ENOMEM;
++ }
++
++ bend->buffer_maps = kzalloc(sizeof(struct vm_struct *) * max_pages,
++ GFP_KERNEL);
++ if (bend->buffer_maps == NULL) {
++ EPRINTK("%s: no memory for buffer_maps\n", __FUNCTION__);
++ kfree(accel_hw_priv->dma_maps);
++ kfree(accel_hw_priv);
++ return -ENOMEM;
++ }
++
++ bend->buffer_addrs = kzalloc(sizeof(u64) * max_pages, GFP_KERNEL);
++ if (bend->buffer_addrs == NULL) {
++ kfree(bend->buffer_maps);
++ kfree(accel_hw_priv->dma_maps);
++ kfree(accel_hw_priv);
++ return -ENOMEM;
++ }
++
++ bend->accel_hw_priv = accel_hw_priv;
++
++ return 0;
++}
++
++
++static int free_page_state(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv;
++
++ DPRINTK("%s: %p\n", __FUNCTION__, bend);
++
++ accel_hw_priv = bend->accel_hw_priv;
++
++ if (accel_hw_priv) {
++ kfree(accel_hw_priv->dma_maps);
++ kfree(bend->buffer_maps);
++ kfree(bend->buffer_addrs);
++ kfree(accel_hw_priv);
++ bend->accel_hw_priv = NULL;
++ bend->max_pages = 0;
++ }
++
++ return 0;
++}
++
++
++/* The timeout event callback for the event q */
++static void bend_evq_timeout(void *context, int is_timeout)
++{
++ struct netback_accel *bend = (struct netback_accel *)context;
++ if (is_timeout) {
++ /* Pass event to vnic front end driver */
++ VPRINTK("timeout event to %d\n", bend->net_channel);
++ NETBACK_ACCEL_STATS_OP(bend->stats.evq_timeouts++);
++ notify_remote_via_irq(bend->net_channel_irq);
++ } else {
++ /* It's a wakeup event, used by Falcon */
++ VPRINTK("wakeup to %d\n", bend->net_channel);
++ NETBACK_ACCEL_STATS_OP(bend->stats.evq_wakeups++);
++ notify_remote_via_irq(bend->net_channel_irq);
++ }
++}
++
++
++/*
++ * Create the eventq and associated gubbins for communication with the
++ * front end vnic driver
++ */
++static int ef_get_vnic(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv;
++ int rc = 0;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_NONE);
++
++ /* Allocate page related state and accel_hw_priv */
++ rc = alloc_page_state(bend, bend->max_pages);
++ if (rc != 0) {
++ EPRINTK("Failed to allocate page state: %d\n", rc);
++ return rc;
++ }
++
++ accel_hw_priv = bend->accel_hw_priv;
++
++ rc = efx_vi_alloc(&accel_hw_priv->efx_vih, bend->nic_index);
++ if (rc != 0) {
++ EPRINTK("%s: efx_vi_alloc failed %d\n", __FUNCTION__, rc);
++ free_page_state(bend);
++ return rc;
++ }
++
++ rc = efx_vi_eventq_register_callback(accel_hw_priv->efx_vih,
++ bend_evq_timeout,
++ bend);
++ if (rc != 0) {
++ EPRINTK("%s: register_callback failed %d\n", __FUNCTION__, rc);
++ efx_vi_free(accel_hw_priv->efx_vih);
++ free_page_state(bend);
++ return rc;
++ }
++
++ bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
++
++ return 0;
++}
++
++
++static void ef_free_vnic(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);
++
++ efx_vi_eventq_kill_callback(accel_hw_priv->efx_vih);
++
++ DPRINTK("Hardware is freeable. Will proceed.\n");
++
++ efx_vi_free(accel_hw_priv->efx_vih);
++ accel_hw_priv->efx_vih = NULL;
++
++ VPRINTK("Free page state...\n");
++ free_page_state(bend);
++
++ bend->hw_state = NETBACK_ACCEL_RES_NONE;
++}
++
++
++static inline void ungrant_or_crash(grant_ref_t gntref, int domain) {
++ if (net_accel_ungrant_page(gntref) == -EBUSY)
++ net_accel_shutdown_remote(domain);
++}
++
++
++static void netback_accel_release_hwinfo(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i;
++
++ DPRINTK("Remove dma q grants %d %d\n", accel_hw_priv->txdmaq_gnt,
++ accel_hw_priv->rxdmaq_gnt);
++ ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
++ ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
++
++ DPRINTK("Remove doorbell grant %d\n", accel_hw_priv->doorbell_gnt);
++ ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
++
++ if (bend->hw_type == NET_ACCEL_MSG_HWTYPE_FALCON_A) {
++ DPRINTK("Remove rptr grant %d\n", accel_hw_priv->evq_rptr_gnt);
++ ungrant_or_crash(accel_hw_priv->evq_rptr_gnt, bend->far_end);
++ }
++
++ for (i = 0; i < accel_hw_priv->evq_npages; i++) {
++ DPRINTK("Remove evq grant %d\n", accel_hw_priv->evq_mem_gnts[i]);
++ ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
++ }
++
++ bend->hw_state = NETBACK_ACCEL_RES_FILTER;
++
++ return;
++}
++
++
++static int ef_bend_hwinfo_falcon_common(struct netback_accel *bend,
++ struct net_accel_hw_falcon_b *hwinfo)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ struct efx_vi_hw_resource_metadata res_mdata;
++ struct efx_vi_hw_resource res_array[EFX_VI_HW_RESOURCE_MAXSIZE];
++ int rc, len = EFX_VI_HW_RESOURCE_MAXSIZE, i, pfn = 0;
++ unsigned long txdmaq_pfn = 0, rxdmaq_pfn = 0;
++
++ rc = efx_vi_hw_resource_get_phys(accel_hw_priv->efx_vih, &res_mdata,
++ res_array, &len);
++ if (rc != 0) {
++ DPRINTK("%s: resource_get_phys returned %d\n",
++ __FUNCTION__, rc);
++ return rc;
++ }
++
++ if (res_mdata.version != 0)
++ return -EPROTO;
++
++ hwinfo->nic_arch = res_mdata.nic_arch;
++ hwinfo->nic_variant = res_mdata.nic_variant;
++ hwinfo->nic_revision = res_mdata.nic_revision;
++
++ hwinfo->evq_order = res_mdata.evq_order;
++ hwinfo->evq_offs = res_mdata.evq_offs;
++ hwinfo->evq_capacity = res_mdata.evq_capacity;
++ hwinfo->instance = res_mdata.instance;
++ hwinfo->rx_capacity = res_mdata.rx_capacity;
++ hwinfo->tx_capacity = res_mdata.tx_capacity;
++
++ VPRINTK("evq_order %d evq_offs %d evq_cap %d inst %d rx_cap %d tx_cap %d\n",
++ hwinfo->evq_order, hwinfo->evq_offs, hwinfo->evq_capacity,
++ hwinfo->instance, hwinfo->rx_capacity, hwinfo->tx_capacity);
++
++ for (i = 0; i < len; i++) {
++ struct efx_vi_hw_resource *res = &(res_array[i]);
++ switch (res->type) {
++ case EFX_VI_HW_RESOURCE_TXDMAQ:
++ txdmaq_pfn = page_to_pfn(virt_to_page(res->address));
++ break;
++ case EFX_VI_HW_RESOURCE_RXDMAQ:
++ rxdmaq_pfn = page_to_pfn(virt_to_page(res->address));
++ break;
++ case EFX_VI_HW_RESOURCE_EVQTIMER:
++ break;
++ case EFX_VI_HW_RESOURCE_EVQRPTR:
++ case EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET:
++ hwinfo->evq_rptr = res->address;
++ break;
++ case EFX_VI_HW_RESOURCE_EVQMEMKVA:
++ accel_hw_priv->evq_npages = 1 << res_mdata.evq_order;
++ pfn = page_to_pfn(virt_to_page(res->address));
++ break;
++ case EFX_VI_HW_RESOURCE_BELLPAGE:
++ hwinfo->doorbell_mfn = res->address;
++ break;
++ default:
++ EPRINTK("%s: Unknown hardware resource type %d\n",
++ __FUNCTION__, res->type);
++ break;
++ }
++ }
++
++ VPRINTK("Passing txdmaq page pfn %lx\n", txdmaq_pfn);
++ accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt =
++ net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn),
++ 0);
++
++ VPRINTK("Passing rxdmaq page pfn %lx\n", rxdmaq_pfn);
++ accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt =
++ net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn),
++ 0);
++
++ VPRINTK("Passing doorbell page mfn %x\n", hwinfo->doorbell_mfn);
++ /* Make the relevant H/W pages mappable by the far end */
++ accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt =
++ net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
++
++ /* Now do the same for the memory pages */
++ /* Convert the page + length we got back for the evq to grants. */
++ for (i = 0; i < accel_hw_priv->evq_npages; i++) {
++ accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] =
++ net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
++ VPRINTK("Got grant %u for evq pfn %x\n", hwinfo->evq_mem_gnts[i],
++ pfn);
++ pfn++;
++ }
++
++ return 0;
++}
++
++
++static int ef_bend_hwinfo_falcon_a(struct netback_accel *bend,
++ struct net_accel_hw_falcon_a *hwinfo)
++{
++ int rc;
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++
++ if ((rc = ef_bend_hwinfo_falcon_common(bend, &hwinfo->common)) != 0)
++ return rc;
++
++ /*
++ * Note that unlike the above, where the message field is the
++ * page number, here evq_rptr is the entire address because
++ * it is currently a pointer into the densely mapped timer page.
++ */
++ VPRINTK("Passing evq_rptr pfn %x for rptr %x\n",
++ hwinfo->common.evq_rptr >> PAGE_SHIFT,
++ hwinfo->common.evq_rptr);
++ rc = net_accel_grant_page(bend->hdev_data,
++ hwinfo->common.evq_rptr >> PAGE_SHIFT, 0);
++ if (rc < 0)
++ return rc;
++
++ accel_hw_priv->evq_rptr_gnt = hwinfo->evq_rptr_gnt = rc;
++ VPRINTK("evq_rptr_gnt got %d\n", hwinfo->evq_rptr_gnt);
++
++ return 0;
++}
++
++
++static int ef_bend_hwinfo_falcon_b(struct netback_accel *bend,
++ struct net_accel_hw_falcon_b *hwinfo)
++{
++ return ef_bend_hwinfo_falcon_common(bend, hwinfo);
++}
++
++
++/*
++ * Fill in the message with a description of the hardware resources, based on
++ * the H/W type
++ */
++static int netback_accel_hwinfo(struct netback_accel *bend,
++ struct net_accel_msg_hw *msgvi)
++{
++ int rc = 0;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);
++
++ msgvi->type = bend->hw_type;
++ switch (bend->hw_type) {
++ case NET_ACCEL_MSG_HWTYPE_FALCON_A:
++ rc = ef_bend_hwinfo_falcon_a(bend, &msgvi->resources.falcon_a);
++ break;
++ case NET_ACCEL_MSG_HWTYPE_FALCON_B:
++ rc = ef_bend_hwinfo_falcon_b(bend, &msgvi->resources.falcon_b);
++ break;
++ case NET_ACCEL_MSG_HWTYPE_NONE:
++ /* Nothing to do. The slow path should just work. */
++ break;
++ }
++
++ if (rc == 0)
++ bend->hw_state = NETBACK_ACCEL_RES_HWINFO;
++
++ return rc;
++}
++
++
++/* Allocate hardware resources and make them available to the client domain */
++int netback_accel_setup_vnic_hw(struct netback_accel *bend)
++{
++ struct net_accel_msg msg;
++ int err;
++
++ /* Allocate the event queue, VI and so on. */
++ err = ef_get_vnic(bend);
++ if (err) {
++		EPRINTK("Failed to allocate hardware resource for bend: "
++			"error %d\n", err);
++ return err;
++ }
++
++ /* Set up the filter management */
++ err = netback_accel_filter_init(bend);
++ if (err) {
++		EPRINTK("Filter setup failed, error %d\n", err);
++ ef_free_vnic(bend);
++ return err;
++ }
++
++ net_accel_msg_init(&msg, NET_ACCEL_MSG_SETHW);
++
++ /*
++ * Extract the low-level hardware info we will actually pass to the
++ * other end, and set up the grants/ioremap permissions needed
++ */
++ err = netback_accel_hwinfo(bend, &msg.u.hw);
++
++ if (err != 0) {
++ netback_accel_filter_shutdown(bend);
++ ef_free_vnic(bend);
++ return err;
++ }
++
++ /* Send the message, this is a reply to a hello-reply */
++ err = net_accel_msg_reply_notify(bend->shared_page,
++ bend->msg_channel_irq,
++ &bend->to_domU, &msg);
++
++ /*
++ * The message should succeed as it's logically a reply and we
++	 * guarantee space for replies, but a misbehaving frontend
++	 * could still cause it to fail, so be tolerant
++ */
++ if (err != 0) {
++ netback_accel_release_hwinfo(bend);
++ netback_accel_filter_shutdown(bend);
++ ef_free_vnic(bend);
++ }
++
++ return err;
++}
++
++
++/* Free hardware resources */
++void netback_accel_shutdown_vnic_hw(struct netback_accel *bend)
++{
++ /*
++ * Only try and release resources if accel_hw_priv was setup,
++ * otherwise there is nothing to do as we're on "null-op"
++ * acceleration
++ */
++ switch (bend->hw_state) {
++ case NETBACK_ACCEL_RES_HWINFO:
++ VPRINTK("Release hardware resources\n");
++ netback_accel_release_hwinfo(bend);
++ /* deliberate drop through */
++ case NETBACK_ACCEL_RES_FILTER:
++ VPRINTK("Free filters...\n");
++ netback_accel_filter_shutdown(bend);
++ /* deliberate drop through */
++ case NETBACK_ACCEL_RES_ALLOC:
++ VPRINTK("Free vnic...\n");
++ ef_free_vnic(bend);
++ /* deliberate drop through */
++ case NETBACK_ACCEL_RES_NONE:
++ break;
++ default:
++ BUG();
++ }
++}
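
The switch above tears resources down in strict reverse order of setup by dropping through from the most advanced state to the least. A standalone model of the same shape:

    #include <stdio.h>

    enum res_state { RES_NONE, RES_ALLOC, RES_FILTER, RES_HWINFO };

    static void shutdown_model(enum res_state state)
    {
        switch (state) {
        case RES_HWINFO:
            puts("release hw info");
            /* deliberate drop through */
        case RES_FILTER:
            puts("free filters");
            /* deliberate drop through */
        case RES_ALLOC:
            puts("free vnic");
            /* deliberate drop through */
        case RES_NONE:
            break;
        }
    }

    int main(void)
    {
        shutdown_model(RES_HWINFO);  /* prints all three teardown steps */
        return 0;
    }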
++
++/**************************************************************************
++ *
++ * Buffer table stuff
++ *
++ **************************************************************************/
++
++/*
++ * Undo any allocation that netback_accel_msg_rx_buffer_map() has made
++ * if it fails half way through
++ */
++static inline void buffer_map_cleanup(struct netback_accel *bend, int i)
++{
++ while (i > 0) {
++ i--;
++ bend->buffer_maps_index--;
++ net_accel_unmap_device_page(bend->hdev_data,
++ bend->buffer_maps[bend->buffer_maps_index],
++ bend->buffer_addrs[bend->buffer_maps_index]);
++ }
++}
++
++
++int netback_accel_add_buffers(struct netback_accel *bend, int pages, int log2_pages,
++ u32 *grants, u32 *buf_addr_out)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ unsigned long long addr_array[NET_ACCEL_MSG_MAX_PAGE_REQ];
++ int rc, i, index;
++ u64 dev_bus_addr;
++
++ /* Make sure we can't overflow the dma_maps array */
++ if (accel_hw_priv->dma_maps_index >=
++ bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ) {
++ EPRINTK("%s: too many buffer table allocations: %d %d\n",
++ __FUNCTION__, accel_hw_priv->dma_maps_index,
++ bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ);
++ return -EINVAL;
++ }
++
++ /* Make sure we can't overflow the buffer_maps array */
++ if (bend->buffer_maps_index + pages > bend->max_pages) {
++ EPRINTK("%s: too many pages mapped: %d + %d > %d\n",
++ __FUNCTION__, bend->buffer_maps_index,
++ pages, bend->max_pages);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < pages; i++) {
++ VPRINTK("%s: mapping page %d\n", __FUNCTION__, i);
++ rc = net_accel_map_device_page
++ (bend->hdev_data, grants[i],
++ &bend->buffer_maps[bend->buffer_maps_index],
++ &dev_bus_addr);
++
++ if (rc != 0) {
++ EPRINTK("error in net_accel_map_device_page\n");
++ buffer_map_cleanup(bend, i);
++ return rc;
++ }
++
++ bend->buffer_addrs[bend->buffer_maps_index] = dev_bus_addr;
++
++ bend->buffer_maps_index++;
++
++ addr_array[i] = dev_bus_addr;
++ }
++
++ VPRINTK("%s: mapping dma addresses to vih %p\n", __FUNCTION__,
++ accel_hw_priv->efx_vih);
++
++ index = accel_hw_priv->dma_maps_index;
++ if ((rc = efx_vi_dma_map_addrs(accel_hw_priv->efx_vih, addr_array, pages,
++ &(accel_hw_priv->dma_maps[index]))) < 0) {
++ EPRINTK("error in dma_map_pages\n");
++ buffer_map_cleanup(bend, i);
++ return rc;
++ }
++
++ accel_hw_priv->dma_maps_index++;
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages += pages);
++
++ *buf_addr_out = efx_vi_dma_get_map_addr(accel_hw_priv->efx_vih,
++ accel_hw_priv->dma_maps[index]);
++
++ return 0;
++}
++
++
++int netback_accel_remove_buffers(struct netback_accel *bend)
++{
++ /* Only try to free buffers if accel_hw_priv was setup */
++ if (bend->hw_state != NETBACK_ACCEL_RES_NONE) {
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i;
++
++ efx_vi_reset(accel_hw_priv->efx_vih);
++
++ while (accel_hw_priv->dma_maps_index > 0) {
++ accel_hw_priv->dma_maps_index--;
++ i = accel_hw_priv->dma_maps_index;
++ efx_vi_dma_unmap_addrs(accel_hw_priv->efx_vih,
++ accel_hw_priv->dma_maps[i]);
++ }
++
++ while (bend->buffer_maps_index > 0) {
++ VPRINTK("Unmapping granted buffer %d\n",
++ bend->buffer_maps_index);
++ bend->buffer_maps_index--;
++ i = bend->buffer_maps_index;
++ net_accel_unmap_device_page(bend->hdev_data,
++ bend->buffer_maps[i],
++ bend->buffer_addrs[i]);
++ }
++
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages = 0);
++ }
++
++ return 0;
++}
++
++/**************************************************************************
++ *
++ * Filter stuff
++ *
++ **************************************************************************/
++
++static int netback_accel_filter_init(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i, rc;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);
++
++ spin_lock_init(&accel_hw_priv->filter_lock);
++
++ if ((rc = cuckoo_hash_init(&accel_hw_priv->filter_hash_table,
++ 5 /* space for 32 filters */, 8)) != 0) {
++ EPRINTK("Failed to initialise filter hash table\n");
++ return rc;
++ }
++
++ accel_hw_priv->fspecs = kzalloc(sizeof(struct netback_accel_filter_spec) *
++ bend->quotas.max_filters,
++ GFP_KERNEL);
++
++ if (accel_hw_priv->fspecs == NULL) {
++ EPRINTK("No memory for filter specs.\n");
++ cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < bend->quotas.max_filters; i++) {
++ accel_hw_priv->free_filters |= (1 << i);
++ }
++
++ /* Base mask on highest set bit in max_filters */
++ accel_hw_priv->filter_idx_mask = (1 << fls(bend->quotas.max_filters)) - 1;
++ VPRINTK("filter setup: max is %x mask is %x\n",
++ bend->quotas.max_filters, accel_hw_priv->filter_idx_mask);
++
++ bend->hw_state = NETBACK_ACCEL_RES_FILTER;
++
++ return 0;
++}
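
free_filters is a bitmap free list: a set bit means the slot is free, allocation takes the lowest set bit (__ffs() plus clear_bit() in the driver code), and freeing sets the bit again. A userspace model:

    #include <stdio.h>

    static int lowest_set_bit(unsigned long map)
    {
        int i;

        for (i = 0; i < (int)(8 * sizeof(map)); i++)
            if (map & (1UL << i))
                return i;
        return -1;  /* no free slot */
    }

    int main(void)
    {
        unsigned long free_filters = 0;
        int i, idx;

        for (i = 0; i < 8; i++)          /* eight filters start free */
            free_filters |= 1UL << i;

        idx = lowest_set_bit(free_filters);
        free_filters &= ~(1UL << idx);   /* allocate: clear the bit */
        printf("allocated filter %d\n", idx);

        free_filters |= 1UL << idx;      /* free: set the bit again */
        return 0;
    }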
++
++
++static inline void make_filter_key(cuckoo_hash_ip_key *key,
++ struct netback_accel_filter_spec *filt)
++
++{
++ key->local_ip = filt->destip_be;
++ key->local_port = filt->destport_be;
++ key->proto = filt->proto;
++}
++
++
++static inline
++void netback_accel_free_filter(struct falcon_bend_accel_priv *accel_hw_priv,
++ int filter)
++{
++ cuckoo_hash_ip_key filter_key;
++
++ if (!(accel_hw_priv->free_filters & (1 << filter))) {
++ efx_vi_filter_stop(accel_hw_priv->efx_vih,
++ accel_hw_priv->fspecs[filter].filter_handle);
++ make_filter_key(&filter_key, &(accel_hw_priv->fspecs[filter]));
++ if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&filter_key)) {
++ EPRINTK("%s: Couldn't find filter to remove from table\n",
++ __FUNCTION__);
++ BUG();
++ }
++ }
++}
++
++
++static void netback_accel_filter_shutdown(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i;
++ unsigned long flags;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);
++
++ spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
++
++ BUG_ON(accel_hw_priv->fspecs == NULL);
++
++ for (i = 0; i < bend->quotas.max_filters; i++) {
++ netback_accel_free_filter(accel_hw_priv, i);
++ }
++
++ kfree(accel_hw_priv->fspecs);
++ accel_hw_priv->fspecs = NULL;
++ accel_hw_priv->free_filters = 0;
++
++ cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
++
++ spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
++
++ bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
++}
++
++
++/*! Suggest a filter to replace when we want to insert a new one and have
++ * none free.
++ */
++static unsigned get_victim_filter(struct netback_accel *bend)
++{
++ /*
++ * We could attempt to get really clever, and may do at some
++	 * point, but random replacement is very cheap and has few
++	 * pathological worst cases.
++ */
++ unsigned index, cycles;
++
++ rdtscl(cycles);
++
++ /*
++ * Some doubt about the quality of the bottom few bits, so
++	 * throw 'em away
++ */
++ index = (cycles >> 4) & ((struct falcon_bend_accel_priv *)
++ bend->accel_hw_priv)->filter_idx_mask;
++ /*
++ * We don't enforce that the number of filters is a power of
++ * two, but the masking gets us to within one subtraction of a
++ * valid index
++ */
++ if (index >= bend->quotas.max_filters)
++ index -= bend->quotas.max_filters;
++ DPRINTK("backend %s->%d has no free filters. Filter %d will be evicted\n",
++ bend->nicname, bend->far_end, index);
++ return index;
++}
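
The masking trick above works because filter_idx_mask covers the next power of two above max_filters, so the masked value is at most one subtraction away from a valid index. A standalone model using a cheap clock value in place of rdtscl():

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        unsigned max_filters = 12;          /* not a power of two */
        unsigned mask = (1u << 4) - 1;      /* fls(12) == 4, so 0..15 */
        unsigned index = ((unsigned)clock() >> 4) & mask;

        if (index >= max_filters)           /* one subtraction fixes up */
            index -= max_filters;
        printf("victim index %u (always < %u)\n", index, max_filters);
        return 0;
    }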
++
++
++/* Add a filter for the specified IP/port to the backend */
++int
++netback_accel_filter_check_add(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ struct netback_accel_filter_spec *fs;
++ unsigned filter_index;
++ unsigned long flags;
++ int rc, recycling = 0;
++ cuckoo_hash_ip_key filter_key, evict_key;
++
++ BUG_ON(filt->proto != IPPROTO_TCP && filt->proto != IPPROTO_UDP);
++
++ DPRINTK("Will add %s filter for dst ip %08x and dst port %d\n",
++ (filt->proto == IPPROTO_TCP) ? "TCP" : "UDP",
++ be32_to_cpu(filt->destip_be), be16_to_cpu(filt->destport_be));
++
++ spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
++ /*
++ * Check to see if we're already filtering this IP address and
++ * port. Happens if you insert a filter mid-stream as there
++ * are many packets backed up to be delivered to dom0 already
++ */
++ make_filter_key(&filter_key, filt);
++ if (cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)(&filter_key),
++ &filter_index)) {
++ DPRINTK("Found matching filter %d already in table\n",
++ filter_index);
++ rc = -1;
++ goto out;
++ }
++
++ if (accel_hw_priv->free_filters == 0) {
++ filter_index = get_victim_filter(bend);
++ recycling = 1;
++ } else {
++ filter_index = __ffs(accel_hw_priv->free_filters);
++ clear_bit(filter_index, &accel_hw_priv->free_filters);
++ }
++
++ fs = &accel_hw_priv->fspecs[filter_index];
++
++ if (recycling) {
++ DPRINTK("Removing filter index %d handle %p\n", filter_index,
++ fs->filter_handle);
++
++ if ((rc = efx_vi_filter_stop(accel_hw_priv->efx_vih,
++ fs->filter_handle)) != 0) {
++ EPRINTK("Couldn't clear NIC filter table entry %d\n", rc);
++ }
++
++ make_filter_key(&evict_key, fs);
++ if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&evict_key)) {
++ EPRINTK("Couldn't find filter to remove from table\n");
++ BUG();
++ }
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_filters--);
++ }
++
++ /* Update the filter spec with new details */
++ *fs = *filt;
++
++ if ((rc = cuckoo_hash_add(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&filter_key, filter_index,
++ 1)) != 0) {
++ EPRINTK("Error (%d) adding filter to table\n", rc);
++ accel_hw_priv->free_filters |= (1 << filter_index);
++ goto out;
++ }
++
++ rc = efx_vi_filter(accel_hw_priv->efx_vih, filt->proto, filt->destip_be,
++ filt->destport_be,
++ (struct filter_resource_t **)&fs->filter_handle);
++
++ if (rc != 0) {
++ EPRINTK("Hardware filter insertion failed. Error %d\n", rc);
++ accel_hw_priv->free_filters |= (1 << filter_index);
++ cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&filter_key);
++ rc = -1;
++ goto out;
++ }
++
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_filters++);
++
++ VPRINTK("%s: success index %d handle %p\n", __FUNCTION__, filter_index,
++ fs->filter_handle);
++
++ rc = filter_index;
++ out:
++ spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
++ return rc;
++}
++
++
++/* Remove a filter entry for the specific device and IP/port */
++static void netback_accel_filter_remove(struct netback_accel *bend,
++ int filter_index)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++
++ BUG_ON(accel_hw_priv->free_filters & (1 << filter_index));
++ netback_accel_free_filter(accel_hw_priv, filter_index);
++ accel_hw_priv->free_filters |= (1 << filter_index);
++}
++
++
++/* Remove a filter entry for the specific device and IP/port */
++void netback_accel_filter_remove_spec(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ unsigned filter_found;
++ unsigned long flags;
++ cuckoo_hash_ip_key filter_key;
++ struct netback_accel_filter_spec *fs;
++
++ if (filt->proto == IPPROTO_TCP) {
++ DPRINTK("Remove TCP filter for dst ip %08x and dst port %d\n",
++ be32_to_cpu(filt->destip_be),
++ be16_to_cpu(filt->destport_be));
++ } else if (filt->proto == IPPROTO_UDP) {
++ DPRINTK("Remove UDP filter for dst ip %08x and dst port %d\n",
++ be32_to_cpu(filt->destip_be),
++ be16_to_cpu(filt->destport_be));
++ } else {
++ /*
++ * This could be provoked by an evil frontend, so can't
++ * BUG(), but harmless as it should fail tests below
++ */
++ DPRINTK("Non-TCP/UDP filter dst ip %08x and dst port %d\n",
++ be32_to_cpu(filt->destip_be),
++ be16_to_cpu(filt->destport_be));
++ }
++
++ spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
++
++ make_filter_key(&filter_key, filt);
++ if (!cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)(&filter_key),
++ &filter_found)) {
++ EPRINTK("Couldn't find matching filter already in table\n");
++ goto out;
++ }
++
++ /* Do a full check to make sure we've not had a hash collision */
++ fs = &accel_hw_priv->fspecs[filter_found];
++ if (fs->destip_be == filt->destip_be &&
++ fs->destport_be == filt->destport_be &&
++ fs->proto == filt->proto &&
++ !memcmp(fs->mac, filt->mac, ETH_ALEN)) {
++ netback_accel_filter_remove(bend, filter_found);
++ } else {
++ EPRINTK("Entry in hash table does not match filter spec\n");
++ goto out;
++ }
++
++ out:
++ spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel_solarflare.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_solarflare.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel_solarflare.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_solarflare.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,88 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETBACK_ACCEL_SOLARFLARE_H
++#define NETBACK_ACCEL_SOLARFLARE_H
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++
++#include "driverlink_api.h"
++
++#define MAX_NICS 5
++#define MAX_PORTS 2
++
++
++extern int netback_accel_sf_init(void);
++extern void netback_accel_sf_shutdown(void);
++extern int netback_accel_sf_hwtype(struct netback_accel *bend);
++
++extern int netback_accel_sf_char_init(void);
++extern void netback_accel_sf_char_shutdown(void);
++
++extern int netback_accel_setup_vnic_hw(struct netback_accel *bend);
++extern void netback_accel_shutdown_vnic_hw(struct netback_accel *bend);
++
++extern int netback_accel_add_buffers(struct netback_accel *bend, int pages,
++ int log2_pages, u32 *grants,
++ u32 *buf_addr_out);
++extern int netback_accel_remove_buffers(struct netback_accel *bend);
++
++
++/* Add a filter for the specified IP/port to the backend */
++extern int
++netback_accel_filter_check_add(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt);
++/* Remove a filter entry for the specific device and IP/port */
++extern
++void netback_accel_filter_remove_index(struct netback_accel *bend,
++ int filter_index);
++extern
++void netback_accel_filter_remove_spec(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt);
++
++/* This is designed to look a bit like a skb */
++struct netback_pkt_buf {
++ union {
++ unsigned char *raw;
++ } mac;
++ union {
++ struct iphdr *iph;
++ struct arphdr *arph;
++ unsigned char *raw;
++ } nh;
++ int protocol;
++};
++
++/*! \brief Handle a received packet: insert fast path filters as necessary
++ * \param skb The packet buffer
++ */
++extern void netback_accel_rx_packet(struct netback_pkt_buf *skb, void *fwd_priv);
++
++/*! \brief Handle a transmitted packet: update fast path filters as necessary
++ * \param skb The packet buffer
++ */
++extern void netback_accel_tx_packet(struct sk_buff *skb, void *fwd_priv);
++
++#endif /* NETBACK_ACCEL_SOLARFLARE_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/accel_xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_xenbus.c
+--- linux-2.6.18.8/drivers/xen/sfc_netback/accel_xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/accel_xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,831 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/evtchn.h>
++#include <linux/mutex.h>
++
++/* drivers/xen/netback/common.h */
++#include "common.h"
++
++#include "accel.h"
++#include "accel_solarflare.h"
++#include "accel_util.h"
++
++#define NODENAME_PATH_FMT "backend/vif/%d/%d"
++
++#define NETBACK_ACCEL_FROM_XENBUS_DEVICE(_dev) (struct netback_accel *) \
++ ((struct backend_info *)(_dev)->dev.driver_data)->netback_accel_priv
++
++/* List of all the bends currently in existence. */
++struct netback_accel *bend_list = NULL;
++DEFINE_MUTEX(bend_list_mutex);
++
++/* Put in bend_list. Must hold bend_list_mutex */
++static void link_bend(struct netback_accel *bend)
++{
++ bend->next_bend = bend_list;
++ bend_list = bend;
++}
++
++/* Remove from bend_list, Must hold bend_list_mutex */
++static void unlink_bend(struct netback_accel *bend)
++{
++ struct netback_accel *tmp = bend_list;
++ struct netback_accel *prev = NULL;
++ while (tmp != NULL) {
++ if (tmp == bend) {
++ if (prev != NULL)
++ prev->next_bend = bend->next_bend;
++ else
++ bend_list = bend->next_bend;
++ return;
++ }
++ prev = tmp;
++ tmp = tmp->next_bend;
++ }
++}
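
unlink_bend() walks the list with an explicit prev pointer. An equivalent, slightly shorter idiom (shown here as a standalone sketch, not a change to the patch) uses a pointer-to-pointer so the head and interior cases collapse into one:

    #include <stdio.h>

    struct bend_model { int id; struct bend_model *next; };

    static void unlink_model(struct bend_model **head,
                             struct bend_model *victim)
    {
        struct bend_model **pp;

        for (pp = head; *pp != NULL; pp = &(*pp)->next) {
            if (*pp == victim) {
                *pp = victim->next;  /* same code for head and middle */
                return;
            }
        }
    }

    int main(void)
    {
        struct bend_model c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct bend_model *list = &a, *p;

        unlink_model(&list, &b);
        for (p = list; p != NULL; p = p->next)
            printf("bend %d\n", p->id);  /* prints 1 then 3 */
        return 0;
    }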
++
++
++/* Demultiplex a message IRQ from the frontend driver. */
++static irqreturn_t msgirq_from_frontend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ struct xenbus_device *dev = context;
++ struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
++ VPRINTK("irq %d from device %s\n", irq, dev->nodename);
++ schedule_work(&bend->handle_msg);
++ return IRQ_HANDLED;
++}
++
++
++/*
++ * Demultiplex an IRQ from the frontend driver. This is never used
++ * functionally, but we need it to pass to the bind function, and may
++ * get called spuriously
++ */
++static irqreturn_t netirq_from_frontend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ VPRINTK("netirq %d from device %s\n", irq,
++ ((struct xenbus_device *)context)->nodename);
++
++ return IRQ_HANDLED;
++}
++
++
++/* Read the limits values of the xenbus structure. */
++static
++void cfg_hw_quotas(struct xenbus_device *dev, struct netback_accel *bend)
++{
++ int err = xenbus_gather
++ (XBT_NIL, dev->nodename,
++ "limits/max-filters", "%d", &bend->quotas.max_filters,
++ "limits/max-buf-pages", "%d", &bend->quotas.max_buf_pages,
++ "limits/max-mcasts", "%d", &bend->quotas.max_mcasts,
++ NULL);
++ if (err) {
++ /*
++ * TODO what if they have previously been set by the
++ * user? This will overwrite with defaults. Maybe
++ * not what we want to do, but useful in startup
++ * case
++ */
++ DPRINTK("Failed to read quotas from xenbus, using defaults\n");
++ bend->quotas.max_filters = NETBACK_ACCEL_DEFAULT_MAX_FILTERS;
++ bend->quotas.max_buf_pages = sfc_netback_max_pages;
++ bend->quotas.max_mcasts = NETBACK_ACCEL_DEFAULT_MAX_MCASTS;
++ }
++
++ return;
++}
++
++
++static void bend_config_accel_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct netback_accel *bend;
++
++ bend = container_of(watch, struct netback_accel, config_accel_watch);
++
++ mutex_lock(&bend->bend_mutex);
++ if (bend->config_accel_watch.node != NULL) {
++ struct xenbus_device *dev =
++ (struct xenbus_device *)bend->hdev_data;
++ DPRINTK("Watch matched, got dev %p otherend %p\n",
++ dev, dev->otherend);
++		if (!xenbus_exists(XBT_NIL, watch->node, "")) {
++ DPRINTK("Ignoring watch as otherend seems invalid\n");
++ goto out;
++ }
++
++ cfg_hw_quotas(dev, bend);
++ }
++ out:
++ mutex_unlock(&bend->bend_mutex);
++ return;
++}
++
++
++/*
++ * Setup watch on "limits" in the backend vif info to know when
++ * configuration has been set
++ */
++static int setup_config_accel_watch(struct xenbus_device *dev,
++ struct netback_accel *bend)
++{
++ int err;
++
++ VPRINTK("Setting watch on %s/%s\n", dev->nodename, "limits");
++
++ err = xenbus_watch_path2(dev, dev->nodename, "limits",
++ &bend->config_accel_watch,
++ bend_config_accel_change);
++
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ bend->config_accel_watch.node = NULL;
++ return err;
++ }
++ return 0;
++}
++
++
++static int
++cfg_frontend_info(struct xenbus_device *dev, struct netback_accel *bend,
++ int *grants)
++{
++ /* Get some info from xenbus on the event channel and shmem grant */
++ int err = xenbus_gather(XBT_NIL, dev->otherend,
++ "accel-msg-channel", "%u", &bend->msg_channel,
++ "accel-ctrl-page", "%d", &(grants[0]),
++ "accel-msg-page", "%d", &(grants[1]),
++ "accel-net-channel", "%u", &bend->net_channel,
++ NULL);
++ if (err)
++ EPRINTK("failed to read event channels or shmem grant: %d\n",
++ err);
++ else
++ DPRINTK("got event chan %d and net chan %d from frontend\n",
++ bend->msg_channel, bend->net_channel);
++ return err;
++}
++
++
++/* Setup all the comms needed to chat with the front end driver */
++static int setup_vnic(struct xenbus_device *dev)
++{
++ struct netback_accel *bend;
++ int grants[2], err, msgs_per_queue;
++
++ bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
++
++ err = cfg_frontend_info(dev, bend, grants);
++ if (err)
++ goto fail1;
++
++ /*
++	 * If we get here, the frontend is Connected and the
++	 * configuration options are available. All is well.
++ */
++
++ /* Get the hardware quotas for the VNIC in question. */
++ cfg_hw_quotas(dev, bend);
++
++ /* Set up the deferred work handlers */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ INIT_WORK(&bend->handle_msg,
++ netback_accel_msg_rx_handler);
++#else
++ INIT_WORK(&bend->handle_msg,
++ netback_accel_msg_rx_handler,
++ (void*)bend);
++#endif
++
++ /* Request the frontend mac */
++ err = net_accel_xen_net_read_mac(dev, bend->mac);
++ if (err)
++ goto fail2;
++
++ /* Set up the shared page. */
++ bend->shared_page = net_accel_map_grants_contig(dev, grants, 2,
++ &bend->sh_pages_unmap);
++
++ if (bend->shared_page == NULL) {
++ EPRINTK("failed to map shared page for %s\n", dev->otherend);
++ err = -ENOMEM;
++ goto fail2;
++ }
++
++ /* Initialise the shared page(s) used for comms */
++ net_accel_msg_init_page(bend->shared_page, PAGE_SIZE,
++ bend->net_dev->flags & IFF_UP);
++
++ msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
++
++ net_accel_msg_init_queue
++ (&bend->to_domU, &bend->shared_page->queue0,
++ (struct net_accel_msg *)((__u8*)bend->shared_page + PAGE_SIZE),
++ msgs_per_queue);
++
++ net_accel_msg_init_queue
++ (&bend->from_domU, &bend->shared_page->queue1,
++ (struct net_accel_msg *)((__u8*)bend->shared_page +
++ (3 * PAGE_SIZE / 2)),
++ msgs_per_queue);
++
++ /* Bind the message event channel to a handler
++ *
++ * Note that we will probably get a spurious interrupt when we
++ * do this, so it must not be done until we have set up
++ * everything we need to handle it.
++ */
++ err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
++ bend->msg_channel,
++ msgirq_from_frontend,
++ 0,
++ "netback_accel",
++ dev);
++ if (err < 0) {
++ EPRINTK("failed to bind event channel: %d\n", err);
++ goto fail3;
++ }
++ else
++ bend->msg_channel_irq = err;
++
++ /* TODO: No need to bind this evtchn to an irq. */
++ err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
++ bend->net_channel,
++ netirq_from_frontend,
++ 0,
++ "netback_accel",
++ dev);
++ if (err < 0) {
++ EPRINTK("failed to bind net channel: %d\n", err);
++ goto fail4;
++ }
++ else
++ bend->net_channel_irq = err;
++
++ /*
++ * Grab ourselves an entry in the forwarding hash table. We do
++	 * this now so we don't have the embarrassment of sorting out
++	 * an allocation failure in IRQ context. Because we pass NULL as
++ * the context, the actual hash lookup will succeed for this
++ * NIC, but the check for somewhere to forward to will
++ * fail. This is necessary to prevent forwarding before
++ * hardware resources are set up
++ */
++ err = netback_accel_fwd_add(bend->mac, NULL, bend->fwd_priv);
++ if (err) {
++ EPRINTK("failed to add to fwd hash table\n");
++ goto fail5;
++ }
++
++ /*
++ * Say hello to frontend. Important to do this straight after
++ * obtaining the message queue as otherwise we are vulnerable
++ * to an evil frontend sending a HELLO-REPLY before we've sent
++ * the HELLO and confusing us
++ */
++ netback_accel_msg_tx_hello(bend, NET_ACCEL_MSG_VERSION);
++ return 0;
++
++ fail5:
++ unbind_from_irqhandler(bend->net_channel_irq, dev);
++ fail4:
++ unbind_from_irqhandler(bend->msg_channel_irq, dev);
++ fail3:
++ net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
++ bend->shared_page = NULL;
++ bend->sh_pages_unmap = NULL;
++ fail2:
++ fail1:
++ return err;
++}
++
++
++static int read_nicname(struct xenbus_device *dev, struct netback_accel *bend)
++{
++ int len;
++
++ /* nic name used to select interface used for acceleration */
++ bend->nicname = xenbus_read(XBT_NIL, dev->nodename, "accel", &len);
++ if (IS_ERR(bend->nicname))
++ return PTR_ERR(bend->nicname);
++
++ return 0;
++}
++
++static const char *frontend_name = "sfc_netfront";
++
++static int publish_frontend_name(struct xenbus_device *dev)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ /* Publish the name of the frontend driver */
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: transaction start failed\n", __FUNCTION__);
++ return err;
++ }
++ err = xenbus_printf(tr, dev->nodename, "accel-frontend",
++ "%s", frontend_name);
++ if (err != 0) {
++ EPRINTK("%s: xenbus_printf failed\n", __FUNCTION__);
++ xenbus_transaction_end(tr, 1);
++ return err;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("failed to end frontend name transaction\n");
++ return err;
++ }
++ return 0;
++}
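
The do/while above is the standard xenbus transaction pattern: if the commit fails with -EAGAIN because another writer raced us, the whole transaction body is rerun. A minimal standalone model of the retry loop:

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Pretend the first two commits race with another writer. */
    static int txn_commit(void)
    {
        return ++attempts < 3 ? -EAGAIN : 0;
    }

    int main(void)
    {
        int err;

        do {
            /* start transaction, write the key, then try to commit */
            err = txn_commit();
        } while (err == -EAGAIN);

        printf("committed after %d attempts (err=%d)\n", attempts, err);
        return err ? 1 : 0;
    }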
++
++
++static int unpublish_frontend_name(struct xenbus_device *dev)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0)
++ break;
++ err = xenbus_rm(tr, dev->nodename, "accel-frontend");
++ if (err != 0) {
++ xenbus_transaction_end(tr, 1);
++ break;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ return err;
++}
++
++
++static void cleanup_vnic(struct netback_accel *bend)
++{
++ struct xenbus_device *dev;
++
++ dev = (struct xenbus_device *)bend->hdev_data;
++
++ DPRINTK("%s: bend %p dev %p\n", __FUNCTION__, bend, dev);
++
++ DPRINTK("%s: Remove %p's mac from fwd table...\n",
++ __FUNCTION__, bend);
++ netback_accel_fwd_remove(bend->mac, bend->fwd_priv);
++
++ /* Free buffer table allocations */
++ netback_accel_remove_buffers(bend);
++
++ DPRINTK("%s: Release hardware resources...\n", __FUNCTION__);
++ if (bend->accel_shutdown)
++ bend->accel_shutdown(bend);
++
++ if (bend->net_channel_irq) {
++ unbind_from_irqhandler(bend->net_channel_irq, dev);
++ bend->net_channel_irq = 0;
++ }
++
++ if (bend->msg_channel_irq) {
++ unbind_from_irqhandler(bend->msg_channel_irq, dev);
++ bend->msg_channel_irq = 0;
++ }
++
++ if (bend->sh_pages_unmap) {
++ DPRINTK("%s: Unmap grants %p\n", __FUNCTION__,
++ bend->sh_pages_unmap);
++ net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
++ bend->sh_pages_unmap = NULL;
++ bend->shared_page = NULL;
++ }
++}
++
++
++/*************************************************************************/
++
++/*
++ * The following code handles accelstate changes between the frontend
++ * and the backend. It calls setup_vnic and cleanup_vnic in matching
++ * pairs in response to transitions.
++ *
++ * Valid state transitions for Dom0 are as follows:
++ *
++ * Closed->Init on probe or in response to Init from domU
++ * Closed->Closing on error/remove
++ *
++ * Init->Connected in response to Connected from domU
++ * Init->Closing on error/remove or in response to Closing from domU
++ *
++ * Connected->Closing on error/remove or in response to Closing from domU
++ *
++ * Closing->Closed in response to Closed from domU
++ *
++ */
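++
++/*
++ * Illustrative normal lifecycle implied by the rules above (a sketch
++ * of one valid sequence, not an additional specification):
++ *
++ * frontend (domU): Init Connected Closing Closed
++ * backend (dom0): Closed->Init Init->Connected ->Closing ->Closed
++ *
++ * with setup_vnic() called on the Init->Connected step and
++ * cleanup_vnic() on the final transition to Closed.
++ */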
++
++
++static void netback_accel_frontend_changed(struct xenbus_device *dev,
++ XenbusState frontend_state)
++{
++ struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
++ XenbusState backend_state;
++
++ DPRINTK("%s: changing from %s to %s. nodename %s, otherend %s\n",
++ __FUNCTION__, xenbus_strstate(bend->frontend_state),
++ xenbus_strstate(frontend_state), dev->nodename, dev->otherend);
++
++ /*
++ * Ignore duplicate state changes. This can happen if the
++ * frontend changes state twice in quick succession and the
++ * first watch fires in the backend after the second
++ * transition has completed.
++ */
++ if (bend->frontend_state == frontend_state)
++ return;
++
++ bend->frontend_state = frontend_state;
++ backend_state = bend->backend_state;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (backend_state == XenbusStateClosed &&
++ !bend->removing)
++ backend_state = XenbusStateInitialising;
++ break;
++
++ case XenbusStateConnected:
++ if (backend_state == XenbusStateInitialising) {
++ if (!bend->vnic_is_setup &&
++ setup_vnic(dev) == 0) {
++ bend->vnic_is_setup = 1;
++ backend_state = XenbusStateConnected;
++ } else {
++ backend_state = XenbusStateClosing;
++ }
++ }
++ break;
++
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ default:
++ DPRINTK("Unknown state %s (%d) from frontend.\n",
++ xenbus_strstate(frontend_state), frontend_state);
++ /* Unknown state. Fall through. */
++ case XenbusStateClosing:
++ if (backend_state != XenbusStateClosed)
++ backend_state = XenbusStateClosing;
++
++ /*
++ * The bend will now persist (with watches active) in
++ * case the frontend comes back again, eg. after
++ * frontend module reload or suspend/resume
++ */
++
++ break;
++
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ if (bend->vnic_is_setup) {
++ bend->vnic_is_setup = 0;
++ cleanup_vnic(bend);
++ }
++
++ if (backend_state == XenbusStateClosing)
++ backend_state = XenbusStateClosed;
++ break;
++ }
++
++ if (backend_state != bend->backend_state) {
++ DPRINTK("Switching from state %s (%d) to %s (%d)\n",
++ xenbus_strstate(bend->backend_state),
++ bend->backend_state,
++ xenbus_strstate(backend_state), backend_state);
++ bend->backend_state = backend_state;
++ net_accel_update_state(dev, backend_state);
++ }
++
++ wake_up(&bend->state_wait_queue);
++}
++
++
++/* accelstate on the frontend's xenbus node has changed */
++static void bend_domu_accel_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int state;
++ struct netback_accel *bend;
++
++ bend = container_of(watch, struct netback_accel, domu_accel_watch);
++ if (bend->domu_accel_watch.node != NULL) {
++ struct xenbus_device *dev =
++ (struct xenbus_device *)bend->hdev_data;
++ VPRINTK("Watch matched, got dev %p otherend %p\n",
++ dev, dev->otherend);
++ /*
++ * The dev->otherend != NULL check protects against the
++ * watch firing when the domain goes away and we haven't
++ * yet cleaned up
++ */
++ if (!dev->otherend ||
++ !xenbus_exists(XBT_NIL, watch->node, "") ||
++ strncmp(dev->otherend, vec[XS_WATCH_PATH],
++ strlen(dev->otherend))) {
++ DPRINTK("Ignoring watch as otherend seems invalid\n");
++ return;
++ }
++
++ mutex_lock(&bend->bend_mutex);
++
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
++ &state);
++ netback_accel_frontend_changed(dev, state);
++
++ mutex_unlock(&bend->bend_mutex);
++ }
++}
++
++/* Setup watch on frontend's accelstate */
++static int setup_domu_accel_watch(struct xenbus_device *dev,
++ struct netback_accel *bend)
++{
++ int err;
++
++ VPRINTK("Setting watch on %s/%s\n", dev->otherend, "accelstate");
++
++ err = xenbus_watch_path2(dev, dev->otherend, "accelstate",
++ &bend->domu_accel_watch,
++ bend_domu_accel_change);
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++ return 0;
++ fail:
++ bend->domu_accel_watch.node = NULL;
++ return err;
++}
++
++
++int netback_accel_probe(struct xenbus_device *dev)
++{
++ struct netback_accel *bend;
++ struct backend_info *binfo;
++ int err;
++
++ DPRINTK("%s: passed device %s\n", __FUNCTION__, dev->nodename);
++
++ /* Allocate structure to store all our state... */
++ bend = kzalloc(sizeof(struct netback_accel), GFP_KERNEL);
++ if (bend == NULL) {
++ DPRINTK("%s: no memory for bend\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ mutex_init(&bend->bend_mutex);
++
++ mutex_lock(&bend->bend_mutex);
++
++ /* ...and store it where we can get at it */
++ binfo = (struct backend_info *) dev->dev.driver_data;
++ binfo->netback_accel_priv = bend;
++ /* And vice-versa */
++ bend->hdev_data = dev;
++
++ DPRINTK("%s: Adding bend %p to list\n", __FUNCTION__, bend);
++
++ init_waitqueue_head(&bend->state_wait_queue);
++ bend->vnic_is_setup = 0;
++ bend->frontend_state = XenbusStateUnknown;
++ bend->backend_state = XenbusStateClosed;
++ bend->removing = 0;
++
++ sscanf(dev->nodename, NODENAME_PATH_FMT, &bend->far_end,
++ &bend->vif_num);
++
++ err = read_nicname(dev, bend);
++ if (err) {
++ /*
++ * Technically not an error, just means we're not
++ * supposed to accelerate this
++ */
++ DPRINTK("failed to get device name\n");
++ goto fail_nicname;
++ }
++
++ /*
++ * Look up the device name in the list of NICs provided by
++ * driverlink to get the hardware type.
++ */
++ err = netback_accel_sf_hwtype(bend);
++ if (err) {
++ /*
++ * Technically not an error, just means we're not
++ * supposed to accelerate this, probably belongs to
++ * some other backend
++ */
++ DPRINTK("failed to match device name\n");
++ goto fail_init_type;
++ }
++
++ err = publish_frontend_name(dev);
++ if (err)
++ goto fail_publish;
++
++ err = netback_accel_debugfs_create(bend);
++ if (err)
++ goto fail_debugfs;
++
++ mutex_unlock(&bend->bend_mutex);
++
++ err = setup_config_accel_watch(dev, bend);
++ if (err)
++ goto fail_config_watch;
++
++ err = setup_domu_accel_watch(dev, bend);
++ if (err)
++ goto fail_domu_watch;
++
++ /*
++ * Indicate to the other end that we're ready to start unless
++ * the watch has already fired.
++ */
++ mutex_lock(&bend->bend_mutex);
++ if (bend->backend_state == XenbusStateClosed) {
++ bend->backend_state = XenbusStateInitialising;
++ net_accel_update_state(dev, XenbusStateInitialising);
++ }
++ mutex_unlock(&bend->bend_mutex);
++
++ mutex_lock(&bend_list_mutex);
++ link_bend(bend);
++ mutex_unlock(&bend_list_mutex);
++
++ return 0;
++
++fail_domu_watch:
++
++ unregister_xenbus_watch(&bend->config_accel_watch);
++ kfree(bend->config_accel_watch.node);
++fail_config_watch:
++
++ /*
++ * Flush the scheduled work queue before freeing bend to get
++ * rid of any pending netback_accel_msg_rx_handler()
++ */
++ flush_scheduled_work();
++
++ mutex_lock(&bend->bend_mutex);
++ net_accel_update_state(dev, XenbusStateUnknown);
++ netback_accel_debugfs_remove(bend);
++fail_debugfs:
++
++ unpublish_frontend_name(dev);
++fail_publish:
++
++ /* No need to reverse netback_accel_sf_hwtype. */
++fail_init_type:
++
++ kfree(bend->nicname);
++fail_nicname:
++ binfo->netback_accel_priv = NULL;
++ mutex_unlock(&bend->bend_mutex);
++ kfree(bend);
++ return err;
++}
++
++
++int netback_accel_remove(struct xenbus_device *dev)
++{
++ struct backend_info *binfo;
++ struct netback_accel *bend;
++ int frontend_state;
++
++ binfo = (struct backend_info *) dev->dev.driver_data;
++ bend = (struct netback_accel *) binfo->netback_accel_priv;
++
++ DPRINTK("%s: dev %p bend %p\n", __FUNCTION__, dev, bend);
++
++ BUG_ON(bend == NULL);
++
++ mutex_lock(&bend_list_mutex);
++ unlink_bend(bend);
++ mutex_unlock(&bend_list_mutex);
++
++ mutex_lock(&bend->bend_mutex);
++
++ /* Reject any requests to connect. */
++ bend->removing = 1;
++
++ /*
++ * Switch to closing to tell the other end that we're going
++ * away.
++ */
++ if (bend->backend_state != XenbusStateClosing) {
++ bend->backend_state = XenbusStateClosing;
++ net_accel_update_state(dev, XenbusStateClosing);
++ }
++
++ frontend_state = (int)XenbusStateUnknown;
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
++ &frontend_state);
++
++ mutex_unlock(&bend->bend_mutex);
++
++ /*
++ * Wait until this end goes to the closed state. This happens
++ * in response to the other end going to the closed state.
++ * Don't bother doing this if the other end is already closed
++ * because if it is then there is nothing to do.
++ */
++ if (frontend_state != (int)XenbusStateClosed &&
++ frontend_state != (int)XenbusStateUnknown)
++ wait_event(bend->state_wait_queue,
++ bend->backend_state == XenbusStateClosed);
++
++ unregister_xenbus_watch(&bend->domu_accel_watch);
++ kfree(bend->domu_accel_watch.node);
++
++ unregister_xenbus_watch(&bend->config_accel_watch);
++ kfree(bend->config_accel_watch.node);
++
++ /*
++ * Flush the scheduled work queue before freeing bend to get
++ * rid of any pending netback_accel_msg_rx_handler()
++ */
++ flush_scheduled_work();
++
++ mutex_lock(&bend->bend_mutex);
++
++ /* Tear down the vnic if it was set up. */
++ if (bend->vnic_is_setup) {
++ bend->vnic_is_setup = 0;
++ cleanup_vnic(bend);
++ }
++
++ bend->backend_state = XenbusStateUnknown;
++ net_accel_update_state(dev, XenbusStateUnknown);
++
++ netback_accel_debugfs_remove(bend);
++
++ unpublish_frontend_name(dev);
++
++ kfree(bend->nicname);
++
++ binfo->netback_accel_priv = NULL;
++
++ mutex_unlock(&bend->bend_mutex);
++
++ kfree(bend);
++
++ return 0;
++}
++
++
++void netback_accel_shutdown_bends(void)
++{
++ mutex_lock(&bend_list_mutex);
++ /*
++ * I think we should have had a remove callback for all
++ * interfaces before being allowed to unload the module
++ */
++ BUG_ON(bend_list != NULL);
++ mutex_unlock(&bend_list_mutex);
++}
++
++
++void netback_accel_set_closing(struct netback_accel *bend)
++{
++ bend->backend_state = XenbusStateClosing;
++ net_accel_update_state((struct xenbus_device *)bend->hdev_data,
++ XenbusStateClosing);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/gcc.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/gcc.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/gcc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/gcc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,158 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_GCC_H__
++#define __CI_COMPAT_GCC_H__
++
++
++#define CI_HAVE_INT64
++
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++# include <linux/types.h>
++
++typedef __u64 ci_uint64;
++typedef __s64 ci_int64;
++# if BITS_PER_LONG == 32
++typedef __s32 ci_ptr_arith_t;
++typedef __u32 ci_uintptr_t;
++# else
++typedef __s64 ci_ptr_arith_t;
++typedef __u64 ci_uintptr_t;
++# endif
++
++
++/* it's not obvious to me why the "l" forms below are wrong for x86_64, but
++ * gcc seems to complain on this platform
++ */
++# if defined(__ia64__)
++# define CI_PRId64 "ld"
++# define CI_PRIi64 "li"
++# define CI_PRIo64 "lo"
++# define CI_PRIu64 "lu"
++# define CI_PRIx64 "lx"
++# define CI_PRIX64 "lX"
++# else
++# define CI_PRId64 "lld"
++# define CI_PRIi64 "lli"
++# define CI_PRIo64 "llo"
++# define CI_PRIu64 "llu"
++# define CI_PRIx64 "llx"
++# define CI_PRIX64 "llX"
++# endif
++
++# define CI_PRId32 "d"
++# define CI_PRIi32 "i"
++# define CI_PRIo32 "o"
++# define CI_PRIu32 "u"
++# define CI_PRIx32 "x"
++# define CI_PRIX32 "X"
++
++#else
++
++# include <stdint.h>
++# include <inttypes.h>
++
++typedef uint64_t ci_uint64;
++typedef int64_t ci_int64;
++typedef intptr_t ci_ptr_arith_t;
++typedef uintptr_t ci_uintptr_t;
++
++# define CI_PRId64 PRId64
++# define CI_PRIi64 PRIi64
++# define CI_PRIo64 PRIo64
++# define CI_PRIu64 PRIu64
++# define CI_PRIx64 PRIx64
++# define CI_PRIX64 PRIX64
++
++# define CI_PRId32 PRId32
++# define CI_PRIi32 PRIi32
++# define CI_PRIo32 PRIo32
++# define CI_PRIu32 PRIu32
++# define CI_PRIx32 PRIx32
++# define CI_PRIX32 PRIX32
++
++#endif
++
++
++typedef ci_uint64 ci_fixed_descriptor_t;
++
++#define from_fixed_descriptor(desc) ((ci_uintptr_t)(desc))
++#define to_fixed_descriptor(desc) ((ci_fixed_descriptor_t)(ci_uintptr_t)(desc))
++
++
++#if __GNUC__ >= 3 && !defined(__cplusplus)
++/*
++** Checks that [p_mbr] has the same type as [&c_type::mbr_name].
++*/
++# define CI_CONTAINER(c_type, mbr_name, p_mbr) \
++ __builtin_choose_expr( \
++ __builtin_types_compatible_p(__typeof__(&((c_type*)0)->mbr_name), \
++ __typeof__(p_mbr)), \
++ __CI_CONTAINER(c_type, mbr_name, p_mbr), (void)0)
++
++# define ci_restrict __restrict__
++#endif
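++
++/* Illustrative use of CI_CONTAINER (the struct and function below are
++** examples only, not part of this header): recover a pointer to an
++** enclosing struct from a pointer to one of its members, with the
++** member's type checked at compile time.
++**
++** struct foo { int a; ci_uint32 b; };
++**
++** static inline struct foo* foo_of_b(ci_uint32* pb)
++** { return CI_CONTAINER(struct foo, b, pb); }
++*/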
++
++
++#if !defined(__KERNEL__) || defined(__unix__)
++#define CI_HAVE_NPRINTF 1
++#endif
++
++
++/* At what version was this introduced? */
++#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ > 91)
++# define CI_LIKELY(t) __builtin_expect((t), 1)
++# define CI_UNLIKELY(t) __builtin_expect((t), 0)
++#endif
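++
++/* Typical (illustrative) use: annotate rarely-taken paths, e.g.
++** if( CI_UNLIKELY(rc < 0) ) goto fail;
++** so that gcc lays out the common case as the fall-through path.
++*/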
++
++/**********************************************************************
++ * Attributes
++ */
++#if __GNUC__ >= 3 && defined(NDEBUG)
++# define CI_HF __attribute__((visibility("hidden")))
++# define CI_HV __attribute__((visibility("hidden")))
++#else
++# define CI_HF
++# define CI_HV
++#endif
++
++#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
++# define ci_noinline static __attribute__((__noinline__))
++/* (Linux 2.6 defines its own "noinline", so we use the "__noinline__" form) */
++#else
++# define ci_noinline static
++#endif
++
++#define CI_ALIGN(x) __attribute__ ((aligned (x)))
++
++#define CI_PRINTF_LIKE(a,b) __attribute__((format(printf,a,b)))
++
++#endif /* __CI_COMPAT_GCC_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/gcc_x86.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/gcc_x86.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/gcc_x86.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/gcc_x86.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,115 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_GCC_X86_H__
++#define __CI_COMPAT_GCC_X86_H__
++
++/*
++** The facts:
++**
++** SSE sfence
++** SSE2 lfence, mfence, pause
++*/
++
++/*
++ Barriers to enforce ordering with respect to:
++
++ normal memory use: ci_wmb, ci_rmb, ci_mb
++ IO bus access use: ci_wiob, ci_riob, ci_iob
++*/
++#if defined(__x86_64__)
++# define ci_x86_mb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
++#else
++# define ci_x86_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
++#endif
++
++/* ?? measure the latency impact of sfence on a modern processor before we
++ take a decision on how to integrate with respect to write-combining */
++
++/* DJR: I don't think we need to add "memory" here. It means the asm does
++** something to memory that GCC doesn't understand. But all this does is
++** commit changes that GCC thinks have already happened. NB. GCC will not
++** reorder across a __volatile__ __asm__ anyway.
++*/
++#define ci_gcc_fence() __asm__ __volatile__ ("")
++
++#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
++# define ci_x86_sfence() __asm__ __volatile__ ("sfence")
++# define ci_x86_lfence() __asm__ __volatile__ ("lfence")
++# define ci_x86_mfence() __asm__ __volatile__ ("mfence")
++#else
++# define ci_x86_sfence() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
++# define ci_x86_lfence() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xE8")
++# define ci_x86_mfence() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF0")
++#endif
++
++
++/* x86 processors up to the P4 Xeon store in-order, unless executing
++ streaming extensions or using write-combining.
++
++ Hence we do not define ci_wmb to use sfence by default. The requirement
++ is that we do not use write-combining to memory, and any code which uses
++ SSE extensions must call sfence directly.
++
++ We need to track non-Intel clones which may support out-of-order stores.
++*/
++
++#if CI_CPU_OOS
++# if CI_CPU_HAS_SSE
++# define ci_wmb() ci_x86_sfence()
++# else
++# define ci_wmb() ci_x86_mb()
++# endif
++#else
++# define ci_wmb() ci_gcc_fence()
++#endif
++
++#if CI_CPU_HAS_SSE2
++# define ci_rmb() ci_x86_lfence()
++# define ci_mb() ci_x86_mfence()
++# define ci_riob() ci_x86_lfence()
++# define ci_wiob() ci_x86_sfence()
++# define ci_iob() ci_x86_mfence()
++#else
++# if CI_CPU_HAS_SSE
++# define ci_wiob() ci_x86_sfence()
++# else
++# define ci_wiob() ci_x86_mb()
++# endif
++# define ci_rmb() ci_x86_mb()
++# define ci_mb() ci_x86_mb()
++# define ci_riob() ci_x86_mb()
++# define ci_iob() ci_x86_mb()
++#endif
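++
++/* Illustrative producer-side use of these barriers ([buf] is a
++** hypothetical shared structure): make the payload globally visible
++** before the flag that announces it.
++**
++** buf->payload = x;
++** ci_wmb(); -- order the payload store before the flag store
++** buf->ready = 1;
++*/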
++
++typedef unsigned long ci_phys_addr_t;
++#define ci_phys_addr_fmt "%lx"
++
++#endif /* __CI_COMPAT_GCC_X86_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/primitive.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/primitive.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/primitive.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/primitive.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,77 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_PRIMITIVE_H__
++#define __CI_COMPAT_PRIMITIVE_H__
++
++
++/**********************************************************************
++ * Primitive types.
++ */
++
++typedef unsigned char ci_uint8;
++typedef char ci_int8;
++
++typedef unsigned short ci_uint16;
++typedef short ci_int16;
++
++typedef unsigned int ci_uint32;
++typedef int ci_int32;
++
++/* 64-bit support is platform dependent. */
++
++
++/**********************************************************************
++ * Other fancy types.
++ */
++
++typedef ci_uint8 ci_octet;
++
++typedef enum {
++ CI_FALSE = 0,
++ CI_TRUE
++} ci_boolean_t;
++
++
++/**********************************************************************
++ * Some nice types you'd always assumed were standards.
++ * (Really, they are SYSV "standards".)
++ */
++
++#ifdef _WIN32
++typedef unsigned long ulong;
++typedef unsigned int uint;
++typedef char* caddr_t;
++#elif defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#elif defined(__linux__)
++#include <sys/types.h>
++#endif
++
++
++#endif /* __CI_COMPAT_PRIMITIVE_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,166 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_SYSDEP_H__
++#define __CI_COMPAT_SYSDEP_H__
++
++
++/**********************************************************************
++ * Platform definition fixups.
++ */
++
++#if defined(__ci_ul_driver__) && !defined(__ci_driver__)
++# define __ci_driver__
++#endif
++
++#if defined(__ci_driver__) && !defined(__ci_ul_driver__) && \
++ !defined(__KERNEL__)
++# define __KERNEL__
++#endif
++
++
++/**********************************************************************
++ * Sanity checks (no cheating!)
++ */
++
++#if defined(__KERNEL__) && !defined(__ci_driver__)
++# error Insane.
++#endif
++
++#if defined(__KERNEL__) && defined(__ci_ul_driver__)
++# error Madness.
++#endif
++
++#if defined(__unix__) && defined(_WIN32)
++# error Strange.
++#endif
++
++#if defined(__GNUC__) && defined(_MSC_VER)
++# error Crazy.
++#endif
++
++
++/**********************************************************************
++ * Compiler and processor dependencies.
++ */
++
++#if defined(__GNUC__)
++
++# include <ci/compat/gcc.h>
++
++# if defined(__i386__)
++# include <ci/compat/x86.h>
++# include <ci/compat/gcc_x86.h>
++# elif defined(__x86_64__)
++# include <ci/compat/x86_64.h>
++# include <ci/compat/gcc_x86.h>
++# elif defined(__PPC__)
++# include <ci/compat/ppc.h>
++# include <ci/compat/gcc_ppc.h>
++# elif defined(__ia64__)
++# include <ci/compat/ia64.h>
++# include <ci/compat/gcc_ia64.h>
++# else
++# error Unknown processor - GNU C
++# endif
++
++#elif defined(_MSC_VER)
++
++# include <ci/compat/msvc.h>
++
++# if defined(__i386__)
++# include <ci/compat/x86.h>
++# include <ci/compat/msvc_x86.h>
++# elif defined(__x86_64__)
++# include <ci/compat/x86_64.h>
++# include <ci/compat/msvc_x86_64.h>
++# else
++# error Unknown processor MSC
++# endif
++
++#elif defined(__PGI)
++
++# include <ci/compat/x86.h>
++# include <ci/compat/pg_x86.h>
++
++#elif defined(__INTEL_COMPILER)
++
++/* Intel compilers v7 claim to be very gcc compatible. */
++# if __INTEL_COMPILER >= 700
++# include <ci/compat/gcc.h>
++# include <ci/compat/x86.h>
++# include <ci/compat/gcc_x86.h>
++# else
++# error Old Intel compiler not supported. Yet.
++# endif
++
++#else
++# error Unknown compiler.
++#endif
++
++
++/**********************************************************************
++ * Misc stuff (that probably shouldn't be here).
++ */
++
++#ifdef __sun
++# ifdef __KERNEL__
++# define _KERNEL
++# define _SYSCALL32
++# ifdef _LP64
++# define _SYSCALL32_IMPL
++# endif
++# else
++# define _REENTRANT
++# endif
++#endif
++
++
++/**********************************************************************
++ * Defaults for anything left undefined.
++ */
++
++#ifndef CI_LIKELY
++# define CI_LIKELY(t) (t)
++# define CI_UNLIKELY(t) (t)
++#endif
++
++#ifndef ci_restrict
++# define ci_restrict
++#endif
++
++#ifndef ci_inline
++# define ci_inline static inline
++#endif
++
++#ifndef ci_noinline
++# define ci_noinline static
++#endif
++
++#endif /* __CI_COMPAT_SYSDEP_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/utils.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/utils.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/utils.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/utils.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,269 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Handy utility macros.
++ * \date 2003/01/17
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_UTILS_H__
++#define __CI_COMPAT_UTILS_H__
++
++
++/**********************************************************************
++ * Alignment -- [align] must be a power of 2.
++ **********************************************************************/
++
++ /*! Align forward onto next boundary. */
++
++#define CI_ALIGN_FWD(p, align) (((p)+(align)-1u) & ~((align)-1u))
++
++
++ /*! Align back onto prev boundary. */
++
++#define CI_ALIGN_BACK(p, align) ((p) & ~((align)-1u))
++
++
++ /*! How far to next boundary? */
++
++#define CI_ALIGN_NEEDED(p, align, signed_t) (-(signed_t)(p) & ((align)-1u))
++
++
++ /*! How far beyond prev boundary? */
++
++#define CI_OFFSET(p, align) ((p) & ((align)-1u))
++
++
++ /*! Does object fit in gap before next boundary? */
++
++#define CI_FITS(p, size, align, signed_t) \
++ (CI_ALIGN_NEEDED((p) + 1, (align), signed_t) + 1 >= (size))
++
++
++ /*! Align forward onto next boundary. */
++
++#define CI_PTR_ALIGN_FWD(p, align) \
++ ((char*) CI_ALIGN_FWD(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align))))
++
++ /*! Align back onto prev boundary. */
++
++#define CI_PTR_ALIGN_BACK(p, align) \
++ ((char*) CI_ALIGN_BACK(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align))))
++
++ /*! How far to next boundary? */
++
++#define CI_PTR_ALIGN_NEEDED(p, align) \
++ CI_ALIGN_NEEDED(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align)), \
++ ci_ptr_arith_t)
++
++ /*! How far to next boundary? NZ = not zero, i.e. gives [align] if already on a boundary */
++
++#define CI_PTR_ALIGN_NEEDED_NZ(p, align) \
++ ((align) - (((char*)p) - \
++ ((char*) CI_ALIGN_BACK(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align))))))
++
++ /*! How far beyond prev boundary? */
++
++#define CI_PTR_OFFSET(p, align) \
++ CI_OFFSET(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align)))
++
++
++ /* Same as CI_ALIGN_FWD and CI_ALIGN_BACK. */
++
++#define CI_ROUND_UP(i, align) (((i)+(align)-1u) & ~((align)-1u))
++
++#define CI_ROUND_DOWN(i, align) ((i) & ~((align)-1u))
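++
++/* Worked example (values illustrative): with align = 0x100,
++** CI_ALIGN_FWD(0x1234, 0x100) == 0x1300
++** CI_ALIGN_BACK(0x1234, 0x100) == 0x1200
++** CI_OFFSET(0x1234, 0x100) == 0x34
++*/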
++
++
++/**********************************************************************
++ * Byte-order
++ **********************************************************************/
++
++/* These are not flags. They are enumeration values for use with
++ * CI_MY_BYTE_ORDER. */
++#define CI_BIG_ENDIAN 1
++#define CI_LITTLE_ENDIAN 0
++
++/*
++** Note that these byte-swapping primitives may leave junk in bits above
++** the range they operate on.
++**
++** The CI_BSWAP_nn() routines require that bits above [nn] are zero. Use
++** CI_BSWAPM_nn(x) if this cannot be guaranteed.
++*/
++
++/* ?? May be able to improve on some of these with inline assembler on some
++** platforms.
++*/
++
++#define CI_BSWAP_16(v) ((((v) & 0xff) << 8) | ((v) >> 8))
++#define CI_BSWAPM_16(v) ((((v) & 0xff) << 8) | (((v) & 0xff00) >> 8))
++
++#define CI_BSWAP_32(v) (((v) >> 24) | \
++ (((v) & 0x00ff0000) >> 8) | \
++ (((v) & 0x0000ff00) << 8) | \
++ ((v) << 24))
++#define CI_BSWAPM_32(v) ((((v) & 0xff000000) >> 24) | \
++ (((v) & 0x00ff0000) >> 8) | \
++ (((v) & 0x0000ff00) << 8) | \
++ ((v) << 24))
++
++#define CI_BSWAP_64(v) (((v) >> 56) | \
++ (((v) & 0x00ff000000000000) >> 40) | \
++ (((v) & 0x0000ff0000000000) >> 24) | \
++ (((v) & 0x000000ff00000000) >> 8) | \
++ (((v) & 0x00000000ff000000) << 8) | \
++ (((v) & 0x0000000000ff0000) << 24) | \
++ (((v) & 0x000000000000ff00) << 40) | \
++ ((v) << 56))
++
++# define CI_BSWAPPED_16_IF(c,v) ((c) ? CI_BSWAP_16(v) : (v))
++# define CI_BSWAPPED_32_IF(c,v) ((c) ? CI_BSWAP_32(v) : (v))
++# define CI_BSWAPPED_64_IF(c,v) ((c) ? CI_BSWAP_64(v) : (v))
++# define CI_BSWAP_16_IF(c,v) do{ if((c)) (v) = CI_BSWAP_16(v); }while(0)
++# define CI_BSWAP_32_IF(c,v) do{ if((c)) (v) = CI_BSWAP_32(v); }while(0)
++# define CI_BSWAP_64_IF(c,v) do{ if((c)) (v) = CI_BSWAP_64(v); }while(0)
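++
++/* Worked example: CI_BSWAP_16(0x1234) == 0x3412. Given a wider value
++** with junk above bit 15, e.g. 0xdead1234 held in 32 bits, CI_BSWAP_16()
++** folds the junk in, whereas CI_BSWAPM_16() masks first and still
++** yields 0x3412.
++*/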
++
++#if (CI_MY_BYTE_ORDER == CI_LITTLE_ENDIAN)
++# define CI_BSWAP_LE16(v) (v)
++# define CI_BSWAP_LE32(v) (v)
++# define CI_BSWAP_LE64(v) (v)
++# define CI_BSWAP_BE16(v) CI_BSWAP_16(v)
++# define CI_BSWAP_BE32(v) CI_BSWAP_32(v)
++# define CI_BSWAP_BE64(v) CI_BSWAP_64(v)
++# define CI_BSWAPM_LE16(v) (v)
++# define CI_BSWAPM_LE32(v) (v)
++# define CI_BSWAPM_LE64(v) (v)
++# define CI_BSWAPM_BE16(v) CI_BSWAPM_16(v)
++# define CI_BSWAPM_BE32(v) CI_BSWAPM_32(v)
++#elif (CI_MY_BYTE_ORDER == CI_BIG_ENDIAN)
++# define CI_BSWAP_BE16(v) (v)
++# define CI_BSWAP_BE32(v) (v)
++# define CI_BSWAP_BE64(v) (v)
++# define CI_BSWAP_LE16(v) CI_BSWAP_16(v)
++# define CI_BSWAP_LE32(v) CI_BSWAP_32(v)
++# define CI_BSWAP_LE64(v) CI_BSWAP_64(v)
++# define CI_BSWAPM_BE16(v) (v)
++# define CI_BSWAPM_BE32(v) (v)
++# define CI_BSWAPM_BE64(v) (v)
++# define CI_BSWAPM_LE16(v) CI_BSWAPM_16(v)
++# define CI_BSWAPM_LE32(v) CI_BSWAPM_32(v)
++#else
++# error Bad endian.
++#endif
++
++
++/**********************************************************************
++ * Get pointer to struct from pointer to member
++ **********************************************************************/
++
++#define CI_MEMBER_OFFSET(c_type, mbr_name) \
++ ((ci_uint32) (ci_uintptr_t)(&((c_type*)0)->mbr_name))
++
++#define CI_MEMBER_SIZE(c_type, mbr_name) \
++ sizeof(((c_type*)0)->mbr_name)
++
++#define __CI_CONTAINER(c_type, mbr_name, p_mbr) \
++ ( (c_type*) ((char*)(p_mbr) - CI_MEMBER_OFFSET(c_type, mbr_name)) )
++
++#ifndef CI_CONTAINER
++# define CI_CONTAINER(t,m,p) __CI_CONTAINER(t,m,p)
++#endif
++
++
++/**********************************************************************
++ * Structure member initialiser.
++ **********************************************************************/
++
++#ifndef CI_STRUCT_MBR
++# define CI_STRUCT_MBR(name, val) .name = val
++#endif
++
++
++/**********************************************************************
++ * min / max
++ **********************************************************************/
++
++#define CI_MIN(x,y) (((x) < (y)) ? (x) : (y))
++#define CI_MAX(x,y) (((x) > (y)) ? (x) : (y))
++
++/**********************************************************************
++ * abs
++ **********************************************************************/
++
++#define CI_ABS(x) (((x) < 0) ? -(x) : (x))
++
++/**********************************************************************
++ * Conditional debugging
++ **********************************************************************/
++
++#ifdef NDEBUG
++# define CI_DEBUG(x)
++# define CI_NDEBUG(x) x
++# define CI_IF_DEBUG(y,n) (n)
++# define CI_DEBUG_ARG(x)
++#else
++# define CI_DEBUG(x) x
++# define CI_NDEBUG(x)
++# define CI_IF_DEBUG(y,n) (y)
++# define CI_DEBUG_ARG(x) ,x
++#endif
++
++#ifdef __KERNEL__
++#define CI_KERNEL_ARG(x) ,x
++#else
++#define CI_KERNEL_ARG(x)
++#endif
++
++#ifdef _WIN32
++# define CI_KERNEL_ARG_WIN(x) CI_KERNEL_ARG(x)
++# define CI_ARG_WIN(x) ,x
++#else
++# define CI_KERNEL_ARG_WIN(x)
++# define CI_ARG_WIN(x)
++#endif
++
++#ifdef __unix__
++# define CI_KERNEL_ARG_UNIX(x) CI_KERNEL_ARG(x)
++# define CI_ARG_UNIX(x) ,x
++#else
++# define CI_KERNEL_ARG_UNIX(x)
++# define CI_ARG_UNIX(x)
++#endif
++
++#ifdef __linux__
++# define CI_KERNEL_ARG_LINUX(x) CI_KERNEL_ARG(x)
++# define CI_ARG_LINUX(x) ,x
++#else
++# define CI_KERNEL_ARG_LINUX(x)
++# define CI_ARG_LINUX(x)
++#endif
++
++
++#endif /* __CI_COMPAT_UTILS_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/x86_64.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/x86_64.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/x86_64.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/x86_64.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,54 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Arch stuff for AMD x86_64.
++ * \date 2004/08/17
++ */
++
++/*! \cidoxg_include_ci_compat */
++#ifndef __CI_COMPAT_X86_64_H__
++#define __CI_COMPAT_X86_64_H__
++
++
++#define CI_MY_BYTE_ORDER CI_LITTLE_ENDIAN
++
++#define CI_WORD_SIZE 8
++#define CI_PTR_SIZE 8
++
++#define CI_PAGE_SIZE 4096
++#define CI_PAGE_SHIFT 12
++#define CI_PAGE_MASK (~(CI_PAGE_SIZE - 1))
++
++#define CI_CPU_HAS_SSE 1 /* SSE extensions supported */
++
++/* SSE2 disabled while investigating BUG1060 */
++#define CI_CPU_HAS_SSE2 0 /* SSE2 extensions supported */
++#define CI_CPU_OOS 0 /* CPU does out of order stores */
++
++
++#endif /* __CI_COMPAT_X86_64_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/x86.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/x86.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat/x86.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat/x86.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,48 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_X86_H__
++#define __CI_COMPAT_X86_H__
++
++
++#define CI_MY_BYTE_ORDER CI_LITTLE_ENDIAN
++
++#define CI_WORD_SIZE 4
++#define CI_PTR_SIZE 4
++
++#define CI_PAGE_SIZE 4096
++#define CI_PAGE_SHIFT 12
++#define CI_PAGE_MASK (~(CI_PAGE_SIZE - 1))
++
++#define CI_CPU_HAS_SSE 1 /* SSE extensions supported */
++#define CI_CPU_HAS_SSE2 0 /* SSE2 extensions supported */
++#define CI_CPU_OOS 0 /* CPU does out of order stores */
++
++
++#endif /* __CI_COMPAT_X86_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/compat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/compat.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,53 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Compatibility layer. Provides definitions of fundamental
++ * types and definitions that are used throughout CI source
++ * code. It does not introduce any link time dependencies,
++ * or include any unnecessary system headers.
++ */
++/*! \cidoxg_include_ci */
++
++#ifndef __CI_COMPAT_H__
++#define __CI_COMPAT_H__
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <ci/compat/primitive.h>
++#include <ci/compat/sysdep.h>
++#include <ci/compat/utils.h>
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __CI_COMPAT_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,276 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains public EFX VI API to Solarflare resource manager.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_RESOURCE_EFX_VI_H__
++#define __CI_DRIVER_RESOURCE_EFX_VI_H__
++
++/* Default size of event queue in the efx_vi resource. Copied from
++ * CI_CFG_NETIF_EVENTQ_SIZE */
++#define EFX_VI_EVENTQ_SIZE_DEFAULT 1024
++
++extern int efx_vi_eventq_size;
++
++/**************************************************************************
++ * efx_vi_state types, allocation and free
++ **************************************************************************/
++
++/*! Handle for referring to an efx_vi */
++struct efx_vi_state;
++
++/*!
++ * Allocate an efx_vi, including event queue and pt_endpoint
++ *
++ * \param vih_out Pointer to a handle that is set on success
++ * \param nic_index Index of NIC to apply this resource to
++ * \return Zero on success (and vih_out set), non-zero on failure.
++ */
++extern int
++efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index);
++
++/*!
++ * Free a previously allocated efx_vi
++ *
++ * \param vih The handle of the efx_vi to free
++ */
++extern void
++efx_vi_free(struct efx_vi_state *vih);
++
++/*!
++ * Reset a previously allocated efx_vi
++ *
++ * \param vih The handle of the efx_vi to reset
++ */
++extern void
++efx_vi_reset(struct efx_vi_state *vih);
++
++/**************************************************************************
++ * efx_vi_eventq types and functions
++ **************************************************************************/
++
++/*!
++ * Register a function to receive callbacks when event queue timeouts
++ * or wakeups occur. Only one function per efx_vi can be registered
++ * at once.
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param callback The function to callback
++ * \param context An argument to pass to the callback function
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_eventq_register_callback(struct efx_vi_state *vih,
++ void (*callback)(void *context, int is_timeout),
++ void *context);
++
++/*!
++ * Remove the current eventq timeout or wakeup callback function
++ *
++ * \param vih The handle to identify the efx_vi
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_eventq_kill_callback(struct efx_vi_state *vih);
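++
++/*
++ * Minimal usage sketch for the functions above (my_evq_cb, my_ctx and
++ * the NIC index 0 are placeholders, not part of this API):
++ *
++ * struct efx_vi_state *vih;
++ * if (efx_vi_alloc(&vih, 0) == 0) {
++ * efx_vi_eventq_register_callback(vih, my_evq_cb, my_ctx);
++ * ...
++ * efx_vi_eventq_kill_callback(vih);
++ * efx_vi_free(vih);
++ * }
++ */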
++
++/**************************************************************************
++ * efx_vi_dma_map types and functions
++ **************************************************************************/
++
++/*!
++ * Handle for refering to a efx_vi
++ */
++struct efx_vi_dma_map_state;
++
++/*!
++ * Map a list of buffer pages so they are registered with the hardware
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param pages An array of page pointers to map
++ * \param n_pages Length of the page pointer array. Must be a power of two.
++ * \param dmh_out Set on success to a handle used to refer to this mapping
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages,
++ int n_pages, struct efx_vi_dma_map_state **dmh_out);
++extern int
++efx_vi_dma_map_addrs(struct efx_vi_state *vih,
++ unsigned long long *dev_bus_addrs, int n_pages,
++ struct efx_vi_dma_map_state **dmh_out);
++
++/*!
++ * Unmap a previously mapped set of pages so they are no longer registered
++ * with the hardware.
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param dmh The handle to identify the dma mapping
++ */
++extern void
++efx_vi_dma_unmap_pages(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++extern void
++efx_vi_dma_unmap_addrs(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++
++/*!
++ * Retrieve the buffer address of the mapping
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param dmh The handle to identify the buffer mapping
++ * \return The buffer address on success, or zero on failure
++ */
++extern unsigned
++efx_vi_dma_get_map_addr(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
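++
++/*
++ * Illustrative pairing of the DMA-map calls above ([pages] and [n_pages]
++ * are assumed to come from the caller; n_pages must be a power of two):
++ *
++ * struct efx_vi_dma_map_state *dmh;
++ * if (efx_vi_dma_map_pages(vih, pages, n_pages, &dmh) == 0) {
++ * unsigned buf_addr = efx_vi_dma_get_map_addr(vih, dmh);
++ * ...
++ * efx_vi_dma_unmap_pages(vih, dmh);
++ * }
++ */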
++
++/**************************************************************************
++ * efx_vi filter functions
++ **************************************************************************/
++
++#define EFX_VI_STATIC_FILTERS 32
++
++/*! Handle to refer to a filter instance */
++struct filter_resource_t;
++
++/*!
++ * Allocate and add a filter
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param protocol The protocol of the new filter: UDP or TCP
++ * \param ip_addr_be32 The local ip address of the filter
++ * \param port_le16 The local port of the filter
++ * \param fh_out Set on success to be a handle to refer to this filter
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_filter(struct efx_vi_state *vih, int protocol, unsigned ip_addr_be32,
++ int port_le16, struct filter_resource_t **fh_out);
++
++/*!
++ * Remove a filter and free resources associated with it
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param fh The handle to identify the filter
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh);
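++
++/*
++ * Illustrative filter lifetime (the protocol constant and address/port
++ * values are placeholders): a filter added with efx_vi_filter() should
++ * later be removed with efx_vi_filter_stop().
++ *
++ * struct filter_resource_t *fh;
++ * if (efx_vi_filter(vih, IPPROTO_TCP, ip_be32, port_le16, &fh) == 0) {
++ * ...
++ * efx_vi_filter_stop(vih, fh);
++ * }
++ */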
++
++/**************************************************************************
++ * efx_vi hw resources types and functions
++ **************************************************************************/
++
++/*! Constants for the type field in efx_vi_hw_resource */
++#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */
++#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */
++#define EFX_VI_HW_RESOURCE_TXBELL 0x2 /* PFN of TX Doorbell (EF1) */
++#define EFX_VI_HW_RESOURCE_RXBELL 0x3 /* PFN of RX Doorbell (EF1) */
++#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */
++
++/* Address of event q pointer (EF1) */
++#define EFX_VI_HW_RESOURCE_EVQPTR 0x5
++/* Address of register pointer (Falcon A) */
++#define EFX_VI_HW_RESOURCE_EVQRPTR 0x6
++/* Offset of register pointer (Falcon B) */
++#define EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET 0x7
++/* Address of mem KVA */
++#define EFX_VI_HW_RESOURCE_EVQMEMKVA 0x8
++/* PFN of doorbell page (Falcon) */
++#define EFX_VI_HW_RESOURCE_BELLPAGE 0x9
++
++/*! How large an array to allocate for the get_() functions - smaller
++ than the total number of constants as some are mutually exclusive */
++#define EFX_VI_HW_RESOURCE_MAXSIZE 0x7
++
++/*! Constants for the mem_type field in efx_vi_hw_resource */
++#define EFX_VI_HW_RESOURCE_IOBUFFER 0 /* Host memory */
++#define EFX_VI_HW_RESOURCE_PERIPHERAL 1 /* Card memory/registers */
++
++/*!
++ * Data structure providing information on a hardware resource mapping
++ */
++struct efx_vi_hw_resource {
++ u8 type; /*!< What this resource represents */
++ u8 mem_type; /*!< What type of memory it is in, e.g.
++ * host or iomem */
++ u8 more_to_follow; /*!< Is this part of a multi-region resource */
++ u32 length; /*!< Length of the resource in bytes */
++ unsigned long address; /*!< Address of this resource */
++};
++
++/*!
++ * Metadata concerning the list of hardware resource mappings
++ */
++struct efx_vi_hw_resource_metadata {
++ int version;
++ int evq_order;
++ int evq_offs;
++ int evq_capacity;
++ int instance;
++ unsigned rx_capacity;
++ unsigned tx_capacity;
++ int nic_arch;
++ int nic_revision;
++ char nic_variant;
++};
++
++/*!
++ * Obtain a list of hardware resource mappings, using virtual addresses
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param mdata Pointer to a structure to receive the metadata
++ * \param hw_res_array An array to receive the list of hardware resources
++ * \param length The length of hw_res_array. Updated on success to contain
++ * the number of entries in the supplied array that were used.
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_hw_resource_get_virt(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length);
++
++/*!
++ * Obtain a list of hardware resource mappings, using physical addresses
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param mdata Pointer to a structure to receive the metadata
++ * \param hw_res_array An array to receive the list of hardware resources
++ * \param length The length of hw_res_array. Updated on success to contain
++ * the number of entries in the supplied array that were used.
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length);
++
++#endif /* __CI_DRIVER_RESOURCE_EFX_VI_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/common.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/common.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,102 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides API of the efhw library which may be used both from
++ * the kernel and from the user-space code.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_COMMON_H__
++#define __CI_EFHW_COMMON_H__
++
++#include <ci/efhw/common_sysdep.h>
++
++enum efhw_arch {
++ EFHW_ARCH_FALCON,
++ EFHW_ARCH_SIENA,
++};
++
++typedef uint32_t efhw_buffer_addr_t;
++#define EFHW_BUFFER_ADDR_FMT "[ba:%"PRIx32"]"
++
++/*! A hardware event: a raw 64-bit word, two opaque halves, or a code/status pair */
++typedef union {
++ uint64_t u64;
++ struct {
++ uint32_t a;
++ uint32_t b;
++ } opaque;
++ struct {
++ uint32_t code;
++ uint32_t status;
++ } ev1002;
++} efhw_event_t;
++
++/* Flags for TX/RX queues */
++#define EFHW_VI_JUMBO_EN 0x01 /*! scatter RX over multiple desc */
++#define EFHW_VI_ISCSI_RX_HDIG_EN 0x02 /*! iscsi rx header digest */
++#define EFHW_VI_ISCSI_TX_HDIG_EN 0x04 /*! iscsi tx header digest */
++#define EFHW_VI_ISCSI_RX_DDIG_EN 0x08 /*! iscsi rx data digest */
++#define EFHW_VI_ISCSI_TX_DDIG_EN 0x10 /*! iscsi tx data digest */
++#define EFHW_VI_TX_PHYS_ADDR_EN 0x20 /*! TX physical address mode */
++#define EFHW_VI_RX_PHYS_ADDR_EN 0x40 /*! RX physical address mode */
++#define EFHW_VI_RM_WITH_INTERRUPT 0x80 /*! VI with an interrupt */
++#define EFHW_VI_TX_IP_CSUM_DIS 0x100 /*! disable IP checksum generation */
++#define EFHW_VI_TX_TCPUDP_CSUM_DIS 0x200 /*! disable TCP/UDP checksum
++ generation */
++#define EFHW_VI_TX_TCPUDP_ONLY 0x400 /*! drop non-tcp/udp packets */
++
++/* Types of hardware filter */
++/* Each of these values implicitly selects scatter filters on B0 - OR in
++ EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK if a non-scatter filter is required */
++#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD (0) /* dest host only */
++#define EFHW_IP_FILTER_TYPE_UDP_FULL (1) /* dest host and port */
++#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD (2) /* dest based filter */
++#define EFHW_IP_FILTER_TYPE_TCP_FULL (3) /* src filter */
++/* Same again, but with RSS (for B0 only) */
++#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD_RSS_B0 (4)
++#define EFHW_IP_FILTER_TYPE_UDP_FULL_RSS_B0 (5)
++#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD_RSS_B0 (6)
++#define EFHW_IP_FILTER_TYPE_TCP_FULL_RSS_B0 (7)
++
++#define EFHW_IP_FILTER_TYPE_FULL_MASK (0x1) /* Mask for full / wildcard */
++#define EFHW_IP_FILTER_TYPE_TCP_MASK (0x2) /* Mask for TCP type */
++#define EFHW_IP_FILTER_TYPE_RSS_B0_MASK (0x4) /* Mask for B0 RSS enable */
++#define EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK (0x8) /* Mask for B0 SCATTER dsbl */
++
++#define EFHW_IP_FILTER_TYPE_MASK (0xffff) /* Mask of types above */
++
++#define EFHW_IP_FILTER_BROADCAST (0x10000) /* driverlink filter
++ support */
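++
++/* Illustrative composition from the values above: a full-match TCP
++ filter with scatter disabled on B0 would be
++ (EFHW_IP_FILTER_TYPE_TCP_FULL | EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK). */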
++
++#endif /* __CI_EFHW_COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,67 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for
++ * userland-to-kernel interfaces.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_COMMON_LINUX_H__
++#define __CI_EFHW_COMMON_LINUX_H__
++
++#include <linux/types.h>
++#include <linux/version.h>
++
++/* Dirty hack, but Linux kernel does not provide DMA_ADDR_T_FMT */
++#if BITS_PER_LONG == 64 || defined(CONFIG_HIGHMEM64G)
++#define DMA_ADDR_T_FMT "%llx"
++#else
++#define DMA_ADDR_T_FMT "%x"
++#endif
++
++/* Linux kernel also does not provide PRIx32... Sigh. */
++#define PRIx32 "x"
++#define PRIx64 "llx"
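++
++/* Illustrative usage (editor's sketch): these format strings are spliced
++ * into printk() formats by string concatenation; note DMA_ADDR_T_FMT
++ * includes the '%' while PRIx64 does not, e.g.
++ *
++ *     uint64_t v;
++ *     dma_addr_t dma;
++ *     printk("value 0x%" PRIx64 ", dma " DMA_ADDR_T_FMT "\n", v, dma);
++ */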
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++enum {
++ false = 0,
++ true = 1
++};
++
++typedef _Bool bool;
++#endif /* LINUX_VERSION_CODE < 2.6.19 */
++
++#endif /* __CI_EFHW_COMMON_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/debug.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/debug.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/debug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/debug.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,84 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides debug-related API for efhw library using Linux kernel
++ * primitives.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_DEBUG_LINUX_H__
++#define __CI_EFHW_DEBUG_LINUX_H__
++
++#define EFHW_PRINTK_PREFIX "[sfc efhw] "
++
++#define EFHW_PRINTK(level, fmt, ...) \
++ printk(level EFHW_PRINTK_PREFIX fmt "\n", __VA_ARGS__)
++
++/* The following macros should be used with non-zero format parameters
++ * due to __VA_ARGS__ limitations. Use "%s" with __FUNCTION__ if you can't
++ * find better parameters. */
++#define EFHW_ERR(fmt, ...) EFHW_PRINTK(KERN_ERR, fmt, __VA_ARGS__)
++#define EFHW_WARN(fmt, ...) EFHW_PRINTK(KERN_WARNING, fmt, __VA_ARGS__)
++#define EFHW_NOTICE(fmt, ...) EFHW_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__)
++#if 0 && !defined(NDEBUG)
++#define EFHW_TRACE(fmt, ...) EFHW_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__)
++#else
++#define EFHW_TRACE(fmt, ...)
++#endif
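++
++/* Illustrative usage (editor's sketch): because of the __VA_ARGS__
++ * restriction noted above, always pass at least one argument, e.g.
++ *
++ *     EFHW_WARN("%s: evq %d out of range", __FUNCTION__, evq);
++ *     EFHW_ERR("%s", __FUNCTION__);   (when there is nothing else to log)
++ */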
++
++#ifndef NDEBUG
++#define EFHW_ASSERT(cond) BUG_ON((cond) == 0)
++#define EFHW_DO_DEBUG(expr) expr
++#else
++#define EFHW_ASSERT(cond)
++#define EFHW_DO_DEBUG(expr)
++#endif
++
++#define EFHW_TEST(expr) \
++ do { \
++ if (unlikely(!(expr))) \
++ BUG(); \
++ } while (0)
++
++/* Build time asserts. We paste the line number into the type name
++ * so that the macro can be used more than once per file even if the
++ * compiler objects to multiple identical typedefs. Collisions
++ * between uses in different header files are still possible. */
++#ifndef EFHW_BUILD_ASSERT
++#define __EFHW_BUILD_ASSERT_NAME(_x) __EFHW_BUILD_ASSERT_ILOATHECPP(_x)
++#define __EFHW_BUILD_ASSERT_ILOATHECPP(_x) __EFHW_BUILD_ASSERT__ ##_x
++#define EFHW_BUILD_ASSERT(e) \
++ typedef char __EFHW_BUILD_ASSERT_NAME(__LINE__)[(e) ? 1 : -1]
++#endif
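++
++/* Illustrative usage (editor's sketch): the assert expands to a typedef,
++ * so it is legal at file scope as well as inside functions, e.g.
++ *
++ *     EFHW_BUILD_ASSERT(sizeof(long) >= 4);
++ *
++ * A false condition yields an array type of negative size and therefore
++ * a compile-time error.
++ */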
++
++#endif /* __CI_EFHW_DEBUG_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/efhw_config.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/efhw_config.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/efhw_config.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/efhw_config.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,43 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides some limits used in both kernel and userland code.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EFAB_CONFIG_H__
++#define __CI_EFHW_EFAB_CONFIG_H__
++
++#define EFHW_MAX_NR_DEVS 5 /* max number of efhw devices supported */
++
++#endif /* __CI_EFHW_EFAB_CONFIG_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/efhw_types.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/efhw_types.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/efhw_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/efhw_types.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,342 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides struct efhw_nic and some related types.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EFAB_TYPES_H__
++#define __CI_EFHW_EFAB_TYPES_H__
++
++#include <ci/efhw/efhw_config.h>
++#include <ci/efhw/hardware_sysdep.h>
++#include <ci/efhw/iopage_types.h>
++#include <ci/efhw/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * hardware limits used in the types
++ *
++ *--------------------------------------------------------------------*/
++
++#define EFHW_KEVENTQ_MAX 8
++
++/*--------------------------------------------------------------------
++ *
++ * forward type declarations
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_nic;
++
++/*--------------------------------------------------------------------
++ *
++ * Managed interface
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_buffer_table_allocation {
++ unsigned base;
++ unsigned order;
++};
++
++struct eventq_resource_hardware {
++	/*! iobuffer allocated for eventq - can be larger than eventq */
++ efhw_iopages_t iobuff;
++ unsigned iobuff_off;
++ struct efhw_buffer_table_allocation buf_tbl_alloc;
++ int capacity; /*!< capacity of event queue */
++};
++
++/*--------------------------------------------------------------------
++ *
++ * event queues and event driven callbacks
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_keventq {
++ volatile int lock;
++ caddr_t evq_base;
++ int32_t evq_ptr;
++ uint32_t evq_mask;
++ unsigned instance;
++ struct eventq_resource_hardware hw;
++ struct efhw_ev_handler *ev_handlers;
++};
++
++/**********************************************************************
++ * Portable HW interface. ***************************************
++ **********************************************************************/
++
++/*--------------------------------------------------------------------
++ *
++ * EtherFabric Functional units - configuration and control
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_func_ops {
++
++ /*-------------- Initialisation ------------ */
++
++ /*! close down all hardware functional units - leaves NIC in a safe
++ state for driver unload */
++ void (*close_hardware) (struct efhw_nic *nic);
++
++ /*! initialise all hardware functional units */
++ int (*init_hardware) (struct efhw_nic *nic,
++ struct efhw_ev_handler *,
++ const uint8_t *mac_addr);
++
++ /*-------------- Interrupt support ------------ */
++
++	/*! Main interrupt routine
++	 ** This function returns:
++	 **  - zero, if the IRQ was not generated by EF1
++	 **  - non-zero, if EF1 was the source of the IRQ
++	 */
++ int (*interrupt) (struct efhw_nic *nic);
++
++ /*! Enable given interrupt mask for the given IRQ unit */
++ void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
++
++ /*! Disable given interrupt mask for the given IRQ unit */
++ void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
++
++ /*! Set interrupt moderation strategy for the given IRQ unit
++ ** val is in usec
++ */
++ void (*set_interrupt_moderation)(struct efhw_nic *nic,
++ uint idx, uint val);
++
++ /*-------------- Event support ------------ */
++
++	/*! Enable the given event queue.
++	   Depending on the underlying implementation (EF1 or Falcon),
++	   either a q_base_addr in host memory or a buffer base id
++	   should be provided.
++	 */
++	void (*event_queue_enable) (struct efhw_nic *nic,
++				    uint evq, /* event queue index */
++				    uint evq_size, /* units of #entries */
++				    dma_addr_t q_base_addr, uint buf_base_id);
++
++ /*! Disable the given event queue (and any associated timer) */
++ void (*event_queue_disable) (struct efhw_nic *nic, uint evq,
++ int timer_only);
++
++ /*! request wakeup from the NIC on a given event Q */
++ void (*wakeup_request) (struct efhw_nic *nic, dma_addr_t q_base_addr,
++ int next_i, int evq);
++
++ /*! Push a SW event on a given eventQ */
++ void (*sw_event) (struct efhw_nic *nic, int data, int evq);
++
++ /*-------------- Filter support ------------ */
++
++ /*! Setup a given filter - The software can request a filter_i,
++ * but some EtherFabric implementations will override with
++ * a more suitable index
++ */
++ int (*ipfilter_set) (struct efhw_nic *nic, int type,
++ int *filter_i, int dmaq,
++ unsigned saddr_be32, unsigned sport_be16,
++ unsigned daddr_be32, unsigned dport_be16);
++
++ /*! Attach a given filter to a DMAQ */
++ void (*ipfilter_attach) (struct efhw_nic *nic, int filter_idx,
++ int dmaq_idx);
++
++ /*! Detach a filter from its DMAQ */
++ void (*ipfilter_detach) (struct efhw_nic *nic, int filter_idx);
++
++ /*! Clear down a given filter */
++ void (*ipfilter_clear) (struct efhw_nic *nic, int filter_idx);
++
++ /*-------------- DMA support ------------ */
++
++ /*! Initialise NIC state for a given TX DMAQ */
++ void (*dmaq_tx_q_init) (struct efhw_nic *nic,
++ uint dmaq, uint evq, uint owner, uint tag,
++ uint dmaq_size, uint buf_idx, uint flags);
++
++ /*! Initialise NIC state for a given RX DMAQ */
++ void (*dmaq_rx_q_init) (struct efhw_nic *nic,
++ uint dmaq, uint evq, uint owner, uint tag,
++ uint dmaq_size, uint buf_idx, uint flags);
++
++ /*! Disable a given TX DMAQ */
++ void (*dmaq_tx_q_disable) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Disable a given RX DMAQ */
++ void (*dmaq_rx_q_disable) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Flush a given TX DMA channel */
++ int (*flush_tx_dma_channel) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Flush a given RX DMA channel */
++ int (*flush_rx_dma_channel) (struct efhw_nic *nic, uint dmaq);
++
++ /*-------------- Buffer table Support ------------ */
++
++ /*! Initialise a buffer table page */
++ void (*buffer_table_set) (struct efhw_nic *nic,
++ dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int own_id, int buffer_id);
++
++ /*! Initialise a block of buffer table pages */
++ void (*buffer_table_set_n) (struct efhw_nic *nic, int buffer_id,
++ dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int n_pages, int own_id);
++
++ /*! Clear a block of buffer table pages */
++ void (*buffer_table_clear) (struct efhw_nic *nic, int buffer_id,
++ int num);
++
++ /*! Commit a buffer table update */
++ void (*buffer_table_commit) (struct efhw_nic *nic);
++
++};
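++
++/* Illustrative usage (editor's sketch): hardware-independent code invokes
++ * the NIC through this ops table rather than calling a backend directly,
++ * e.g.
++ *
++ *     nic->efhw_func->event_queue_enable(nic, evq, evq_size,
++ *                                        q_base_addr, buf_base_id);
++ */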
++
++
++/*----------------------------------------------------------------------------
++ *
++ * NIC type
++ *
++ *---------------------------------------------------------------------------*/
++
++struct efhw_device_type {
++ int arch; /* enum efhw_arch */
++ char variant; /* 'A', 'B', ... */
++ int revision; /* 0, 1, ... */
++};
++
++
++/*----------------------------------------------------------------------------
++ *
++ * EtherFabric NIC instance - nic.c for HW independent functions
++ *
++ *---------------------------------------------------------------------------*/
++
++/*! An EtherFabric NIC instance. */
++struct efhw_nic {
++ /*! zero base index in efrm_nic_table.nic array */
++ volatile int index;
++ int ifindex; /*!< OS level nic index */
++#ifdef HAS_NET_NAMESPACE
++ struct net *nd_net;
++#endif
++
++ struct efhw_device_type devtype;
++
++ /*! Options that can be set by user. */
++ unsigned options;
++# define NIC_OPT_EFTEST 0x1 /* owner is an eftest app */
++
++# define NIC_OPT_DEFAULT 0
++
++ /*! Internal flags that indicate hardware properties at runtime. */
++ unsigned flags;
++# define NIC_FLAG_NO_INTERRUPT 0x01 /* to be set at init time only */
++# define NIC_FLAG_TRY_MSI 0x02
++# define NIC_FLAG_MSI 0x04
++# define NIC_FLAG_OS_IRQ_EN 0x08
++# define NIC_FLAG_10G 0x10
++
++ unsigned mtu; /*!< MAC MTU (includes MAC hdr) */
++
++ /* hardware resources */
++
++ /*! I/O address of the start of the bar */
++ efhw_ioaddr_t bar_ioaddr;
++
++ /*! Bar number of control aperture. */
++ unsigned ctr_ap_bar;
++ /*! Length of control aperture in bytes. */
++ unsigned ctr_ap_bytes;
++
++ uint8_t mac_addr[ETH_ALEN]; /*!< mac address */
++
++ /*! EtherFabric Functional Units -- functions */
++ const struct efhw_func_ops *efhw_func;
++
++ /* Value read from FPGA version register. Zero for asic. */
++ unsigned fpga_version;
++
++ /*! This lock protects a number of misc NIC resources. It should
++ * only be used for things that can be at the bottom of the lock
++ * order. ie. You mustn't attempt to grab any other lock while
++ * holding this one.
++ */
++ spinlock_t *reg_lock;
++ spinlock_t the_reg_lock;
++
++ int buf_commit_outstanding; /*!< outstanding buffer commits */
++
++ /*! interrupt callbacks (hard-irq) */
++ void (*irq_handler) (struct efhw_nic *, int unit);
++
++ /*! event queues per driver */
++ struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
++
++/* for marking when we are not using an IRQ unit
++ - 0 is a valid offset to an IRQ unit on EF1! */
++#define EFHW_IRQ_UNIT_UNUSED 0xffff
++ /*! interrupt unit in use */
++ unsigned int irq_unit[EFHW_KEVENTQ_MAX];
++ efhw_iopage_t irq_iobuff; /*!< Falcon SYSERR interrupt */
++
++ /* The new driverlink infrastructure. */
++ struct efx_dl_device *net_driver_dev;
++ struct efx_dlfilt_cb_s *dlfilter_cb;
++
++ /*! Bit masks of the sizes of event queues and dma queues supported
++ * by the nic. */
++ unsigned evq_sizes;
++ unsigned rxq_sizes;
++ unsigned txq_sizes;
++
++ /* Size of filter table (including odd and even banks). */
++ unsigned filter_tbl_size;
++};
++
++
++#define EFHW_KVA(nic) ((nic)->bar_ioaddr)
++
++
++#endif /* __CI_EFHW_EFHW_TYPES_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,84 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for header files
++ * with hardware-related definitions (in ci/driver/efab/hardware*).
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_HARDWARE_LINUX_H__
++#define __CI_EFHW_HARDWARE_LINUX_H__
++
++#include <asm/io.h>
++
++#ifdef __LITTLE_ENDIAN
++#define EFHW_IS_LITTLE_ENDIAN
++#elif __BIG_ENDIAN
++#define EFHW_IS_BIG_ENDIAN
++#else
++#error Unknown endianness
++#endif
++
++#ifndef mmiowb
++ #if defined(__i386__) || defined(__x86_64__)
++ #define mmiowb()
++ #elif defined(__ia64__)
++ #ifndef ia64_mfa
++ #define ia64_mfa() asm volatile ("mf.a" ::: "memory")
++ #endif
++ #define mmiowb ia64_mfa
++ #else
++ #error "Need definition for mmiowb()"
++ #endif
++#endif
++
++typedef char *efhw_ioaddr_t;
++
++#ifndef readq
++static inline uint64_t __readq(void __iomem *addr)
++{
++ return *(volatile uint64_t *)addr;
++}
++#define readq(x) __readq(x)
++#endif
++
++#ifndef writeq
++static inline void __writeq(uint64_t v, void __iomem *addr)
++{
++ *(volatile uint64_t *)addr = v;
++}
++#define writeq(val, addr) __writeq((val), (addr))
++#endif
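++
++/* Illustrative usage (editor's sketch): 64-bit register access using the
++ * fallbacks above, ordered with mmiowb() before a lock is released, e.g.
++ *
++ *     writeq(val, ioaddr + reg);
++ *     mmiowb();
++ */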
++
++#endif /* __CI_EFHW_HARDWARE_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/iopage_types.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/iopage_types.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/iopage_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/iopage_types.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,188 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_IOPAGE_LINUX_H__
++#define __CI_EFHW_IOPAGE_LINUX_H__
++
++#include <linux/gfp.h>
++#include <linux/hardirq.h>
++#include <ci/efhw/debug.h>
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_page_t: A single page of memory. Directly mapped in the driver,
++ * and can be mapped to userlevel.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ unsigned long kva;
++} efhw_page_t;
++
++static inline int efhw_page_alloc(efhw_page_t *p)
++{
++ p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
++ return p->kva ? 0 : -ENOMEM;
++}
++
++static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
++{
++ p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
++ return p->kva ? 0 : -ENOMEM;
++}
++
++static inline void efhw_page_free(efhw_page_t *p)
++{
++ free_page(p->kva);
++ EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
++}
++
++static inline char *efhw_page_ptr(efhw_page_t *p)
++{
++ return (char *)p->kva;
++}
++
++static inline unsigned efhw_page_pfn(efhw_page_t *p)
++{
++ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
++}
++
++static inline void efhw_page_mark_invalid(efhw_page_t *p)
++{
++ p->kva = 0;
++}
++
++static inline int efhw_page_is_valid(efhw_page_t *p)
++{
++ return p->kva != 0;
++}
++
++static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
++{
++ p->kva = (unsigned long)va;
++}
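++
++/* Illustrative usage (editor's sketch):
++ *
++ *     efhw_page_t pg;
++ *     if (efhw_page_alloc_zeroed(&pg) < 0)
++ *             return -ENOMEM;
++ *     ... use efhw_page_ptr(&pg) / efhw_page_pfn(&pg) ...
++ *     efhw_page_free(&pg);
++ */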
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_iopage_t: A single page of memory. Directly mapped in the driver,
++ * and can be mapped to userlevel. Can also be accessed by the NIC.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ efhw_page_t p;
++ dma_addr_t dma_addr;
++} efhw_iopage_t;
++
++static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
++{
++ return p->dma_addr;
++}
++
++#define efhw_iopage_ptr(iop) efhw_page_ptr(&(iop)->p)
++#define efhw_iopage_pfn(iop) efhw_page_pfn(&(iop)->p)
++#define efhw_iopage_mark_invalid(iop) efhw_page_mark_invalid(&(iop)->p)
++#define efhw_iopage_is_valid(iop) efhw_page_is_valid(&(iop)->p)
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_iopages_t: A set of pages that are contiguous in physical memory.
++ * Directly mapped in the driver, and can be mapped to userlevel. Can also
++ * be accessed by the NIC.
++ *
++ * NB. The O/S may be unwilling to allocate many, or even any of these. So
++ * only use this type where the NIC really needs a physically contiguous
++ * buffer.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ caddr_t kva;
++ unsigned order;
++ dma_addr_t dma_addr;
++} efhw_iopages_t;
++
++static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
++{
++ return p->kva;
++}
++
++static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
++{
++ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
++}
++
++static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
++{
++ return p->dma_addr;
++}
++
++static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
++{
++ return 1u << (p->order + PAGE_SHIFT);
++}
++
++/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
++ * contiguous allocations in iobufsets for iSCSI. This allows the
++ * essential information about contiguous allocations from
++ * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
++ * iobufset. (Changing the iobufset resource to use a union type would
++ * involve a lot of code changes, and make the iobufset's metadata larger
++ * which could be bad as it's supposed to fit into a single page on some
++ * platforms.)
++ */
++static inline void
++efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
++ efhw_iopages_t *iopages, unsigned pageno)
++{
++ iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
++ + (pageno * PAGE_SIZE);
++ iopage->dma_addr = efhw_iopages_dma_addr(iopages) +
++ (pageno * PAGE_SIZE);
++}
++
++static inline void
++efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
++ efhw_iopage_t *iopage, unsigned order)
++{
++ iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
++ EFHW_ASSERT(iopages->kva);
++ iopages->order = order;
++ iopages->dma_addr = efhw_iopage_dma_addr(iopage);
++}
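++
++/* Illustrative usage (editor's sketch): split a contiguous allocation into
++ * per-page handles, assuming a suitably sized "pages" array:
++ *
++ *     for (i = 0; i < efhw_iopages_size(&iopages) / PAGE_SIZE; ++i)
++ *             efhw_iopage_init_from_iopages(&pages[i], &iopages, i);
++ */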
++
++#endif /* __CI_EFHW_IOPAGE_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/public.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/public.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/public.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/public.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,83 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides public API of efhw library exported from the SFC
++ * resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_PUBLIC_H__
++#define __CI_EFHW_PUBLIC_H__
++
++#include <ci/efhw/common.h>
++#include <ci/efhw/efhw_types.h>
++
++/*! Returns true if we have some EtherFabric functional units -
++ whether configured or not */
++static inline int efhw_nic_have_functional_units(struct efhw_nic *nic)
++{
++ return nic->efhw_func != 0;
++}
++
++/*! Returns true if the EtherFabric functional units have been configured */
++static inline int efhw_nic_have_hw(struct efhw_nic *nic)
++{
++ return efhw_nic_have_functional_units(nic) && (EFHW_KVA(nic) != 0);
++}
++
++/*! Helper function to allocate the iobuffer needed by an eventq
++ * - it ensures the eventq has the correct alignment for the NIC
++ *
++ * \param nic NIC that owns the event queue
++ * \param h Event-queue hardware resource
++ * \param evq_instance Event-queue instance (index)
++ * \param buf_bytes Requested size of eventq
++ * \return < 0 if iobuffer allocation fails
++ */
++int efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
++ struct eventq_resource_hardware *h,
++ int evq_instance, unsigned buf_bytes);
++
++extern void falcon_nic_set_rx_usr_buf_size(struct efhw_nic *,
++ int rx_usr_buf_size);
++
++extern void
++falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full,
++ uint32_t tcp_wild,
++ uint32_t udp_full, uint32_t udp_wild);
++
++extern void
++falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full,
++ uint32_t *tcp_wild,
++ uint32_t *udp_full, uint32_t *udp_wild);
++
++#endif /* __CI_EFHW_PUBLIC_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efhw/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efhw/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,72 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for efhw library.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_SYSDEP_LINUX_H__
++#define __CI_EFHW_SYSDEP_LINUX_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/if_ether.h>
++
++#include <linux/netdevice.h> /* necessary for etherdevice.h on some kernels */
++#include <linux/etherdevice.h>
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
++static inline int is_local_ether_addr(const u8 *addr)
++{
++ return (0x02 & addr[0]);
++}
++#endif
++
++typedef unsigned long irq_flags_t;
++
++#define spin_lock_destroy(l_) do {} while (0)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++#define HAS_NET_NAMESPACE
++#endif
++
++/* Oddly, Linux defines round_up() for x86 only, in an x86-specific
++ * header */
++#ifndef round_up
++#define round_up(x, y) (((x) + (y) - 1) & ~((y)-1))
++#endif
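++
++/* Worked example (editor's note): round_up() rounds x up to the next
++ * multiple of y, which must be a power of two:
++ * round_up(100, 64) == 128, round_up(128, 64) == 128. */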
++
++#endif /* __CI_EFHW_SYSDEP_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efrm/nic_table.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efrm/nic_table.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efrm/nic_table.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efrm/nic_table.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,98 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides public API for NIC table.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_NIC_TABLE_H__
++#define __CI_EFRM_NIC_TABLE_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efrm/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * struct efrm_nic_table - top level driver object keeping all NICs -
++ * implemented in driver_object.c
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Top-level table of all NICs attached to this driver. */
++struct efrm_nic_table {
++ /*! nics attached to this driver */
++ struct efhw_nic *nic[EFHW_MAX_NR_DEVS];
++ /*! pointer to an arbitrary struct efhw_nic if one exists;
++ * for code which does not care which NIC it wants but
++ * still needs one. Note you cannot assume nic[0] exists. */
++ struct efhw_nic *a_nic;
++ uint32_t nic_count; /*!< number of nics attached to this driver */
++ spinlock_t lock; /*!< lock for table modifications */
++ atomic_t ref_count; /*!< refcount for users of nic table */
++};
++
++/* Resource driver structures used by other drivers as well */
++extern struct efrm_nic_table efrm_nic_table;
++
++static inline void efrm_nic_table_hold(void)
++{
++ atomic_inc(&efrm_nic_table.ref_count);
++}
++
++static inline void efrm_nic_table_rele(void)
++{
++ atomic_dec(&efrm_nic_table.ref_count);
++}
++
++static inline int efrm_nic_table_held(void)
++{
++ return (atomic_read(&efrm_nic_table.ref_count) != 0);
++}
++
++/* Run the following code block once per registered NIC, with _nic set
++ * to each NIC in turn (see the illustrative sketch after these macros).
++ * DO NOT "break" out of this loop early. */
++#define EFRM_FOR_EACH_NIC(_nic_i, _nic) \
++ for ((_nic_i) = (efrm_nic_table_hold(), 0); \
++ (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++ (_nic_i)++) \
++ if (((_nic) = efrm_nic_table.nic[_nic_i]))
++
++#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \
++ for ((_i) = (efrm_nic_table_hold(), 0); \
++ (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++ ++(_i)) \
++ if (((_nic) = efrm_nic_table.nic[_i]) && \
++ efrm_nic_set_read((_set), (_i)))
++
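++/* Illustrative usage (editor's sketch): the loop macros take and release
++ * the table reference themselves, which is why "break" must not be used
++ * (the release would be skipped), e.g. with a hypothetical do_something():
++ *
++ *     int i;
++ *     struct efhw_nic *nic;
++ *     EFRM_FOR_EACH_NIC(i, nic)
++ *             do_something(nic);
++ */
++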
++#endif /* __CI_EFRM_NIC_TABLE_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efrm/sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efrm/sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efrm/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efrm/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,54 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides Linux-like system-independent API for efrm library.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_SYSDEP_H__
++#define __CI_EFRM_SYSDEP_H__
++
++/* Spinlocks are defined in efhw/sysdep.h */
++#include <ci/efhw/sysdep.h>
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++# include <ci/efrm/sysdep_linux.h>
++
++#else
++
++# include <ci/efrm/sysdep_ci2linux.h>
++
++#endif
++
++#endif /* __CI_EFRM_SYSDEP_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,248 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for efrm library.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Kfifo API is partially stolen from linux-2.6.22/include/linux/list.h
++ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_SYSDEP_LINUX_H__
++#define __CI_EFRM_SYSDEP_LINUX_H__
++
++#include <linux/version.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/workqueue.h>
++#include <linux/gfp.h>
++#include <linux/slab.h>
++#include <linux/hardirq.h>
++#include <linux/kernel.h>
++#include <linux/if_ether.h>
++#include <linux/completion.h>
++#include <linux/in.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++/* get roundup_pow_of_two(), which was in kernel.h in early kernel versions */
++#include <linux/log2.h>
++#endif
++
++/********************************************************************
++ *
++ * List API
++ *
++ ********************************************************************/
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++static inline void
++list_replace_init(struct list_head *old, struct list_head *new)
++{
++ new->next = old->next;
++ new->next->prev = new;
++ new->prev = old->prev;
++ new->prev->next = new;
++ INIT_LIST_HEAD(old);
++}
++#endif
++
++static inline struct list_head *list_pop(struct list_head *list)
++{
++ struct list_head *link = list->next;
++ list_del(link);
++ return link;
++}
++
++static inline struct list_head *list_pop_tail(struct list_head *list)
++{
++ struct list_head *link = list->prev;
++ list_del(link);
++ return link;
++}
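++
++/* Illustrative usage (editor's sketch): list_pop() detaches and returns
++ * the head link of a non-empty list; the caller recovers the containing
++ * structure (hypothetical type "my_item" with member "link"), e.g.
++ *
++ *     struct my_item *item =
++ *             container_of(list_pop(&pending), struct my_item, link);
++ */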
++
++/********************************************************************
++ *
++ * Workqueue API
++ *
++ ********************************************************************/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define NEED_OLD_WORK_API
++
++/**
++ * The old and new work function prototypes just change
++ * the type of the pointer in the only argument, so it's
++ * safe to cast one function type to the other
++ */
++typedef void (*efrm_old_work_func_t) (void *p);
++
++#undef INIT_WORK
++#define INIT_WORK(_work, _func) \
++ do { \
++ INIT_LIST_HEAD(&(_work)->entry); \
++ (_work)->pending = 0; \
++ PREPARE_WORK((_work), \
++ (efrm_old_work_func_t) (_func), \
++ (_work)); \
++ } while (0)
++
++#endif
++
++/********************************************************************
++ *
++ * Kfifo API
++ *
++ ********************************************************************/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++
++#if !defined(RHEL_RELEASE_CODE) || (RHEL_RELEASE_CODE < 1029)
++typedef unsigned gfp_t;
++#endif
++
++#define HAS_NO_KFIFO
++
++struct kfifo {
++ unsigned char *buffer; /* the buffer holding the data */
++ unsigned int size; /* the size of the allocated buffer */
++ unsigned int in; /* data is added at offset (in % size) */
++ unsigned int out; /* data is extracted from off. (out % size) */
++ spinlock_t *lock; /* protects concurrent modifications */
++};
++
++extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
++ gfp_t gfp_mask, spinlock_t *lock);
++extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
++ spinlock_t *lock);
++extern void kfifo_free(struct kfifo *fifo);
++extern unsigned int __kfifo_put(struct kfifo *fifo,
++ unsigned char *buffer, unsigned int len);
++extern unsigned int __kfifo_get(struct kfifo *fifo,
++ unsigned char *buffer, unsigned int len);
++
++/**
++ * kfifo_put - puts some data into the FIFO
++ * @fifo: the fifo to be used.
++ * @buffer: the data to be added.
++ * @len: the length of the data to be added.
++ *
++ * This function copies at most @len bytes from the @buffer into
++ * the FIFO depending on the free space, and returns the number of
++ * bytes copied.
++ */
++static inline unsigned int
++kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_put(fifo, buffer, len);
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++/**
++ * kfifo_get - gets some data from the FIFO
++ * @fifo: the fifo to be used.
++ * @buffer: where the data must be copied.
++ * @len: the size of the destination buffer.
++ *
++ * This function copies at most @len bytes from the FIFO into the
++ * @buffer and returns the number of copied bytes.
++ */
++static inline unsigned int
++kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_get(fifo, buffer, len);
++
++ /*
++ * optimization: if the FIFO is empty, set the indices to 0
++ * so we don't wrap the next time
++ */
++ if (fifo->in == fifo->out)
++ fifo->in = fifo->out = 0;
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++/**
++ * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
++ * @fifo: the fifo to be used.
++ */
++static inline unsigned int __kfifo_len(struct kfifo *fifo)
++{
++ return fifo->in - fifo->out;
++}
++
++/**
++ * kfifo_len - returns the number of bytes available in the FIFO
++ * @fifo: the fifo to be used.
++ */
++static inline unsigned int kfifo_len(struct kfifo *fifo)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_len(fifo);
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++#else
++#include <linux/kfifo.h>
++#endif
++
++static inline void kfifo_vfree(struct kfifo *fifo)
++{
++ vfree(fifo->buffer);
++ kfree(fifo);
++}
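++
++/* Illustrative usage (editor's sketch): kfifo_vfree() pairs with a fifo
++ * whose buffer was vmalloc()ed by the caller and wrapped via kfifo_init():
++ *
++ *     buf = vmalloc(size);
++ *     fifo = kfifo_init(buf, size, GFP_KERNEL, &lock);
++ *     ...
++ *     kfifo_vfree(fifo);
++ *
++ * A fifo from kfifo_alloc() must instead be released with kfifo_free(),
++ * since its buffer was not vmalloc()ed.
++ */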
++
++#endif /* __CI_EFRM_SYSDEP_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/config.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/config.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/config.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/config.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,49 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_CONFIG_H__
++#define __CI_TOOLS_CONFIG_H__
++
++
++/**********************************************************************
++ * Debugging.
++ */
++
++#define CI_INCLUDE_ASSERT_VALID 0
++
++/* Set non-zero to allow info about who has allocated what to appear in
++ * /proc/drivers/level5/mem.
++ * However, note that doing so can lead to a segfault when you unload the
++ * driver, and other weirdness; i.e. I don't think the code for it is quite
++ * right (written by Oktet, hacked by gel), but it does work well enough
++ * to be useful.
++ */
++#define CI_MEMLEAK_DEBUG_ALLOC_TABLE 0
++
++
++#endif /* __CI_TOOLS_CONFIG_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/debug.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/debug.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/debug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/debug.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,336 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_DEBUG_H__
++#define __CI_TOOLS_DEBUG_H__
++
++#define CI_LOG_E(x) x /* errors */
++#define CI_LOG_W(x) x /* warnings */
++#define CI_LOG_I(x) x /* information */
++#define CI_LOG_V(x) x /* verbose */
++
++/* Build time asserts. We paste the line number into the type name
++ * so that the macro can be used more than once per file even if the
++ * compiler objects to multiple identical typedefs. Collisions
++ * between uses in different header files are still possible. */
++#ifndef CI_BUILD_ASSERT
++#define __CI_BUILD_ASSERT_NAME(_x) __CI_BUILD_ASSERT_ILOATHECPP(_x)
++#define __CI_BUILD_ASSERT_ILOATHECPP(_x) __CI_BUILD_ASSERT__ ##_x
++#define CI_BUILD_ASSERT(e)\
++ typedef char __CI_BUILD_ASSERT_NAME(__LINE__)[(e)?1:-1]
++#endif
++
++
++#ifdef NDEBUG
++
++# define _ci_check(exp, file, line)
++# define _ci_assert2(e, x, y, file, line)
++# define _ci_assert(exp, file, line)
++# define _ci_assert_equal(exp1, exp2, file, line)
++# define _ci_assert_equiv(exp1, exp2, file, line)
++# define _ci_assert_nequal(exp1, exp2, file, line)
++# define _ci_assert_le(exp1, exp2, file, line)
++# define _ci_assert_lt(exp1, exp2, file, line)
++# define _ci_assert_ge(exp1, exp2, file, line)
++# define _ci_assert_gt(exp1, exp2, file, line)
++# define _ci_assert_impl(exp1, exp2, file, line)
++# define _ci_assert_or(exp1, exp2, file, line)
++# define _ci_assert_equal_msg(exp1, exp2, msg, file, line)
++
++# define _ci_verify(exp, file, line) \
++ do { \
++ (void)(exp); \
++ } while (0)
++
++# define CI_DEBUG_TRY(exp) \
++ do { \
++ (void)(exp); \
++ } while (0)
++
++#define CI_TRACE(exp,fmt)
++#define CI_TRACE_INT(integer)
++#define CI_TRACE_INT32(integer)
++#define CI_TRACE_INT64(integer)
++#define CI_TRACE_UINT(integer)
++#define CI_TRACE_UINT32(integer)
++#define CI_TRACE_UINT64(integer)
++#define CI_TRACE_HEX(integer)
++#define CI_TRACE_HEX32(integer)
++#define CI_TRACE_HEX64(integer)
++#define CI_TRACE_PTR(pointer)
++#define CI_TRACE_STRING(string)
++#define CI_TRACE_MAC(mac)
++#define CI_TRACE_IP(ip_be32)
++#define CI_TRACE_ARP(arp_pkt)
++
++#else
++
++# define _CI_ASSERT_FMT "\nfrom %s:%d"
++
++# define _ci_check(exp, file, line) \
++ do { \
++ if (CI_UNLIKELY(!(exp))) \
++ ci_warn(("ci_check(%s)"_CI_ASSERT_FMT, #exp, \
++ (file), (line))); \
++ } while (0)
++
++/*
++ * NOTE: ci_fail() emits the file and line where the assert is actually
++ * coded.
++ */
++
++# define _ci_assert(exp, file, line) \
++ do { \
++ if (CI_UNLIKELY(!(exp))) \
++ ci_fail(("ci_assert(%s)"_CI_ASSERT_FMT, #exp, \
++ (file), (line))); \
++ } while (0)
++
++# define _ci_assert2(e, x, y, file, line) do { \
++ if(CI_UNLIKELY( ! (e) )) \
++ ci_fail(("ci_assert(%s)\nwhere [%s=%"CI_PRIx64"] " \
++ "[%s=%"CI_PRIx64"]\nat %s:%d\nfrom %s:%d", #e \
++ , #x, (ci_uint64)(ci_uintptr_t)(x) \
++ , #y, (ci_uint64)(ci_uintptr_t)(y), \
++ __FILE__, __LINE__, (file), (line))); \
++ } while (0)
++
++# define _ci_verify(exp, file, line) \
++ do { \
++ if (CI_UNLIKELY(!(exp))) \
++ ci_fail(("ci_verify(%s)"_CI_ASSERT_FMT, #exp, \
++ (file), (line))); \
++ } while (0)
++
++# define _ci_assert_equal(x, y, f, l) _ci_assert2((x)==(y), x, y, (f), (l))
++# define _ci_assert_nequal(x, y, f, l) _ci_assert2((x)!=(y), x, y, (f), (l))
++# define _ci_assert_le(x, y, f, l) _ci_assert2((x)<=(y), x, y, (f), (l))
++# define _ci_assert_lt(x, y, f, l) _ci_assert2((x)< (y), x, y, (f), (l))
++# define _ci_assert_ge(x, y, f, l) _ci_assert2((x)>=(y), x, y, (f), (l))
++# define _ci_assert_gt(x, y, f, l) _ci_assert2((x)> (y), x, y, (f), (l))
++# define _ci_assert_or(x, y, f, l) _ci_assert2((x)||(y), x, y, (f), (l))
++# define _ci_assert_impl(x, y, f, l) _ci_assert2(!(x) || (y), x, y, (f), (l))
++# define _ci_assert_equiv(x, y, f, l) _ci_assert2(!(x)== !(y), x, y, (f), (l))
++
++#define _ci_assert_equal_msg(exp1, exp2, msg, file, line) \
++ do { \
++ if (CI_UNLIKELY((exp1)!=(exp2))) \
++ ci_fail(("ci_assert_equal_msg(%s == %s) were " \
++ "(%"CI_PRIx64":%"CI_PRIx64") with msg[%c%c%c%c]" \
++ _CI_ASSERT_FMT, #exp1, #exp2, \
++ (ci_uint64)(ci_uintptr_t)(exp1), \
++ (ci_uint64)(ci_uintptr_t)(exp2), \
++ (((ci_uint32)msg) >> 24) & 0xff, \
++ (((ci_uint32)msg) >> 16) & 0xff, \
++ (((ci_uint32)msg) >> 8 ) & 0xff, \
++ (((ci_uint32)msg) ) & 0xff, \
++ (file), (line))); \
++ } while (0)
++
++# define CI_DEBUG_TRY(exp) CI_TRY(exp)
++
++#define CI_TRACE(exp,fmt) \
++ ci_log("%s:%d:%s] " #exp "=" fmt, \
++ __FILE__, __LINE__, __FUNCTION__, (exp))
++
++
++#define CI_TRACE_INT(integer) \
++ ci_log("%s:%d:%s] " #integer "=%d", \
++ __FILE__, __LINE__, __FUNCTION__, (integer))
++
++
++#define CI_TRACE_INT32(integer) \
++ ci_log("%s:%d:%s] " #integer "=%d", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_int32)integer))
++
++
++#define CI_TRACE_INT64(integer) \
++ ci_log("%s:%d:%s] " #integer "=%lld", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_int64)integer))
++
++
++#define CI_TRACE_UINT(integer) \
++ ci_log("%s:%d:%s] " #integer "=%ud", \
++ __FILE__, __LINE__, __FUNCTION__, (integer))
++
++
++#define CI_TRACE_UINT32(integer) \
++ ci_log("%s:%d:%s] " #integer "=%ud", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint32)integer))
++
++
++#define CI_TRACE_UINT64(integer) \
++ ci_log("%s:%d:%s] " #integer "=%ulld", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint64)integer))
++
++
++#define CI_TRACE_HEX(integer) \
++ ci_log("%s:%d:%s] " #integer "=0x%x", \
++ __FILE__, __LINE__, __FUNCTION__, (integer))
++
++
++#define CI_TRACE_HEX32(integer) \
++ ci_log("%s:%d:%s] " #integer "=0x%x", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint32)integer))
++
++
++#define CI_TRACE_HEX64(integer) \
++ ci_log("%s:%d:%s] " #integer "=0x%llx", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint64)integer))
++
++
++#define CI_TRACE_PTR(pointer) \
++ ci_log("%s:%d:%s] " #pointer "=0x%p", \
++ __FILE__, __LINE__, __FUNCTION__, (pointer))
++
++
++#define CI_TRACE_STRING(string) \
++ ci_log("%s:%d:%s] " #string "=%s", \
++ __FILE__, __LINE__, __FUNCTION__, (string))
++
++
++#define CI_TRACE_MAC(mac) \
++ ci_log("%s:%d:%s] " #mac "=" CI_MAC_PRINTF_FORMAT, \
++ __FILE__, __LINE__, __FUNCTION__, CI_MAC_PRINTF_ARGS(mac))
++
++
++#define CI_TRACE_IP(ip_be32) \
++ ci_log("%s:%d:%s] " #ip_be32 "=" CI_IP_PRINTF_FORMAT, __FILE__, \
++ __LINE__, __FUNCTION__, CI_IP_PRINTF_ARGS(&(ip_be32)))
++
++
++#define CI_TRACE_ARP(arp_pkt) \
++ ci_log("%s:%d:%s]\n"CI_ARP_PRINTF_FORMAT, \
++ __FILE__, __LINE__, __FUNCTION__, CI_ARP_PRINTF_ARGS(arp_pkt))
++
++#endif /* NDEBUG */
++
++#define ci_check(exp) \
++ _ci_check(exp, __FILE__, __LINE__)
++
++#define ci_assert(exp) \
++ _ci_assert(exp, __FILE__, __LINE__)
++
++#define ci_verify(exp) \
++ _ci_verify(exp, __FILE__, __LINE__)
++
++#define ci_assert_equal(exp1, exp2) \
++ _ci_assert_equal(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_equal_msg(exp1, exp2, msg) \
++ _ci_assert_equal_msg(exp1, exp2, msg, __FILE__, __LINE__)
++
++#define ci_assert_nequal(exp1, exp2) \
++ _ci_assert_nequal(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_le(exp1, exp2) \
++ _ci_assert_le(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_lt(exp1, exp2) \
++ _ci_assert_lt(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_ge(exp1, exp2) \
++ _ci_assert_ge(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_gt(exp1, exp2) \
++ _ci_assert_gt(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_impl(exp1, exp2) \
++ _ci_assert_impl(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_equiv(exp1, exp2) \
++ _ci_assert_equiv(exp1, exp2, __FILE__, __LINE__)
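++
++/* Editor's sketch (not in the original source): typical use of the
++ * wrappers. In NDEBUG builds the asserts compile away entirely,
++ * while ci_verify() still evaluates its expression for side effects.
++ * [buf], [len] and [setup_buffer] are hypothetical. */
++#if 0
++  ci_assert(buf != NULL);
++  ci_assert_le(len, 512);
++  ci_verify(setup_buffer(buf, len) == 0);  /* kept even with NDEBUG */
++#endif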
++
++
++#define CI_TEST(exp) \
++ do{ \
++ if( CI_UNLIKELY(!(exp)) ) \
++ ci_fail(("CI_TEST(%s)", #exp)); \
++ }while(0)
++
++
++#define CI_TRY(exp) \
++ do{ \
++ int _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(_trc < 0) ) \
++ ci_sys_fail(#exp, _trc); \
++ }while(0)
++
++
++#define CI_TRY_RET(exp) \
++ do{ \
++ int _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(_trc < 0) ) { \
++ ci_log("%s returned %d at %s:%d", #exp, _trc, __FILE__, __LINE__); \
++ return _trc; \
++ } \
++ }while(0)
++
++#define CI_LOGLEVEL_TRY_RET(logfn, exp) \
++ do{ \
++ int _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(_trc < 0) ) { \
++ logfn (ci_log("%s returned %d at %s:%d", #exp, _trc, __FILE__, __LINE__)); \
++ return _trc; \
++ } \
++ }while(0)
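++
++/* Editor's sketch (not in the original source): CI_TRY_RET()
++ * propagates the first negative return code and logs where it
++ * happened. [dev_open] and [dev_configure] are hypothetical. */
++#if 0
++static int example_init(void)
++{
++  CI_TRY_RET(dev_open());       /* returns the code early on failure */
++  CI_TRY_RET(dev_configure());
++  return 0;
++}
++#endif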
++
++
++#define CI_SOCK_TRY(exp) \
++ do{ \
++ ci_sock_err_t _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(!ci_sock_errok(_trc)) ) \
++ ci_sys_fail(#exp, _trc.val); \
++ }while(0)
++
++
++#define CI_SOCK_TRY_RET(exp) \
++ do{ \
++ ci_sock_err_t _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(!ci_sock_errok(_trc)) ) { \
++ ci_log("%s returned %d at %s:%d", #exp, _trc.val, __FILE__, __LINE__); \
++ return ci_sock_errcode(_trc); \
++ } \
++ }while(0)
++
++
++#define CI_SOCK_TRY_SOCK_RET(exp) \
++ do{ \
++ ci_sock_err_t _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(!ci_sock_errok(_trc)) ) { \
++ ci_log("%s returned %d at %s:%d", #exp, _trc.val, __FILE__, __LINE__); \
++ return _trc; \
++ } \
++ }while(0)
++
++#endif /* __CI_TOOLS_DEBUG_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/log.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/log.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/log.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/log.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,262 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Functions for logging and pretty-printing.
++ * \date 2002/08/07
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_LOG_H__
++#define __CI_TOOLS_LOG_H__
++
++#include <stdarg.h>
++
++
++/**********************************************************************
++ * Logging.
++ */
++
++/* size of internal log buffer */
++#define CI_LOG_MAX_LINE 512
++/* uses of ci_log must ensure that all trace messages are shorter than this */
++#define CI_LOG_MAX_MSG_LENGTH (CI_LOG_MAX_LINE-50)
++
++extern void ci_vlog(const char* fmt, va_list args) CI_HF;
++extern void ci_log(const char* fmt, ...) CI_PRINTF_LIKE(1,2) CI_HF;
++
++ /*! Set the prefix for log messages.
++ **
++ ** Uses the storage pointed to by \em prefix. Therefore \em prefix must
++ ** be allocated on the heap, or statically.
++ */
++extern void ci_set_log_prefix(const char* prefix) CI_HF;
++
++typedef void (*ci_log_fn_t)(const char* msg);
++extern ci_log_fn_t ci_log_fn CI_HV;
++
++/* Log functions. */
++extern void ci_log_null(const char* msg) CI_HF;
++extern void ci_log_stderr(const char* msg) CI_HF;
++extern void ci_log_stdout(const char* msg) CI_HF;
++extern void ci_log_syslog(const char* msg) CI_HF;
++
++/*! Call the following to install special logging behaviours. */
++extern void ci_log_buffer_till_fail(void) CI_HF;
++extern void ci_log_buffer_till_exit(void) CI_HF;
++
++extern void __ci_log_unique(const char* msg) CI_HF;
++extern ci_log_fn_t __ci_log_unique_fn CI_HV;
++ci_inline void ci_log_uniquify(void) {
++ if( ci_log_fn != __ci_log_unique ) {
++ __ci_log_unique_fn = ci_log_fn;
++ ci_log_fn = __ci_log_unique;
++ }
++}
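++
++/* Editor's sketch (not in the original source): route log output to
++ * stderr and suppress repeated identical messages. The prefix string
++ * must outlive its use, since only the pointer is stored. */
++#if 0
++  ci_set_log_prefix("mydrv: ");  /* string literal: statically allocated */
++  ci_log_fn = ci_log_stderr;
++  ci_log_uniquify();
++#endif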
++
++extern void ci_log_file(const char* msg) CI_HF;
++extern int ci_log_file_fd CI_HV;
++
++extern void __ci_log_nth(const char* msg) CI_HF;
++extern ci_log_fn_t __ci_log_nth_fn CI_HV;
++extern int ci_log_nth_n CI_HV; /* default 100 */
++ci_inline void ci_log_nth(void) {
++ if( ci_log_fn != __ci_log_nth ) {
++ __ci_log_nth_fn = ci_log_fn;
++ ci_log_fn = __ci_log_nth;
++ }
++}
++
++extern int ci_log_level CI_HV;
++
++extern int ci_log_options CI_HV;
++#define CI_LOG_PID 0x1
++#define CI_LOG_TID 0x2
++#define CI_LOG_TIME 0x4
++#define CI_LOG_DELTA 0x8
++
++/**********************************************************************
++ * Used to define which mode we are in
++ */
++#if (defined(_WIN32) && !defined(__KERNEL__))
++typedef enum {
++ ci_log_md_NULL=0,
++ ci_log_md_ioctl,
++ ci_log_md_stderr,
++ ci_log_md_stdout,
++ ci_log_md_file,
++ ci_log_md_serial,
++ ci_log_md_syslog,
++ ci_log_md_pidfile
++} ci_log_mode_t;
++extern ci_log_mode_t ci_log_mode;
++#endif
++
++/**********************************************************************
++ * Pretty-printing.
++ */
++
++extern char ci_printable_char(char c) CI_HF;
++
++extern void (*ci_hex_dump_formatter)(char* buf, const ci_octet* s,
++ int i, int off, int len) CI_HV;
++extern void ci_hex_dump_format_octets(char*,const ci_octet*,int,int,int) CI_HF;
++extern void ci_hex_dump_format_dwords(char*,const ci_octet*,int,int,int) CI_HF;
++
++extern void ci_hex_dump_row(char* buf, volatile const void* s, int len,
++ ci_ptr_arith_t address) CI_HF;
++ /*!< A row contains up to 16 bytes. Row starts at [address & 15u], so
++ ** therefore [len + (address & 15u)] must be <= 16.
++ */
++
++extern void ci_hex_dump(ci_log_fn_t, volatile const void*,
++ int len, ci_ptr_arith_t address) CI_HF;
++
++extern int ci_hex_dump_to_raw(const char* src_hex, void* buf,
++ unsigned* addr_out_opt, int* skip) CI_HF;
++ /*!< Recovers raw data from a single line of a hex dump. [buf] must be at
++ ** least 16 bytes long. Returns the number of bytes written to [buf] (in
++ ** range 1 -> 16), or -1 if [src_hex] doesn't contain hex data. Does not
++ ** cope with missing bytes at the start of a line.
++ */
++
++extern int ci_format_eth_addr(char* buf, const void* eth_mac_addr,
++ char sep) CI_HF;
++ /*!< This will write 18 characters to <buf> including terminating null.
++ ** Returns number of bytes written excluding null. If [sep] is zero, ':'
++ ** is used.
++ */
++
++extern int ci_parse_eth_addr(void* eth_mac_addr,
++ const char* str, char sep) CI_HF;
++ /*!< If [sep] is zero, absolutely any separator is accepted (even
++ ** inconsistent separators). Returns 0 on success, -1 on error.
++ */
++
++extern int ci_format_ip4_addr(char* buf, unsigned addr_be32) CI_HF;
++ /*!< Formats the IP address (in network endian) in dotted-quad. Returns
++ ** the number of bytes written (up to 15), excluding the null. [buf]
++ ** must be at least 16 bytes long.
++ */
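++
++/* Editor's sketch (not in the original source): formatting addresses
++ * into caller-supplied buffers of the documented minimum sizes.
++ * [addr_be32] and [mac_bytes] are hypothetical inputs. */
++#if 0
++  char ip[16], mac[18];
++  ci_format_ip4_addr(ip, addr_be32);      /* dotted quad, <= 15 chars */
++  ci_format_eth_addr(mac, mac_bytes, 0);  /* ':'-separated, 17 chars  */
++  ci_log("ip=%s mac=%s", ip, mac);
++#endif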
++
++
++/**********************************************************************
++ * Error checking.
++ */
++
++extern void (*ci_fail_stop_fn)(void) CI_HV;
++
++extern void ci_fail_stop(void) CI_HF;
++extern void ci_fail_hang(void) CI_HF;
++extern void ci_fail_bomb(void) CI_HF;
++extern void ci_backtrace(void) CI_HF;
++
++#if defined __linux__ && !defined __KERNEL__
++extern void ci_fail_abort (void) CI_HF;
++#endif
++
++#ifdef __GNUC__
++extern void
++__ci_fail(const char*, ...) CI_PRINTF_LIKE(1,2) CI_HF;
++#else
++# if _PREFAST_
++ extern void _declspec(noreturn) __ci_fail(const char* fmt, ...);
++# else
++ extern void __ci_fail(const char* fmt, ...);
++# endif
++
++#endif
++
++#define ci_warn(x) \
++ do{ ci_log("WARN at %s:%d", __FILE__, __LINE__); }while(0)
++
++#define ci_fail(x) \
++ do{ ci_log("FAIL at %s:%d", __FILE__, __LINE__); __ci_fail x; }while(0)
++
++extern void __ci_sys_fail(const char* fn, int rc,
++ const char* file, int line) CI_HF;
++#define ci_sys_fail(fn, rc) __ci_sys_fail(fn, rc, __FILE__, __LINE__)
++
++/**********************************************************************
++ * Logging to buffer (src/citools/log_buffer.c)
++ */
++
++/*! Divert ci_log() messages to the log buffer;
++ * normally they go to the system console. */
++extern void ci_log_buffer_till_fail(void) CI_HF;
++
++/*! Dump the contents of the log buffer to the system console */
++extern void ci_log_buffer_dump(void) CI_HF;
++
++
++/**********************************************************************
++ * Some useful pretty-printing.
++ */
++
++#ifdef __linux__
++# define CI_SOCKCALL_FLAGS_FMT "%s%s%s%s%s%s%s%s%s%s%s"
++
++# define CI_SOCKCALL_FLAGS_PRI_ARG(x) \
++ (((x) & MSG_OOB ) ? "OOB " :""), \
++ (((x) & MSG_PEEK ) ? "PEEK " :""), \
++ (((x) & MSG_DONTROUTE ) ? "DONTROUTE " :""), \
++ (((x) & MSG_EOR ) ? "EOR " :""), \
++ (((x) & MSG_CTRUNC ) ? "CTRUNC " :""), \
++ (((x) & MSG_TRUNC ) ? "TRUNC " :""), \
++ (((x) & MSG_WAITALL ) ? "WAITALL " :""), \
++ (((x) & MSG_DONTWAIT ) ? "DONTWAIT " :""), \
++ (((x) & MSG_NOSIGNAL ) ? "NOSIGNAL " :""), \
++ (((x) & MSG_ERRQUEUE ) ? "ERRQUEUE " :""), \
++ (((x) & MSG_CONFIRM ) ? "CONFIRM " :"")
++#endif
++
++#ifdef _WIN32
++# define CI_SOCKCALL_FLAGS_FMT "%s%s%s"
++
++# define CI_SOCKCALL_FLAGS_PRI_ARG(x) \
++ (((x) & MSG_OOB ) ? "OOB " :""), \
++ (((x) & MSG_PEEK ) ? "PEEK " :""), \
++ (((x) & MSG_DONTROUTE ) ? "DONTROUTE " :"")
++#endif
++
++#ifdef __sun__
++# define CI_SOCKCALL_FLAGS_FMT "%s%s%s%s%s%s%s%s%s"
++
++# define CI_SOCKCALL_FLAGS_PRI_ARG(x) \
++ (((x) & MSG_OOB ) ? "OOB " :""), \
++ (((x) & MSG_PEEK ) ? "PEEK " :""), \
++ (((x) & MSG_DONTROUTE ) ? "DONTROUTE " :""), \
++ (((x) & MSG_EOR ) ? "EOR " :""), \
++ (((x) & MSG_CTRUNC ) ? "CTRUNC " :""), \
++ (((x) & MSG_TRUNC ) ? "TRUNC " :""), \
++ (((x) & MSG_WAITALL ) ? "WAITALL " :""), \
++ (((x) & MSG_DONTWAIT ) ? "DONTWAIT " :""), \
++ (((x) & MSG_NOTIFICATION) ? "NOTIFICATION" :"")
++#endif
++
++#endif /* __CI_TOOLS_LOG_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,361 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools_platform */
++
++#ifndef __CI_TOOLS_GCC_X86_H__
++#define __CI_TOOLS_GCC_X86_H__
++
++
++/**********************************************************************
++ * Free-running cycle counters.
++ */
++
++#define CI_HAVE_FRC64
++#define CI_HAVE_FRC32
++
++#define ci_frc32(pval) __asm__ __volatile__("rdtsc" : "=a" (*pval) : : "edx")
++
++#if defined(__x86_64__)
++ci_inline void ci_frc64(ci_uint64* pval) {
++ /* temp fix until we figure out how to get this out in one bite */
++ ci_uint64 low, high;
++ __asm__ __volatile__("rdtsc" : "=a" (low) , "=d" (high));
++ *pval = (high << 32) | low;
++}
++
++#else
++#define ci_frc64(pval) __asm__ __volatile__("rdtsc" : "=A" (*pval))
++#endif
++
++#define ci_frc_flush() /* ?? Need a pipeline barrier. */
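++
++/* Editor's sketch (not in the original source): timing a code section
++ * with the free-running counter; the result is a raw TSC cycle count
++ * (unserialised, so only indicative for short sections). */
++ci_inline ci_uint64 __ci_example_time_call(void (*fn)(void)) {
++  ci_uint64 t0, t1;
++  ci_frc64(&t0);
++  fn();
++  ci_frc64(&t1);
++  return t1 - t0;
++}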
++
++
++/**********************************************************************
++ * Atomic integer.
++ */
++
++/*
++** int ci_atomic_read(a) { return a->n; }
++** void ci_atomic_set(a, v) { a->n = v; }
++** void ci_atomic_inc(a) { ++a->n; }
++** void ci_atomic_dec(a) { --a->n; }
++** int ci_atomic_inc_and_test(a) { return ++a->n == 0; }
++** int ci_atomic_dec_and_test(a) { return --a->n == 0; }
++** void ci_atomic_and(a, v) { a->n &= v; }
++** void ci_atomic_or(a, v) { a->n |= v; }
++*/
++
++typedef struct { volatile ci_int32 n; } ci_atomic_t;
++
++#define CI_ATOMIC_INITIALISER(i) {(i)}
++
++static inline ci_int32 ci_atomic_read(const ci_atomic_t* a) { return a->n; }
++static inline void ci_atomic_set(ci_atomic_t* a, int v) { a->n = v; ci_wmb(); }
++
++static inline void ci_atomic_inc(ci_atomic_t* a)
++{ __asm__ __volatile__("lock; incl %0" : "+m" (a->n)); }
++
++
++static inline void ci_atomic_dec(ci_atomic_t* a)
++{ __asm__ __volatile__("lock; decl %0" : "+m" (a->n)); }
++
++static inline int ci_atomic_inc_and_test(ci_atomic_t* a) {
++ char r;
++ __asm__ __volatile__("lock; incl %0; sete %1"
++ : "+m" (a->n), "=qm" (r));
++ return r;
++}
++
++static inline int ci_atomic_dec_and_test(ci_atomic_t* a) {
++ char r;
++ __asm__ __volatile__("lock; decl %0; sete %1"
++ : "+m" (a->n), "=qm" (r));
++ return r;
++}
++
++ci_inline int
++ci_atomic_xadd (ci_atomic_t *a, int v) {
++ __asm__ ("lock xadd %0, %1" : "=r" (v), "+m" (a->n) : "0" (v));
++ return v;
++}
++ci_inline int
++ci_atomic_xchg (ci_atomic_t *a, int v) {
++ __asm__ ("lock xchg %0, %1" : "=r" (v), "+m" (a->n) : "0" (v));
++ return v;
++}
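++
++/* Editor's sketch (not in the original source): ci_atomic_xadd() is a
++ * fetch-and-add, e.g. handing out ticket numbers from a shared
++ * counter; each caller gets a distinct pre-increment value. */
++ci_inline int __ci_example_take_ticket(ci_atomic_t* counter)
++{ return ci_atomic_xadd(counter, 1); }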
++
++ci_inline void ci_atomic32_or(volatile ci_uint32* p, ci_uint32 mask)
++{ __asm__ __volatile__("lock; orl %1, %0" : "+m" (*p) : "ir" (mask)); }
++
++ci_inline void ci_atomic32_and(volatile ci_uint32* p, ci_uint32 mask)
++{ __asm__ __volatile__("lock; andl %1, %0" : "+m" (*p) : "ir" (mask)); }
++
++ci_inline void ci_atomic32_add(volatile ci_uint32* p, ci_uint32 v)
++{ __asm__ __volatile__("lock; addl %1, %0" : "+m" (*p) : "ir" (v)); }
++
++#define ci_atomic_or(a, v) ci_atomic32_or ((ci_uint32*) &(a)->n, (v))
++#define ci_atomic_and(a, v) ci_atomic32_and((ci_uint32*) &(a)->n, (v))
++#define ci_atomic_add(a, v) ci_atomic32_add((ci_uint32*) &(a)->n, (v))
++
++extern int ci_glibc_uses_nptl (void) CI_HF;
++extern int ci_glibc_nptl_broken(void) CI_HF;
++extern int ci_glibc_gs_get_is_multihreaded_offset (void) CI_HF;
++extern int ci_glibc_gs_is_multihreaded_offset CI_HV;
++
++#if !defined(__x86_64__)
++#ifdef __GLIBC__
++/* Returns non-zero if the calling process might be multithreaded, returns 0 if
++ * it definitely isn't (i.e. if reimplementing this function for other
++ * architectures and platforms, you can safely just return 1).
++ */
++static inline int ci_is_multithreaded (void) {
++
++ while (1) {
++ if (ci_glibc_gs_is_multihreaded_offset >= 0) {
++ /* NPTL keeps a variable that tells us this hanging off gs (i.e. in thread-
++ * local storage); just return this
++ */
++ int r;
++ __asm__ __volatile__ ("movl %%gs:(%1), %0"
++ : "=r" (r)
++ : "r" (ci_glibc_gs_is_multihreaded_offset));
++ return r;
++ }
++
++ if (ci_glibc_gs_is_multihreaded_offset == -2) {
++ /* This means we've already determined that the libc version is NOT good
++ * for our funky "is multithreaded" hack
++ */
++ return 1;
++ }
++
++ /* If we get here, it means this is the first time the function has been
++ * called -- detect the libc version and go around again.
++ */
++ ci_glibc_gs_is_multihreaded_offset = ci_glibc_gs_get_is_multihreaded_offset ();
++
++ /* Go around again. We do the test here rather than at the top so that we go
++ * quicker in the common case
++ */
++ }
++}
++
++#else /* def __GLIBC__ */
++
++#define ci_is_multithreaded() 1 /* ?? Is there a POSIX way of finding */
++ /* out whether the application is */
++ /* single threaded? */
++
++#endif /* def __GLIBC__ */
++
++#else /* defined __x86_64__ */
++
++static inline int ci_is_multithreaded (void) {
++ /* No easy way to tell on x86_64; so assume we're multithreaded */
++ return 1;
++}
++
++#endif /* defined __x86_64__ */
++
++
++/**********************************************************************
++ * Compare and swap.
++ */
++
++#define CI_HAVE_COMPARE_AND_SWAP
++
++ci_inline int ci_cas32_succeed(volatile ci_int32* p, ci_int32 oldval,
++ ci_int32 newval) {
++ char ret;
++ ci_int32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas32_fail(volatile ci_int32* p, ci_int32 oldval,
++ ci_int32 newval) {
++ char ret;
++ ci_int32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++#ifdef __x86_64__
++ci_inline int ci_cas64_succeed(volatile ci_int64* p, ci_int64 oldval,
++ ci_int64 newval) {
++ char ret;
++ ci_int64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas64_fail(volatile ci_int64* p, ci_int64 oldval,
++ ci_int64 newval) {
++ char ret;
++ ci_int64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++#endif
++
++ci_inline int ci_cas32u_succeed(volatile ci_uint32* p, ci_uint32 oldval, ci_uint32 newval) {
++ char ret;
++ ci_uint32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas32u_fail(volatile ci_uint32* p, ci_uint32 oldval, ci_uint32 newval) {
++ char ret;
++ ci_uint32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas64u_succeed(volatile ci_uint64* p, ci_uint64 oldval,
++ ci_uint64 newval) {
++ char ret;
++ ci_uint64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas64u_fail(volatile ci_uint64* p, ci_uint64 oldval,
++ ci_uint64 newval) {
++ char ret;
++ ci_uint64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++#ifdef __x86_64__
++
++# define ci_cas_uintptr_succeed(p,o,n) \
++ ci_cas64u_succeed((volatile ci_uint64*) (p), (o), (n))
++# define ci_cas_uintptr_fail(p,o,n) \
++ ci_cas64u_fail((volatile ci_uint64*) (p), (o), (n))
++
++#else
++
++# define ci_cas_uintptr_succeed(p,o,n) \
++ ci_cas32u_succeed((volatile ci_uint32*) (p), (o), (n))
++# define ci_cas_uintptr_fail(p,o,n) \
++ ci_cas32u_fail((volatile ci_uint32*) (p), (o), (n))
++
++#endif
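++
++/* Editor's sketch (not in the original source): the usual CAS retry
++ * loop, here atomically adding [v] and returning the previous value.
++ * The loop re-reads on contention until the swap succeeds. */
++ci_inline ci_int32 __ci_example_atomic_add(volatile ci_int32* p,
++                                           ci_int32 v) {
++  ci_int32 old;
++  do
++    old = *p;                                    /* snapshot current value */
++  while( ! ci_cas32_succeed(p, old, old + v) );  /* retry if it raced */
++  return old;
++}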
++
++
++/**********************************************************************
++ * Atomic bit field.
++ */
++
++typedef ci_uint32 ci_bits;
++#define CI_BITS_N 32u
++
++#define CI_BITS_DECLARE(name, n) \
++ ci_bits name[((n) + CI_BITS_N - 1u) / CI_BITS_N]
++
++ci_inline void ci_bits_clear_all(volatile ci_bits* b, int n_bits)
++{ memset((void*) b, 0, (n_bits+CI_BITS_N-1u) / CI_BITS_N * sizeof(ci_bits)); }
++
++ci_inline void ci_bit_set(volatile ci_bits* b, int i) {
++ __asm__ __volatile__("lock; btsl %1, %0"
++ : "=m" (*b)
++ : "Ir" (i));
++}
++
++ci_inline void ci_bit_clear(volatile ci_bits* b, int i) {
++ __asm__ __volatile__("lock; btrl %1, %0"
++ : "=m" (*b)
++ : "Ir" (i));
++}
++
++ci_inline int ci_bit_test(volatile ci_bits* b, int i) {
++ char rc;
++ __asm__("btl %2, %1; setc %0"
++ : "=r" (rc)
++ : "m" (*b), "Ir" (i));
++ return rc;
++}
++
++ci_inline int ci_bit_test_and_set(volatile ci_bits* b, int i) {
++ char rc;
++ __asm__ __volatile__("lock; btsl %2, %1; setc %0"
++ : "=r" (rc), "+m" (*b)
++ : "Ir" (i));
++ return rc;
++}
++
++ci_inline int ci_bit_test_and_clear(volatile ci_bits* b, int i) {
++ char rc;
++ __asm__ __volatile__("lock; btrl %2, %1; setc %0"
++ : "=r" (rc), "+m" (*b)
++ : "Ir" (i));
++ return rc;
++}
++
++/* These mask ops only work within a single ci_bits word. */
++#define ci_bit_mask_set(b,m) ci_atomic32_or((b), (m))
++#define ci_bit_mask_clear(b,m) ci_atomic32_and((b), ~(m))
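++
++/* Editor's sketch (not in the original source): declare a 64-bit
++ * field and atomically claim one flag; exactly one caller sees the
++ * bit clear and wins. [example_flags] is hypothetical. */
++#if 0
++static CI_BITS_DECLARE(example_flags, 64);
++ci_inline int example_claim_flag(void)
++{ return ! ci_bit_test_and_set(example_flags, 3); } /* 1 if we set it */
++#endif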
++
++
++/**********************************************************************
++ * Misc.
++ */
++
++#if __GNUC__ >= 3
++# define ci_spinloop_pause() __asm__("pause")
++#else
++# define ci_spinloop_pause() __asm__(".byte 0xf3, 0x90")
++#endif
++
++
++#define CI_HAVE_ADDC32
++#define ci_add_carry32(sum, v) __asm__("addl %1, %0 ;" \
++ "adcl $0, %0 ;" \
++ : "=r" (sum) \
++ : "g" ((ci_uint32) v), "0" (sum))
++
++
++#endif /* __CI_TOOLS_GCC_X86_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,362 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++
++/*! \cidoxg_include_ci_tools_platform */
++
++#ifndef __CI_TOOLS_LINUX_KERNEL_H__
++#define __CI_TOOLS_LINUX_KERNEL_H__
++
++/**********************************************************************
++ * Need to know the kernel version.
++ */
++
++#ifndef LINUX_VERSION_CODE
++# include <linux/version.h>
++# ifndef UTS_RELEASE
++ /* 2.6.18 onwards defines UTS_RELEASE in a separate header */
++# include <linux/utsrelease.h>
++# endif
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) || \
++ LINUX_VERSION_CODE >= KERNEL_VERSION(2,7,0)
++# error "Linux 2.6 required"
++#endif
++
++
++#include <linux/slab.h> /* kmalloc / kfree */
++#include <linux/vmalloc.h> /* vmalloc / vfree */
++#include <linux/interrupt.h>/* in_interrupt() */
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/spinlock.h>
++#include <linux/highmem.h>
++#include <linux/smp_lock.h>
++#include <linux/ctype.h>
++#include <linux/uio.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/kmap_types.h>
++#include <asm/semaphore.h>
++
++#include <ci/tools/config.h>
++
++#define ci_in_irq in_irq
++#define ci_in_interrupt in_interrupt
++#define ci_in_atomic in_atomic
++
++
++/**********************************************************************
++ * Misc stuff.
++ */
++
++#ifdef BUG
++# define CI_BOMB BUG
++#endif
++
++ci_inline void* __ci_alloc(size_t n)
++{ return kmalloc(n, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)); }
++
++ci_inline void* __ci_atomic_alloc(size_t n)
++{ return kmalloc(n, GFP_ATOMIC ); }
++
++ci_inline void __ci_free(void* p) { return kfree(p); }
++ci_inline void* __ci_vmalloc(size_t n) { return vmalloc(n); }
++ci_inline void __ci_vfree(void* p) { return vfree(p); }
++
++
++#if CI_MEMLEAK_DEBUG_ALLOC_TABLE
++ #define ci_alloc(s) ci_alloc_memleak_debug (s, __FILE__, __LINE__)
++ #define ci_atomic_alloc(s) ci_atomic_alloc_memleak_debug(s, __FILE__, __LINE__)
++ #define ci_free ci_free_memleak_debug
++ #define ci_vmalloc(s) ci_vmalloc_memleak_debug (s, __FILE__,__LINE__)
++ #define ci_vfree ci_vfree_memleak_debug
++ #define ci_alloc_fn ci_alloc_fn_memleak_debug
++ #define ci_vmalloc_fn ci_vmalloc_fn_memleak_debug
++#else /* !CI_MEMLEAK_DEBUG_ALLOC_TABLE */
++ #define ci_alloc_fn __ci_alloc
++ #define ci_vmalloc_fn __ci_vmalloc
++#endif
++
++#ifndef ci_alloc
++ #define ci_atomic_alloc __ci_atomic_alloc
++ #define ci_alloc __ci_alloc
++ #define ci_free __ci_free
++ #define ci_vmalloc __ci_vmalloc
++ #define ci_vmalloc_fn __ci_vmalloc
++ #define ci_vfree __ci_vfree
++#endif
++
++#define ci_sprintf sprintf
++#define ci_vsprintf vsprintf
++#define ci_snprintf snprintf
++#define ci_vsnprintf vsnprintf
++#define ci_sscanf sscanf
++
++
++#define CI_LOG_FN_DEFAULT ci_log_syslog
++
++
++/*--------------------------------------------------------------------
++ *
++ * irqs_disabled - needed for kmap helpers on some kernels
++ *
++ *--------------------------------------------------------------------*/
++#ifdef irqs_disabled
++# define ci_irqs_disabled irqs_disabled
++#else
++# if defined(__i386__) || defined(__x86_64__)
++# define ci_irqs_disabled(x) \
++ ({ \
++ unsigned long flags; \
++ local_save_flags(flags); \
++ !(flags & (1<<9)); \
++ })
++# else
++# error "Need to implement irqs_disabled() for your architecture"
++# endif
++#endif
++
++
++/**********************************************************************
++ * kmap helpers.
++ *
++ * Use ci_k(un)map for code paths which are not in an atomic context.
++ * For atomic code you need to use ci_k(un)map_in_atomic. This will grab
++ * one of the per-CPU kmap slots.
++ *
++ * NB in_interrupt != in_irq. If you don't know the difference then
++ * don't use kmap_in_atomic
++ *
++ * 2.4 allocates kmap slots by function. We are going to re-use the
++ * skb module's slot - we also use the same interlock
++ *
++ * 2.6 allocates kmap slots by type as well as by function. We are
++ * going to use the currently (2.6.10) unused SOFTIRQ slot
++ *
++ */
++
++ci_inline void* ci_kmap(struct page *page) {
++ CI_DEBUG(if( ci_in_atomic() || ci_in_interrupt() || ci_in_irq() ) BUG());
++ return kmap(page);
++}
++
++ci_inline void ci_kunmap(struct page *page) {
++ kunmap(page);
++}
++
++#define CI_KM_SLOT KM_SOFTIRQ0
++
++
++typedef struct semaphore ci_semaphore_t;
++
++ci_inline void
++ci_sem_init (ci_semaphore_t *sem, int val) {
++ sema_init (sem, val);
++}
++
++ci_inline void
++ci_sem_down (ci_semaphore_t *sem) {
++ down (sem);
++}
++
++ci_inline int
++ci_sem_trydown (ci_semaphore_t *sem) {
++ return down_trylock (sem);
++}
++
++ci_inline void
++ci_sem_up (ci_semaphore_t *sem) {
++ up (sem);
++}
++
++ci_inline int
++ci_sem_get_count(ci_semaphore_t *sem) {
++ return sem->count.counter;
++}
++
++ci_inline void* ci_kmap_in_atomic(struct page *page)
++{
++ CI_DEBUG(if( ci_in_irq() ) BUG());
++
++ /* iSCSI can call without in_interrupt() but with irqs_disabled()
++ and in a context that can't sleep, so we need to check that
++ too */
++ if(ci_in_interrupt() || ci_irqs_disabled())
++ return kmap_atomic(page, CI_KM_SLOT);
++ else
++ return kmap(page);
++}
++
++ci_inline void ci_kunmap_in_atomic(struct page *page, void* kaddr)
++{
++ CI_DEBUG(if( ci_in_irq() ) BUG());
++
++ /* iSCSI can call without in_interrupt() but with irqs_disabled()
++ and in a context that can't sleep, so we need to check that
++ too */
++ if(ci_in_interrupt() || ci_irqs_disabled())
++ kunmap_atomic(kaddr, CI_KM_SLOT);
++ else
++ kunmap(page);
++}
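++
++/* Editor's sketch (not in the original source): copying out of a page
++ * from a context that may or may not be atomic; the helper picks
++ * kmap() or the per-CPU atomic slot as appropriate. Assumes
++ * <linux/string.h> is visible for memcpy(). */
++ci_inline void __ci_example_copy_from_page(struct page* pg, void* dst,
++                                           size_t len) {
++  void* kaddr = ci_kmap_in_atomic(pg);
++  memcpy(dst, kaddr, len);
++  ci_kunmap_in_atomic(pg, kaddr);
++}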
++
++/**********************************************************************
++ * spinlock implementation: used by <ci/tools/spinlock.h>
++ */
++
++#define CI_HAVE_SPINLOCKS
++
++typedef ci_uintptr_t ci_lock_holder_t;
++#define ci_lock_thisthread (ci_lock_holder_t)current
++#define ci_lock_no_holder (ci_lock_holder_t)NULL
++
++typedef spinlock_t ci_lock_i;
++typedef spinlock_t ci_irqlock_i;
++typedef unsigned long ci_irqlock_state_t;
++
++#define IRQLOCK_CYCLES 500000
++
++#define ci_lock_ctor_i(l) spin_lock_init(l)
++#define ci_lock_dtor_i(l) do{}while(0)
++#define ci_lock_lock_i(l) spin_lock(l)
++#define ci_lock_trylock_i(l) spin_trylock(l)
++#define ci_lock_unlock_i(l) spin_unlock(l)
++
++#define ci_irqlock_ctor_i(l) spin_lock_init(l)
++#define ci_irqlock_dtor_i(l) do{}while(0)
++#define ci_irqlock_lock_i(l,s) spin_lock_irqsave(l,*(s))
++#define ci_irqlock_unlock_i(l,s) spin_unlock_irqrestore(l, *(s))
++
++
++/**********************************************************************
++ * register access
++ */
++
++#include <asm/io.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++typedef volatile void __iomem* ioaddr_t;
++#else
++typedef unsigned long ioaddr_t;
++#endif
++
++
++
++/**********************************************************************
++ * thread implementation -- kernel dependencies probably should be
++ * moved to driver/linux_kernel.h
++ */
++
++#define ci_linux_daemonize(name) daemonize(name)
++
++#include <linux/workqueue.h>
++
++
++typedef struct {
++ void* (*fn)(void* arg);
++ void* arg;
++ const char* name;
++ int thrd_id;
++ struct completion exit_event;
++ struct work_struct keventd_witem;
++} ci_kernel_thread_t;
++
++
++typedef ci_kernel_thread_t* cithread_t;
++
++
++extern int cithread_create(cithread_t* tid, void* (*fn)(void*), void* arg,
++ const char* name);
++extern int cithread_detach(cithread_t kt);
++extern int cithread_join(cithread_t kt);
++
++
++/* Kernel sysctl variables. */
++extern int sysctl_tcp_wmem[3];
++extern int sysctl_tcp_rmem[3];
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++#define LINUX_HAS_SYSCTL_MEM_MAX
++extern ci_uint32 sysctl_wmem_max;
++extern ci_uint32 sysctl_rmem_max;
++#endif
++
++
++/*--------------------------------------------------------------------
++ *
++ * ci_bigbuf_t: An abstraction of a large buffer. Needed because in the
++ * Linux kernel, large buffers need to be allocated with vmalloc(), whereas
++ * smaller buffers should use kmalloc(). This abstraction chooses the
++ * appropriate mechanism.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ char* p;
++ int is_vmalloc;
++} ci_bigbuf_t;
++
++
++ci_inline int ci_bigbuf_alloc(ci_bigbuf_t* bb, size_t bytes) {
++ if( bytes >= CI_PAGE_SIZE && ! ci_in_atomic() ) {
++ bb->is_vmalloc = 1;
++ if( (bb->p = vmalloc(bytes)) ) return 0;
++ }
++ bb->is_vmalloc = 0;
++ bb->p = kmalloc(bytes, ci_in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
++ return bb->p ? 0 : -ENOMEM;
++}
++
++ci_inline void ci_bigbuf_free(ci_bigbuf_t* bb) {
++ if( bb->is_vmalloc ) vfree(bb->p);
++ else kfree(bb->p);
++}
++
++ci_inline char* ci_bigbuf_ptr(ci_bigbuf_t* bb)
++{ return bb->p; }
++
++/**********************************************************************
++ * struct iovec abstraction (for Windows port)
++ */
++
++typedef struct iovec ci_iovec;
++
++/* Accessors for buffer/length */
++#define CI_IOVEC_BASE(i) ((i)->iov_base)
++#define CI_IOVEC_LEN(i) ((i)->iov_len)
++
++/**********************************************************************
++ * Signals
++ */
++
++ci_inline void
++ci_send_sig(int signum)
++{
++ send_sig(signum, current, 0);
++}
++
++#endif /* __CI_TOOLS_LINUX_KERNEL_H__ */
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netback/ci/tools/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/ci/tools/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,132 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_SYSDEP_H__
++#define __CI_TOOLS_SYSDEP_H__
++
++/* Make this header self-sufficient */
++#include <ci/compat.h>
++#include <ci/tools/log.h>
++#include <ci/tools/debug.h>
++
++
++/**********************************************************************
++ * Platform dependencies.
++ */
++
++#if defined(__KERNEL__)
++
++# if defined(__linux__)
++# include <ci/tools/platform/linux_kernel.h>
++# elif defined(_WIN32)
++# include <ci/tools/platform/win32_kernel.h>
++# elif defined(__sun__)
++# include <ci/tools/platform/sunos_kernel.h>
++# else
++# error Unknown platform.
++# endif
++
++#elif defined(_WIN32)
++
++# include <ci/tools/platform/win32.h>
++
++#elif defined(__unix__)
++
++# include <ci/tools/platform/unix.h>
++
++#else
++
++# error Unknown platform.
++
++#endif
++
++#if defined(__linux__)
++/*! Linux sendfile() support enable/disable. */
++# define CI_HAVE_SENDFILE /* provide sendfile i/f */
++
++# define CI_HAVE_OS_NOPAGE
++#endif
++
++#if defined(__sun__)
++# define CI_HAVE_SENDFILE /* provide sendfile i/f */
++# define CI_HAVE_SENDFILEV /* provide sendfilev i/f */
++
++# define CI_IOCTL_SENDFILE /* use efrm CI_SENDFILEV ioctl */
++#endif
++
++#if defined(_WIN32)
++typedef ci_uint32 ci_uerr_t; /* range of OS user-mode return codes */
++typedef ci_uint32 ci_kerr_t; /* range of OS kernel-mode return codes */
++#elif defined(__unix__)
++typedef ci_int32 ci_uerr_t; /* range of OS user-mode return codes */
++typedef ci_int32 ci_kerr_t; /* range of OS kernel-mode return codes */
++#endif
++
++
++/**********************************************************************
++ * Compiler and processor dependencies.
++ */
++
++#if defined(__GNUC__)
++
++#if defined(__i386__) || defined(__x86_64__)
++# include <ci/tools/platform/gcc_x86.h>
++#elif defined(__PPC__)
++# include <ci/tools/platform/gcc_ppc.h>
++#elif defined(__ia64__)
++# include <ci/tools/platform/gcc_ia64.h>
++#else
++# error Unknown processor.
++#endif
++
++#elif defined(_MSC_VER)
++
++#if defined(__i386__)
++# include <ci/tools/platform/msvc_x86.h>
++# elif defined(__x86_64__)
++# include <ci/tools/platform/msvc_x86_64.h>
++#else
++# error Unknown processor.
++#endif
++
++#elif defined(__PGI)
++
++# include <ci/tools/platform/pg_x86.h>
++
++#elif defined(__INTEL_COMPILER)
++
++/* Intel compilers v7 claim to be very gcc compatible. */
++# include <ci/tools/platform/gcc_x86.h>
++
++#else
++# error Unknown compiler.
++#endif
++
++
++#endif /* __CI_TOOLS_SYSDEP_H__ */
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netback/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/Makefile
+--- linux-2.6.18.8/drivers/xen/sfc_netback/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netback/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,12 @@
++EXTRA_CFLAGS += -Idrivers/xen/sfc_netback -Idrivers/xen/sfc_netutil -Idrivers/xen/netback -Idrivers/net/sfc
++EXTRA_CFLAGS += -D__ci_driver__
++EXTRA_CFLAGS += -DEFX_USE_KCOMPAT
++EXTRA_CFLAGS += -Werror
++
++ifdef GCOV
++EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage -DEFX_GCOV
++endif
++
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) := sfc_netback.o
++
++sfc_netback-objs := accel.o accel_fwd.o accel_msg.o accel_solarflare.o accel_xenbus.o accel_debugfs.o
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_bufs.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_bufs.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_bufs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_bufs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,393 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/gnttab.h>
++
++#include "accel_bufs.h"
++#include "accel_util.h"
++
++#include "accel.h"
++
++
++static int
++netfront_accel_alloc_buf_desc_blocks(struct netfront_accel_bufinfo *manager,
++ int pages)
++{
++ manager->desc_blocks =
++ kzalloc(sizeof(struct netfront_accel_pkt_desc *) *
++ NETFRONT_ACCEL_BUF_NUM_BLOCKS(pages), GFP_KERNEL);
++ if (manager->desc_blocks == NULL) {
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int
++netfront_accel_alloc_buf_lists(struct netfront_accel_bufpages *bufpages,
++ int pages)
++{
++ bufpages->page_list = kmalloc(pages * sizeof(void *), GFP_KERNEL);
++ if (bufpages->page_list == NULL) {
++ return -ENOMEM;
++ }
++
++ bufpages->grant_list = kzalloc(pages * sizeof(grant_ref_t), GFP_KERNEL);
++ if (bufpages->grant_list == NULL) {
++ kfree(bufpages->page_list);
++ bufpages->page_list = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++
++int netfront_accel_alloc_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_manager,
++ struct netfront_accel_bufinfo *tx_manager,
++ int pages)
++{
++ int n, rc;
++
++ if ((rc = netfront_accel_alloc_buf_desc_blocks
++ (rx_manager, pages - (pages / sfc_netfront_buffer_split))) < 0) {
++ goto rx_fail;
++ }
++
++ if ((rc = netfront_accel_alloc_buf_desc_blocks
++ (tx_manager, pages / sfc_netfront_buffer_split)) < 0) {
++ goto tx_fail;
++ }
++
++ if ((rc = netfront_accel_alloc_buf_lists(bufpages, pages)) < 0) {
++ goto lists_fail;
++ }
++
++ for (n = 0; n < pages; n++) {
++ void *tmp = (void*)__get_free_page(GFP_KERNEL);
++ if (tmp == NULL)
++ break;
++
++ bufpages->page_list[n] = tmp;
++ }
++
++ if (n != pages) {
++ EPRINTK("%s: not enough pages: %d != %d\n", __FUNCTION__, n,
++ pages);
++ for (; n >= 0; n--)
++ free_page((unsigned long)(bufpages->page_list[n]));
++ rc = -ENOMEM;
++ goto pages_fail;
++ }
++
++ bufpages->max_pages = pages;
++ bufpages->page_reqs = 0;
++
++ return 0;
++
++ pages_fail:
++ kfree(bufpages->page_list);
++ kfree(bufpages->grant_list);
++
++ bufpages->page_list = NULL;
++ bufpages->grant_list = NULL;
++ lists_fail:
++ kfree(tx_manager->desc_blocks);
++ tx_manager->desc_blocks = NULL;
++
++ tx_fail:
++ kfree(rx_manager->desc_blocks);
++ rx_manager->desc_blocks = NULL;
++ rx_fail:
++ return rc;
++}
++
++
++void netfront_accel_free_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_manager,
++ struct netfront_accel_bufinfo *tx_manager)
++{
++ int i;
++
++ for (i = 0; i < bufpages->max_pages; i++) {
++ if (bufpages->grant_list[i] != 0)
++ net_accel_ungrant_page(bufpages->grant_list[i]);
++ free_page((unsigned long)(bufpages->page_list[i]));
++ }
++
++ if (bufpages->max_pages) {
++ kfree(bufpages->page_list);
++ kfree(bufpages->grant_list);
++ kfree(rx_manager->desc_blocks);
++ kfree(tx_manager->desc_blocks);
++ }
++}
++
++
++/*
++ * Allocate memory for the buffer manager and create a lock. If no
++ * lock is supplied, one is allocated internally.
++ */
++struct netfront_accel_bufinfo *netfront_accel_init_bufs(spinlock_t *lock)
++{
++ struct netfront_accel_bufinfo *res = kmalloc(sizeof(*res), GFP_KERNEL);
++ if (res != NULL) {
++ res->npages = res->nused = 0;
++ res->first_free = -1;
++
++ if (lock == NULL) {
++ res->lock = kmalloc(sizeof(*res->lock), GFP_KERNEL);
++ if (res->lock == NULL) {
++ kfree(res);
++ return NULL;
++ }
++ spin_lock_init(res->lock);
++ res->internally_locked = 1;
++ } else {
++ res->lock = lock;
++ res->internally_locked = 0;
++ }
++
++ res->desc_blocks = NULL;
++ }
++
++ return res;
++}
++
++
++void netfront_accel_fini_bufs(struct netfront_accel_bufinfo *bufs)
++{
++ if (bufs->internally_locked)
++ kfree(bufs->lock);
++ kfree(bufs);
++}
++
++
++int netfront_accel_buf_map_request(struct xenbus_device *dev,
++ struct netfront_accel_bufpages *bufpages,
++ struct net_accel_msg *msg,
++ int pages, int offset)
++{
++ int i, mfn;
++ int err;
++
++ net_accel_msg_init(msg, NET_ACCEL_MSG_MAPBUF);
++
++ BUG_ON(pages > NET_ACCEL_MSG_MAX_PAGE_REQ);
++
++ msg->u.mapbufs.pages = pages;
++
++ for (i = 0; i < msg->u.mapbufs.pages; i++) {
++ /*
++ * This can happen if we tried to send this message
++ * earlier but the queue was full.
++ */
++ if (bufpages->grant_list[offset+i] != 0) {
++ msg->u.mapbufs.grants[i] =
++ bufpages->grant_list[offset+i];
++ continue;
++ }
++
++ mfn = virt_to_mfn(bufpages->page_list[offset+i]);
++ VPRINTK("%s: Granting page %d, mfn %08x\n",
++ __FUNCTION__, i, mfn);
++
++ bufpages->grant_list[offset+i] =
++ net_accel_grant_page(dev, mfn, 0);
++ msg->u.mapbufs.grants[i] = bufpages->grant_list[offset+i];
++
++ if (msg->u.mapbufs.grants[i] < 0) {
++ EPRINTK("%s: Failed to grant buffer: %d\n",
++ __FUNCTION__, msg->u.mapbufs.grants[i]);
++ err = -EIO;
++ goto error;
++ }
++ }
++
++ /* This is interpreted on return as the offset into the page_list */
++ msg->u.mapbufs.reqid = offset;
++
++ return 0;
++
++error:
++ /* Ungrant all the pages we've successfully granted. */
++ for (i--; i >= 0; i--) {
++ net_accel_ungrant_page(bufpages->grant_list[offset+i]);
++ bufpages->grant_list[offset+i] = 0;
++ }
++ return err;
++}
++
++
++/* Process a response to a buffer request. */
++int netfront_accel_add_bufs(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *manager,
++ struct net_accel_msg *msg)
++{
++ int msg_pages, page_offset, i, newtot;
++ int old_block_count, new_block_count;
++ u32 msg_buf;
++ unsigned long flags;
++
++ VPRINTK("%s: manager %p msg %p\n", __FUNCTION__, manager, msg);
++
++ BUG_ON(msg->id != (NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY));
++
++ msg_pages = msg->u.mapbufs.pages;
++ msg_buf = msg->u.mapbufs.buf;
++ page_offset = msg->u.mapbufs.reqid;
++
++ spin_lock_irqsave(manager->lock, flags);
++ newtot = manager->npages + msg_pages;
++ old_block_count =
++ (manager->npages + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
++ NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
++ new_block_count =
++ (newtot + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
++ NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
++
++ for (i = old_block_count; i < new_block_count; i++) {
++ struct netfront_accel_pkt_desc *block;
++ if (manager->desc_blocks[i] != NULL) {
++ VPRINTK("Not needed\n");
++ continue;
++ }
++ block = kzalloc(NETFRONT_ACCEL_BUFS_PER_BLOCK *
++ sizeof(netfront_accel_pkt_desc), GFP_ATOMIC);
++ if (block == NULL) {
++ spin_unlock_irqrestore(manager->lock, flags);
++ return -ENOMEM;
++ }
++ manager->desc_blocks[i] = block;
++ }
++ for (i = manager->npages; i < newtot; i++) {
++ int k, j = i - manager->npages;
++ int block_num;
++ int block_idx;
++ struct netfront_accel_pkt_desc *pkt;
++
++ block_num = i >> NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
++ block_idx = (NETFRONT_ACCEL_BUFS_PER_PAGE*i)
++ & (NETFRONT_ACCEL_BUFS_PER_BLOCK-1);
++
++ pkt = manager->desc_blocks[block_num] + block_idx;
++
++ for (k = 0; k < NETFRONT_ACCEL_BUFS_PER_PAGE; k++) {
++ BUG_ON(page_offset + j >= bufpages->max_pages);
++
++ pkt[k].buf_id = NETFRONT_ACCEL_BUFS_PER_PAGE * i + k;
++ pkt[k].pkt_kva = bufpages->page_list[page_offset + j] +
++ (PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) * k;
++ pkt[k].pkt_buff_addr = msg_buf +
++ (PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) *
++ (NETFRONT_ACCEL_BUFS_PER_PAGE * j + k);
++ pkt[k].next_free = manager->first_free;
++ manager->first_free = pkt[k].buf_id;
++ *(int*)(pkt[k].pkt_kva) = pkt[k].buf_id;
++
++ VPRINTK("buf %d desc %p kva %p buffaddr %x\n",
++ pkt[k].buf_id, &(pkt[k]), pkt[k].pkt_kva,
++ pkt[k].pkt_buff_addr);
++ }
++ }
++ manager->npages = newtot;
++ spin_unlock_irqrestore(manager->lock, flags);
++ VPRINTK("Added %d pages. Total is now %d\n", msg_pages,
++ manager->npages);
++ return 0;
++}
++
++
++netfront_accel_pkt_desc *
++netfront_accel_buf_find(struct netfront_accel_bufinfo *manager, u16 id)
++{
++ netfront_accel_pkt_desc *pkt;
++ int block_num = id >> NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT;
++ int block_idx = id & (NETFRONT_ACCEL_BUFS_PER_BLOCK - 1);
++ BUG_ON(id >= manager->npages * NETFRONT_ACCEL_BUFS_PER_PAGE);
++ BUG_ON(block_idx >= NETFRONT_ACCEL_BUFS_PER_BLOCK);
++ pkt = manager->desc_blocks[block_num] + block_idx;
++ return pkt;
++}
++
++
++/* Allocate a buffer from the buffer manager */
++netfront_accel_pkt_desc *
++netfront_accel_buf_get(struct netfront_accel_bufinfo *manager)
++{
++ int bufno = -1;
++ netfront_accel_pkt_desc *buf = NULL;
++ unsigned long flags = 0;
++
++ /* Any spare? */
++ if (manager->first_free == -1)
++ return NULL;
++ /* Take lock */
++ if (manager->internally_locked)
++ spin_lock_irqsave(manager->lock, flags);
++ bufno = manager->first_free;
++ if (bufno != -1) {
++ buf = netfront_accel_buf_find(manager, bufno);
++ manager->first_free = buf->next_free;
++ manager->nused++;
++ }
++ /* Release lock */
++ if (manager->internally_locked)
++ spin_unlock_irqrestore(manager->lock, flags);
++
++ /* Tell the world (buf may be NULL if a racing caller drained the pool) */
++ if (buf != NULL)
++ VPRINTK("Allocated buffer %i, buffaddr %x\n", bufno,
++ buf->pkt_buff_addr);
++
++ return buf;
++}
++
++
++/* Release a buffer back to the buffer manager pool */
++int netfront_accel_buf_put(struct netfront_accel_bufinfo *manager, u16 id)
++{
++ netfront_accel_pkt_desc *buf = netfront_accel_buf_find(manager, id);
++ unsigned long flags = 0;
++ unsigned was_empty = 0;
++ int bufno = id;
++
++ VPRINTK("Freeing buffer %i\n", id);
++ BUG_ON(id == (u16)-1);
++
++ if (manager->internally_locked)
++ spin_lock_irqsave(manager->lock, flags);
++
++ if (manager->first_free == -1)
++ was_empty = 1;
++
++ buf->next_free = manager->first_free;
++ manager->first_free = bufno;
++ manager->nused--;
++
++ if (manager->internally_locked)
++ spin_unlock_irqrestore(manager->lock, flags);
++
++ return was_empty;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_bufs.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_bufs.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_bufs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_bufs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,181 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_BUFS_H
++#define NETFRONT_ACCEL_BUFS_H
++
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <xen/xenbus.h>
++
++#include "accel_msg_iface.h"
++
++
++/*! Buffer descriptor structure */
++typedef struct netfront_accel_pkt_desc {
++ int buf_id;
++ u32 pkt_buff_addr;
++ void *pkt_kva;
++ /* This is the socket buffer currently married to this buffer */
++ struct sk_buff *skb;
++ int next_free;
++} netfront_accel_pkt_desc;
++
++
++#define NETFRONT_ACCEL_DEFAULT_BUF_PAGES (384)
++#define NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT (4)
++#define NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK \
++ (1 << (NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT))
++#define NETFRONT_ACCEL_BUFS_PER_PAGE_SHIFT (1)
++#define NETFRONT_ACCEL_BUFS_PER_PAGE \
++ (1 << (NETFRONT_ACCEL_BUFS_PER_PAGE_SHIFT))
++#define NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT \
++ (NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT + \
++ NETFRONT_ACCEL_BUFS_PER_PAGE_SHIFT)
++#define NETFRONT_ACCEL_BUFS_PER_BLOCK \
++ (1 << NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT)
++#define NETFRONT_ACCEL_BUF_NUM_BLOCKS(max_pages) \
++ (((max_pages)+NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK-1) / \
++ NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK)
++
++/*! Buffer management structure. */
++struct netfront_accel_bufinfo {
++ /* number added to this manager */
++ unsigned npages;
++ /* number currently used from this manager */
++ unsigned nused;
++
++ int first_free;
++
++ int internally_locked;
++ spinlock_t *lock;
++
++ /*
++ * array of pointers (length NETFRONT_ACCEL_BUF_NUM_BLOCKS) to
++ * pkt descs
++ */
++ struct netfront_accel_pkt_desc **desc_blocks;
++};
++
++
++struct netfront_accel_bufpages {
++ /* length of lists of pages/grants */
++ int max_pages;
++ /* list of pages allocated for network buffers */
++ void **page_list;
++ /* list of grants for the above pages */
++ grant_ref_t *grant_list;
++
++ /* number of page requests that have been made */
++ unsigned page_reqs;
++};
++
++
++/*! Allocate memory for the buffer manager, set up locks etc.
++ * Optionally takes a lock to use; if none is supplied it makes its own.
++ *
++ * \return pointer to netfront_accel_bufinfo structure that represents the
++ * buffer manager
++ */
++extern struct netfront_accel_bufinfo *
++netfront_accel_init_bufs(spinlock_t *lock);
++
++/*! Allocate memory for the buffers
++ */
++extern int
++netfront_accel_alloc_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_res,
++ struct netfront_accel_bufinfo *tx_res,
++ int pages);
++extern void
++netfront_accel_free_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_res,
++ struct netfront_accel_bufinfo *tx_res);
++
++/*! Release memory for the buffer manager, buffers, etc.
++ *
++ * \param manager pointer to netfront_accel_bufinfo structure that
++ * represents the buffer manager
++ */
++extern void netfront_accel_fini_bufs(struct netfront_accel_bufinfo *manager);
++
++/*! Release a buffer.
++ *
++ * \param manager The buffer manager which owns the buffer.
++ * \param id The buffer identifier.
++ */
++extern int netfront_accel_buf_put(struct netfront_accel_bufinfo *manager,
++ u16 id);
++
++/*! Get the packet descriptor associated with a buffer id.
++ *
++ * \param manager The buffer manager which owns the buffer.
++ * \param id The buffer identifier.
++ *
++ * The returned value is the packet descriptor for this buffer.
++ */
++extern netfront_accel_pkt_desc *
++netfront_accel_buf_find(struct netfront_accel_bufinfo *manager, u16 id);
++
++
++/*! Fill out a message request for some buffers to be mapped by the
++ * back end driver
++ *
++ * \param dev The xenbus device the request relates to
++ * \param bufpages The buffer page bookkeeping structure
++ * \param msg Pointer to a net_accel_msg to complete.
++ * \param pages Number of pages to request
++ * \param offset Offset of the first requested page
++ * \return 0 on success
++ */
++extern int
++netfront_accel_buf_map_request(struct xenbus_device *dev,
++ struct netfront_accel_bufpages *bufpages,
++ struct net_accel_msg *msg,
++ int pages, int offset);
++
++/*! Process a response to a buffer request.
++ *
++ * Deal with a received message from the back end in response to our
++ * request for buffers
++ *
++ * \param bufpages The buffer page bookkeeping structure
++ * \param manager The buffer manager
++ * \param msg The received message from the back end describing new
++ * buffers
++ * \return 0 on success
++ */
++extern int
++netfront_accel_add_bufs(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *manager,
++ struct net_accel_msg *msg);
++
++
++/*! Allocate a buffer from the buffer manager
++ *
++ * \param manager The buffer manager data structure
++ * \return Pointer to the allocated buffer descriptor; its buf_id
++ * field holds the id of the buffer.
++ */
++struct netfront_accel_pkt_desc *
++netfront_accel_buf_get(struct netfront_accel_bufinfo *manager);
++
++#endif /* NETFRONT_ACCEL_BUFS_H */
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_debugfs.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_debugfs.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_debugfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_debugfs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,211 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/fs.h>
++#include <linux/debugfs.h>
++
++#include "accel.h"
++
++#if defined(CONFIG_DEBUG_FS)
++static struct dentry *sfc_debugfs_root = NULL;
++#endif
++
++void netfront_accel_debugfs_init(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ sfc_debugfs_root = debugfs_create_dir(frontend_name, NULL);
++#endif
++}
++
++
++void netfront_accel_debugfs_fini(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (sfc_debugfs_root)
++ debugfs_remove(sfc_debugfs_root);
++#endif
++}
++
++
++int netfront_accel_debugfs_create(netfront_accel_vnic *vnic)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (sfc_debugfs_root == NULL)
++ return -ENOENT;
++
++ vnic->dbfs_dir = debugfs_create_dir(vnic->net_dev->name,
++ sfc_debugfs_root);
++ if (vnic->dbfs_dir == NULL)
++ return -ENOMEM;
++
++ vnic->netdev_dbfs.fastpath_rx_pkts = debugfs_create_u32
++ ("fastpath_rx_pkts", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_rx_pkts);
++ vnic->netdev_dbfs.fastpath_rx_bytes = debugfs_create_u32
++ ("fastpath_rx_bytes", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_rx_bytes);
++ vnic->netdev_dbfs.fastpath_rx_errors = debugfs_create_u32
++ ("fastpath_rx_errors", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_rx_errors);
++ vnic->netdev_dbfs.fastpath_tx_pkts = debugfs_create_u32
++ ("fastpath_tx_pkts", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_tx_pkts);
++ vnic->netdev_dbfs.fastpath_tx_bytes = debugfs_create_u32
++ ("fastpath_tx_bytes", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_tx_bytes);
++ vnic->netdev_dbfs.fastpath_tx_errors = debugfs_create_u32
++ ("fastpath_tx_errors", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_tx_errors);
++
++#if NETFRONT_ACCEL_STATS
++ vnic->dbfs.irq_count = debugfs_create_u64
++ ("irq_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.irq_count);
++ vnic->dbfs.useless_irq_count = debugfs_create_u64
++ ("useless_irq_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.useless_irq_count);
++ vnic->dbfs.poll_schedule_count = debugfs_create_u64
++ ("poll_schedule_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.poll_schedule_count);
++ vnic->dbfs.poll_call_count = debugfs_create_u64
++ ("poll_call_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.poll_call_count);
++ vnic->dbfs.poll_reschedule_count = debugfs_create_u64
++ ("poll_reschedule_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.poll_reschedule_count);
++ vnic->dbfs.queue_stops = debugfs_create_u64
++ ("queue_stops", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.queue_stops);
++ vnic->dbfs.queue_wakes = debugfs_create_u64
++ ("queue_wakes", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.queue_wakes);
++ vnic->dbfs.ssr_bursts = debugfs_create_u64
++ ("ssr_bursts", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_bursts);
++ vnic->dbfs.ssr_drop_stream = debugfs_create_u64
++ ("ssr_drop_stream", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_drop_stream);
++ vnic->dbfs.ssr_misorder = debugfs_create_u64
++ ("ssr_misorder", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_misorder);
++ vnic->dbfs.ssr_slow_start = debugfs_create_u64
++ ("ssr_slow_start", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_slow_start);
++ vnic->dbfs.ssr_merges = debugfs_create_u64
++ ("ssr_merges", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_merges);
++ vnic->dbfs.ssr_too_many = debugfs_create_u64
++ ("ssr_too_many", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_too_many);
++ vnic->dbfs.ssr_new_stream = debugfs_create_u64
++ ("ssr_new_stream", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_new_stream);
++
++ vnic->dbfs.fastpath_tx_busy = debugfs_create_u64
++ ("fastpath_tx_busy", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_tx_busy);
++ vnic->dbfs.fastpath_tx_completions = debugfs_create_u64
++ ("fastpath_tx_completions", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_tx_completions);
++ vnic->dbfs.fastpath_tx_pending_max = debugfs_create_u32
++ ("fastpath_tx_pending_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_tx_pending_max);
++ vnic->dbfs.event_count = debugfs_create_u64
++ ("event_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.event_count);
++ vnic->dbfs.bad_event_count = debugfs_create_u64
++ ("bad_event_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.bad_event_count);
++ vnic->dbfs.event_count_since_irq = debugfs_create_u32
++ ("event_count_since_irq", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.event_count_since_irq);
++ vnic->dbfs.events_per_irq_max = debugfs_create_u32
++ ("events_per_irq_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_irq_max);
++ vnic->dbfs.fastpath_frm_trunc = debugfs_create_u64
++ ("fastpath_frm_trunc", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_frm_trunc);
++ vnic->dbfs.rx_no_desc_trunc = debugfs_create_u64
++ ("rx_no_desc_trunc", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.rx_no_desc_trunc);
++ vnic->dbfs.events_per_poll_max = debugfs_create_u32
++ ("events_per_poll_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_poll_max);
++ vnic->dbfs.events_per_poll_rx_max = debugfs_create_u32
++ ("events_per_poll_rx_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_poll_rx_max);
++ vnic->dbfs.events_per_poll_tx_max = debugfs_create_u32
++ ("events_per_poll_tx_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_poll_tx_max);
++#endif
++#endif
++ return 0;
++}
++
++
++int netfront_accel_debugfs_remove(netfront_accel_vnic *vnic)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (vnic->dbfs_dir != NULL) {
++ debugfs_remove(vnic->netdev_dbfs.fastpath_rx_pkts);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_rx_bytes);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_rx_errors);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_tx_pkts);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_tx_bytes);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_tx_errors);
++
++#if NETFRONT_ACCEL_STATS
++ debugfs_remove(vnic->dbfs.irq_count);
++ debugfs_remove(vnic->dbfs.useless_irq_count);
++ debugfs_remove(vnic->dbfs.poll_schedule_count);
++ debugfs_remove(vnic->dbfs.poll_call_count);
++ debugfs_remove(vnic->dbfs.poll_reschedule_count);
++ debugfs_remove(vnic->dbfs.queue_stops);
++ debugfs_remove(vnic->dbfs.queue_wakes);
++ debugfs_remove(vnic->dbfs.ssr_bursts);
++ debugfs_remove(vnic->dbfs.ssr_drop_stream);
++ debugfs_remove(vnic->dbfs.ssr_misorder);
++ debugfs_remove(vnic->dbfs.ssr_slow_start);
++ debugfs_remove(vnic->dbfs.ssr_merges);
++ debugfs_remove(vnic->dbfs.ssr_too_many);
++ debugfs_remove(vnic->dbfs.ssr_new_stream);
++
++ debugfs_remove(vnic->dbfs.fastpath_tx_busy);
++ debugfs_remove(vnic->dbfs.fastpath_tx_completions);
++ debugfs_remove(vnic->dbfs.fastpath_tx_pending_max);
++ debugfs_remove(vnic->dbfs.event_count);
++ debugfs_remove(vnic->dbfs.bad_event_count);
++ debugfs_remove(vnic->dbfs.event_count_since_irq);
++ debugfs_remove(vnic->dbfs.events_per_irq_max);
++ debugfs_remove(vnic->dbfs.fastpath_frm_trunc);
++ debugfs_remove(vnic->dbfs.rx_no_desc_trunc);
++ debugfs_remove(vnic->dbfs.events_per_poll_max);
++ debugfs_remove(vnic->dbfs.events_per_poll_rx_max);
++ debugfs_remove(vnic->dbfs.events_per_poll_tx_max);
++#endif
++ debugfs_remove(vnic->dbfs_dir);
++ }
++#endif
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,477 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_H
++#define NETFRONT_ACCEL_H
++
++#include "accel_msg_iface.h"
++#include "accel_cuckoo_hash.h"
++#include "accel_bufs.h"
++
++#include "etherfabric/ef_vi.h"
++
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++
++#include <linux/kernel.h>
++#include <linux/list.h>
++
++enum netfront_accel_post_status {
++ NETFRONT_ACCEL_STATUS_GOOD,
++ NETFRONT_ACCEL_STATUS_BUSY,
++ NETFRONT_ACCEL_STATUS_CANT
++};
++
++#define NETFRONT_ACCEL_STATS 1
++#if NETFRONT_ACCEL_STATS
++#define NETFRONT_ACCEL_STATS_OP(x) x
++#else
++#define NETFRONT_ACCEL_STATS_OP(x)
++#endif
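++
++/*
++ * For example, NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++), as
++ * used later in this driver, expands to the increment when
++ * NETFRONT_ACCEL_STATS is 1 and to nothing otherwise, so the optional
++ * counters cost nothing on the hot path when compiled out.
++ */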
++
++
++enum netfront_accel_msg_state {
++ NETFRONT_ACCEL_MSG_NONE = 0,
++ NETFRONT_ACCEL_MSG_HELLO = 1,
++ NETFRONT_ACCEL_MSG_HW = 2
++};
++
++
++typedef struct {
++ u32 in_progress;
++ u32 total_len;
++ struct sk_buff *skb;
++} netfront_accel_jumbo_state;
++
++
++struct netfront_accel_ssr_state {
++ /** List of tracked connections. */
++ struct list_head conns;
++
++ /** Free efx_ssr_conn instances. */
++ struct list_head free_conns;
++};
++
++
++struct netfront_accel_netdev_stats {
++ /* Fastpath stats. */
++ u32 fastpath_rx_pkts;
++ u32 fastpath_rx_bytes;
++ u32 fastpath_rx_errors;
++ u32 fastpath_tx_pkts;
++ u32 fastpath_tx_bytes;
++ u32 fastpath_tx_errors;
++};
++
++
++struct netfront_accel_netdev_dbfs {
++ struct dentry *fastpath_rx_pkts;
++ struct dentry *fastpath_rx_bytes;
++ struct dentry *fastpath_rx_errors;
++ struct dentry *fastpath_tx_pkts;
++ struct dentry *fastpath_tx_bytes;
++ struct dentry *fastpath_tx_errors;
++};
++
++
++struct netfront_accel_stats {
++ /** Fast path events */
++ u64 fastpath_tx_busy;
++
++ /** TX DMA queue status */
++ u64 fastpath_tx_completions;
++
++ /** The number of events processed. */
++ u64 event_count;
++
++ /** Number of frame trunc events seen on fastpath */
++ u64 fastpath_frm_trunc;
++
++ /** Number of no rx descriptor trunc events seen on fastpath */
++ u64 rx_no_desc_trunc;
++
++ /** The number of misc bad events (e.g. RX_DISCARD) processed. */
++ u64 bad_event_count;
++
++ /** Number of events dealt with in poll loop */
++ u32 events_per_poll_max;
++ u32 events_per_poll_tx_max;
++ u32 events_per_poll_rx_max;
++
++ /** Largest number of concurrently outstanding tx descriptors */
++ u32 fastpath_tx_pending_max;
++
++	/** The number of events since the last interrupt. */
++ u32 event_count_since_irq;
++
++ /** The max number of events between interrupts. */
++ u32 events_per_irq_max;
++
++ /** The number of interrupts. */
++ u64 irq_count;
++
++ /** The number of useless interrupts. */
++ u64 useless_irq_count;
++
++ /** The number of polls scheduled. */
++ u64 poll_schedule_count;
++
++ /** The number of polls called. */
++ u64 poll_call_count;
++
++	/** The number of times the poll was rescheduled. */
++ u64 poll_reschedule_count;
++
++ /** Number of times we've called netif_stop_queue/netif_wake_queue */
++ u64 queue_stops;
++ u64 queue_wakes;
++
++ /** SSR stats */
++ u64 ssr_bursts;
++ u64 ssr_drop_stream;
++ u64 ssr_misorder;
++ u64 ssr_slow_start;
++ u64 ssr_merges;
++ u64 ssr_too_many;
++ u64 ssr_new_stream;
++};
++
++
++struct netfront_accel_dbfs {
++ struct dentry *fastpath_tx_busy;
++ struct dentry *fastpath_tx_completions;
++ struct dentry *fastpath_tx_pending_max;
++ struct dentry *fastpath_frm_trunc;
++ struct dentry *rx_no_desc_trunc;
++ struct dentry *event_count;
++ struct dentry *bad_event_count;
++ struct dentry *events_per_poll_max;
++ struct dentry *events_per_poll_rx_max;
++ struct dentry *events_per_poll_tx_max;
++ struct dentry *event_count_since_irq;
++ struct dentry *events_per_irq_max;
++ struct dentry *irq_count;
++ struct dentry *useless_irq_count;
++ struct dentry *poll_schedule_count;
++ struct dentry *poll_call_count;
++ struct dentry *poll_reschedule_count;
++ struct dentry *queue_stops;
++ struct dentry *queue_wakes;
++ struct dentry *ssr_bursts;
++ struct dentry *ssr_drop_stream;
++ struct dentry *ssr_misorder;
++ struct dentry *ssr_slow_start;
++ struct dentry *ssr_merges;
++ struct dentry *ssr_too_many;
++ struct dentry *ssr_new_stream;
++};
++
++
++typedef struct netfront_accel_vnic {
++ struct netfront_accel_vnic *next;
++
++ struct mutex vnic_mutex;
++
++ spinlock_t tx_lock;
++
++ struct netfront_accel_bufpages bufpages;
++ struct netfront_accel_bufinfo *rx_bufs;
++ struct netfront_accel_bufinfo *tx_bufs;
++
++ /** Hardware & VI state */
++ ef_vi vi;
++
++ ef_vi_state *vi_state;
++
++ ef_eventq_state evq_state;
++
++ void *evq_mapping;
++
++	/** Hardware dependent state */
++ union {
++ struct {
++ /** Falcon A or B */
++ enum net_accel_hw_type type;
++ u32 *evq_rptr;
++ u32 *doorbell;
++ void *evq_rptr_mapping;
++ void *doorbell_mapping;
++ void *txdmaq_mapping;
++ void *rxdmaq_mapping;
++ } falcon;
++ } hw;
++
++ /** RX DMA queue status */
++ u32 rx_dma_level;
++
++ /** Number of RX descriptors waiting to be pushed to the card. */
++ u32 rx_dma_batched;
++#define NETFRONT_ACCEL_RX_DESC_BATCH 16
++
++ /**
++ * Hash table of remote mac addresses to decide whether to try
++ * fast path
++ */
++ cuckoo_hash_table fastpath_table;
++ spinlock_t table_lock;
++
++	/** The local MAC address of the virtual interface we're accelerating */
++ u8 mac[ETH_ALEN];
++
++ int rx_pkt_stride;
++ int rx_skb_stride;
++
++	/**
++	 * Keep track of fragments of jumbo packets as events are
++	 * delivered by the NIC
++	 */
++ netfront_accel_jumbo_state jumbo_state;
++
++ struct net_device *net_dev;
++
++ /** These two gate the enabling of fast path operations */
++ int frontend_ready;
++ int backend_netdev_up;
++
++ int irq_enabled;
++ spinlock_t irq_enabled_lock;
++
++ int tx_enabled;
++
++ int poll_enabled;
++
++ /** A spare slot for a TX packet. This is treated as an extension
++ * of the DMA queue. */
++ struct sk_buff *tx_skb;
++
++ /** Keep track of fragments of SSR packets */
++ struct netfront_accel_ssr_state ssr_state;
++
++ struct xenbus_device *dev;
++
++ /** Event channel for messages */
++ int msg_channel;
++ int msg_channel_irq;
++
++ /** Event channel for network interrupts. */
++ int net_channel;
++ int net_channel_irq;
++
++ struct net_accel_shared_page *shared_page;
++
++ grant_ref_t ctrl_page_gnt;
++ grant_ref_t msg_page_gnt;
++
++ /** Message Qs, 1 each way. */
++ sh_msg_fifo2 to_dom0;
++ sh_msg_fifo2 from_dom0;
++
++ enum netfront_accel_msg_state msg_state;
++
++ /** Watch on accelstate */
++ struct xenbus_watch backend_accel_watch;
++ /** Watch on frontend's MAC address */
++ struct xenbus_watch mac_address_watch;
++
++ /** Work to process received irq/msg */
++ struct work_struct msg_from_bend;
++
++ /** Wait queue for changes in accelstate. */
++ wait_queue_head_t state_wait_queue;
++
++ /** The current accelstate of this driver. */
++ XenbusState frontend_state;
++
++ /** The most recent accelstate seen by the xenbus watch. */
++ XenbusState backend_state;
++
++ /** Non-zero if we should reject requests to connect. */
++ int removing;
++
++ /** Non-zero if the domU shared state has been initialised. */
++ int domU_state_is_setup;
++
++ /** Non-zero if the dom0 shared state has been initialised. */
++ int dom0_state_is_setup;
++
++	/* Statistics that are folded into the netdev stats */
++ struct netfront_accel_netdev_stats netdev_stats;
++ struct netfront_accel_netdev_stats stats_last_read;
++#ifdef CONFIG_DEBUG_FS
++ struct netfront_accel_netdev_dbfs netdev_dbfs;
++#endif
++
++ /* These statistics are internal and optional */
++#if NETFRONT_ACCEL_STATS
++ struct netfront_accel_stats stats;
++#ifdef CONFIG_DEBUG_FS
++ struct netfront_accel_dbfs dbfs;
++#endif
++#endif
++
++	/** Debugfs dir for this interface */
++ struct dentry *dbfs_dir;
++} netfront_accel_vnic;
++
++
++/* Module parameters */
++extern unsigned sfc_netfront_max_pages;
++extern unsigned sfc_netfront_buffer_split;
++
++extern const char *frontend_name;
++extern struct netfront_accel_hooks accel_hooks;
++extern struct workqueue_struct *netfront_accel_workqueue;
++
++
++extern
++void netfront_accel_vi_ctor(netfront_accel_vnic *vnic);
++
++extern
++int netfront_accel_vi_init(netfront_accel_vnic *vnic,
++ struct net_accel_msg_hw *hw_msg);
++
++extern
++void netfront_accel_vi_dtor(netfront_accel_vnic *vnic);
++
++
++/**
++ * Add new buffers which have been registered with the NIC.
++ *
++ * @v vnic The vnic instance to process the response.
++ * @v is_rx Non-zero if the buffers are RX buffers, zero for TX.
++ *
++ * The buffers contained in the message are added to the buffer pool.
++ */
++extern
++void netfront_accel_vi_add_bufs(netfront_accel_vnic *vnic, int is_rx);
++
++/**
++ * Put a packet on the tx DMA queue.
++ *
++ * @v vnic The vnic instance to accept the packet.
++ * @v skb A sk_buff to send.
++ *
++ * Attempt to send a packet. On success, the skb is owned by the DMA
++ * queue and will be released when the completion event arrives.
++ */
++extern enum netfront_accel_post_status
++netfront_accel_vi_tx_post(netfront_accel_vnic *vnic,
++ struct sk_buff *skb);
++
++
++/**
++ * Process events in response to an interrupt.
++ *
++ * @v vnic The vnic instance to poll.
++ * @v rx_packets The maximum number of rx packets to process.
++ * @ret rx_done The number of rx packets processed.
++ *
++ * The vnic will process events until there are no more events
++ * remaining or the specified number of rx packets has been processed.
++ * The split from the interrupt call is to allow Linux NAPI
++ * polling.
++ */
++extern
++int netfront_accel_vi_poll(netfront_accel_vnic *vnic, int rx_packets);
++
++
++/**
++ * Iterate over the fragments of a packet buffer.
++ *
++ * @v skb The packet buffer to examine.
++ * @v frag_idx A variable name for the fragment index.
++ * @v frag_data A variable name for the address of the fragment data.
++ * @v frag_len A variable name for the fragment length.
++ * @v code A section of code to execute for each fragment.
++ *
++ * This macro iterates over the fragments in a packet buffer and
++ * executes the code for each of them.
++ */
++#define NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT(skb, frag_idx, \
++ frag_data, frag_len, \
++ code) \
++ do { \
++ int frag_idx; \
++ void *frag_data; \
++ unsigned int frag_len; \
++ \
++ frag_data = skb->data; \
++ frag_len = skb_headlen(skb); \
++ frag_idx = 0; \
++ while (1) { /* For each fragment */ \
++ code; \
++ if (frag_idx >= skb_shinfo(skb)->nr_frags) { \
++ break; \
++ } else { \
++ skb_frag_t *fragment; \
++ fragment = &skb_shinfo(skb)->frags[frag_idx]; \
++ frag_len = fragment->size; \
++ frag_data = ((void*)page_address(fragment->page) \
++ + fragment->page_offset); \
++ }; \
++ frag_idx++; \
++ } \
++ } while(0)
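++
++/*
++ * Usage sketch for the macro above (illustrative only, not part of
++ * the driver): walk an skb and total the bytes in the linear head
++ * plus every page fragment; for an skb without a frag_list chain the
++ * result equals skb->len. The variable names are arbitrary.
++ *
++ *	unsigned total = 0;
++ *	NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT
++ *		(skb, ex_idx, ex_data, ex_len,
++ *		 { (void)ex_data; total += ex_len; });
++ */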
++
++static inline
++void netfront_accel_disable_net_interrupts(netfront_accel_vnic *vnic)
++{
++ mask_evtchn(vnic->net_channel);
++}
++
++static inline
++void netfront_accel_enable_net_interrupts(netfront_accel_vnic *vnic)
++{
++ unmask_evtchn(vnic->net_channel);
++}
++
++void netfront_accel_msg_tx_fastpath(netfront_accel_vnic *vnic, const void *mac,
++ u32 ip, u16 port, u8 protocol);
++
++/* Process an IRQ received from the back end driver */
++irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused);
++irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++extern void netfront_accel_msg_from_bend(struct work_struct *context);
++#else
++extern void netfront_accel_msg_from_bend(void *context);
++#endif
++
++extern void vnic_stop_fastpath(netfront_accel_vnic *vnic);
++
++extern int netfront_accel_probe(struct net_device *net_dev,
++ struct xenbus_device *dev);
++extern int netfront_accel_remove(struct xenbus_device *dev);
++extern void netfront_accel_set_closing(netfront_accel_vnic *vnic);
++
++extern int netfront_accel_vi_enable_interrupts(netfront_accel_vnic *vnic);
++
++extern void netfront_accel_debugfs_init(void);
++extern void netfront_accel_debugfs_fini(void);
++extern int netfront_accel_debugfs_create(netfront_accel_vnic *vnic);
++extern int netfront_accel_debugfs_remove(netfront_accel_vnic *vnic);
++
++#endif /* NETFRONT_ACCEL_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_msg.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_msg.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_msg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_msg.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,566 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/stddef.h>
++#include <linux/errno.h>
++
++#include <xen/xenbus.h>
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++#include "accel_bufs.h"
++
++#include "netfront.h" /* drivers/xen/netfront/netfront.h */
++
++static void vnic_start_interrupts(netfront_accel_vnic *vnic)
++{
++ unsigned long flags;
++
++ /* Prime our interrupt */
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ if (!netfront_accel_vi_enable_interrupts(vnic)) {
++ /* Cripes, that was quick, better pass it up */
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
++ netif_rx_schedule(vnic->net_dev);
++ } else {
++ /*
++ * Nothing yet, make sure we get interrupts through
++ * back end
++ */
++ vnic->irq_enabled = 1;
++ netfront_accel_enable_net_interrupts(vnic);
++ }
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++}
++
++
++static void vnic_stop_interrupts(netfront_accel_vnic *vnic)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++}
++
++
++static void vnic_start_fastpath(netfront_accel_vnic *vnic)
++{
++ struct net_device *net_dev = vnic->net_dev;
++ unsigned long flags;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ spin_lock_irqsave(&vnic->tx_lock, flags);
++ vnic->tx_enabled = 1;
++ spin_unlock_irqrestore(&vnic->tx_lock, flags);
++
++ netif_poll_disable(net_dev);
++ vnic->poll_enabled = 1;
++ netif_poll_enable(net_dev);
++
++ vnic_start_interrupts(vnic);
++}
++
++
++void vnic_stop_fastpath(netfront_accel_vnic *vnic)
++{
++ struct net_device *net_dev = vnic->net_dev;
++ struct netfront_info *np = (struct netfront_info *)netdev_priv(net_dev);
++ unsigned long flags1, flags2;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ vnic_stop_interrupts(vnic);
++
++ spin_lock_irqsave(&vnic->tx_lock, flags1);
++ vnic->tx_enabled = 0;
++ spin_lock_irqsave(&np->tx_lock, flags2);
++ if (vnic->tx_skb != NULL) {
++ dev_kfree_skb_any(vnic->tx_skb);
++ vnic->tx_skb = NULL;
++ if (netfront_check_queue_ready(net_dev)) {
++ netif_wake_queue(net_dev);
++ NETFRONT_ACCEL_STATS_OP
++ (vnic->stats.queue_wakes++);
++ }
++ }
++ spin_unlock_irqrestore(&np->tx_lock, flags2);
++ spin_unlock_irqrestore(&vnic->tx_lock, flags1);
++
++ /* Must prevent polls and hold lock to modify poll_enabled */
++ netif_poll_disable(net_dev);
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
++ vnic->poll_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
++ netif_poll_enable(net_dev);
++}
++
++
++static void netfront_accel_interface_up(netfront_accel_vnic *vnic)
++{
++ if (!vnic->backend_netdev_up) {
++ vnic->backend_netdev_up = 1;
++
++ if (vnic->frontend_ready)
++ vnic_start_fastpath(vnic);
++ }
++}
++
++
++static void netfront_accel_interface_down(netfront_accel_vnic *vnic)
++{
++ if (vnic->backend_netdev_up) {
++ vnic->backend_netdev_up = 0;
++
++ if (vnic->frontend_ready)
++ vnic_stop_fastpath(vnic);
++ }
++}
++
++
++static int vnic_add_bufs(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ int rc, offset;
++ struct netfront_accel_bufinfo *bufinfo;
++
++ BUG_ON(msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ);
++
++ offset = msg->u.mapbufs.reqid;
++
++ if (offset < vnic->bufpages.max_pages -
++ (vnic->bufpages.max_pages / sfc_netfront_buffer_split)) {
++ bufinfo = vnic->rx_bufs;
++ } else
++ bufinfo = vnic->tx_bufs;
++
++ /* Queue up some Rx buffers to start things off. */
++ if ((rc = netfront_accel_add_bufs(&vnic->bufpages, bufinfo, msg)) == 0) {
++ netfront_accel_vi_add_bufs(vnic, bufinfo == vnic->rx_bufs);
++
++ if (offset + msg->u.mapbufs.pages == vnic->bufpages.max_pages) {
++ VPRINTK("%s: got all buffers back\n", __FUNCTION__);
++ vnic->frontend_ready = 1;
++ if (vnic->backend_netdev_up)
++ vnic_start_fastpath(vnic);
++ } else {
++ VPRINTK("%s: got buffers back %d %d\n", __FUNCTION__,
++ offset, msg->u.mapbufs.pages);
++ }
++ }
++
++ return rc;
++}
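++
++/*
++ * Worked example of the RX/TX split above (illustrative only): with
++ * the default sfc_netfront_buffer_split of 2 and 384 pages, request
++ * offsets below 384 - 384/2 = 192 are accounted to the RX buffer
++ * manager and offsets 192..383 to TX, i.e. 1/buffer_split of the
++ * pool goes to TX and the remainder to RX.
++ */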
++
++
++/* The largest o such that (1ul << o) <= n. Requires n > 0. */
++inline unsigned log2_le(unsigned long n)
++{
++ unsigned order = 1;
++ while ((1ul << order) <= n) ++order;
++ return (order - 1);
++}
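++
++/*
++ * For example, log2_le(384) = 8 since 1ul << 8 = 256 <= 384 < 512;
++ * pow2(log2_le(n)) in the sender below therefore yields the largest
++ * power-of-two page count not exceeding what is still outstanding.
++ */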
++
++static int vnic_send_buffer_requests(netfront_accel_vnic *vnic,
++ struct netfront_accel_bufpages *bufpages)
++{
++ int pages, offset, rc = 0, sent = 0;
++ struct net_accel_msg msg;
++
++ while (bufpages->page_reqs < bufpages->max_pages) {
++ offset = bufpages->page_reqs;
++
++ pages = pow2(log2_le(bufpages->max_pages -
++ bufpages->page_reqs));
++ pages = pages < NET_ACCEL_MSG_MAX_PAGE_REQ ?
++ pages : NET_ACCEL_MSG_MAX_PAGE_REQ;
++
++ BUG_ON(offset < 0);
++ BUG_ON(pages <= 0);
++
++ rc = netfront_accel_buf_map_request(vnic->dev, bufpages,
++ &msg, pages, offset);
++ if (rc == 0) {
++ rc = net_accel_msg_send(vnic->shared_page,
++ &vnic->to_dom0, &msg);
++ if (rc < 0) {
++ VPRINTK("%s: queue full, stopping for now\n",
++ __FUNCTION__);
++ break;
++ }
++ sent++;
++ } else {
++ EPRINTK("%s: problem with grant, stopping for now\n",
++ __FUNCTION__);
++ break;
++ }
++
++ bufpages->page_reqs += pages;
++ }
++
++ if (sent)
++ net_accel_msg_notify(vnic->msg_channel_irq);
++
++ return rc;
++}
++
++
++/*
++ * In response to dom0 saying "my queue is full", we reply with this
++ * when it is no longer full
++ */
++inline void vnic_set_queue_not_full(netfront_accel_vnic *vnic)
++{
++	if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
++			      (unsigned long *)&vnic->shared_page->aflags))
++		notify_remote_via_irq(vnic->msg_channel_irq);
++	else
++		VPRINTK("queue not full bit already set, not signalling\n");
++}
++
++/*
++ * Notify dom0 that the queue we want to use is full, it should
++ * respond by setting MSG_AFLAGS_QUEUEUNOTFULL in due course
++ */
++inline void vnic_set_queue_full(netfront_accel_vnic *vnic)
++{
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
++ (unsigned long *)&vnic->shared_page->aflags))
++ notify_remote_via_irq(vnic->msg_channel_irq);
++ else
++ VPRINTK("queue full bit already set, not signalling\n");
++}
++
++
++static int vnic_check_hello_version(unsigned version)
++{
++ if (version > NET_ACCEL_MSG_VERSION) {
++ /* Newer protocol, we must refuse */
++ return -EPROTO;
++ }
++
++ if (version < NET_ACCEL_MSG_VERSION) {
++ /*
++ * We are newer, so have discretion to accept if we
++		 * wish. For now, however, just reject.
++ */
++ return -EPROTO;
++ }
++
++ BUG_ON(version != NET_ACCEL_MSG_VERSION);
++ return 0;
++}
++
++
++static int vnic_process_hello_msg(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ int err = 0;
++ unsigned pages = sfc_netfront_max_pages;
++
++ if (vnic_check_hello_version(msg->u.hello.version) < 0) {
++ msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY
++ | NET_ACCEL_MSG_ERROR;
++ msg->u.hello.version = NET_ACCEL_MSG_VERSION;
++ } else {
++		vnic->backend_netdev_up = vnic->shared_page->net_dev_up;
++
++ msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY;
++ msg->u.hello.version = NET_ACCEL_MSG_VERSION;
++ if (msg->u.hello.max_pages &&
++ msg->u.hello.max_pages < pages)
++ pages = msg->u.hello.max_pages;
++ msg->u.hello.max_pages = pages;
++
++ /* Half of pages for rx, half for tx */
++ err = netfront_accel_alloc_buffer_mem(&vnic->bufpages,
++ vnic->rx_bufs,
++ vnic->tx_bufs,
++ pages);
++ if (err)
++ msg->id |= NET_ACCEL_MSG_ERROR;
++ }
++
++ /* Send reply */
++ net_accel_msg_reply_notify(vnic->shared_page, vnic->msg_channel_irq,
++ &vnic->to_dom0, msg);
++ return err;
++}
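++
++/*
++ * Example negotiation (illustrative only): with the module default
++ * sfc_netfront_max_pages of 384, a hello from dom0 advertising
++ * max_pages = 256 clamps the reply to 256, and buffer memory is then
++ * allocated for 256 pages, half for RX and half for TX.
++ */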
++
++
++static int vnic_process_localmac_msg(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ cuckoo_hash_mac_key key;
++
++ if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) {
++ DPRINTK("MAC has moved, could be local: " MAC_FMT "\n",
++ MAC_ARG(msg->u.localmac.mac));
++ key = cuckoo_mac_to_key(msg->u.localmac.mac);
++ spin_lock_irqsave(&vnic->table_lock, flags);
++ /* Try to remove it, not a big deal if not there */
++ cuckoo_hash_remove(&vnic->fastpath_table,
++ (cuckoo_hash_key *)&key);
++ spin_unlock_irqrestore(&vnic->table_lock, flags);
++ }
++
++ return 0;
++}
++
++
++static
++int vnic_process_rx_msg(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ int err;
++
++ switch (msg->id) {
++ case NET_ACCEL_MSG_HELLO:
++		/* Hello message; send a reply */
++ DPRINTK("got Hello, with version %.8x\n",
++ msg->u.hello.version);
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_NONE);
++ err = vnic_process_hello_msg(vnic, msg);
++ if (err == 0)
++ vnic->msg_state = NETFRONT_ACCEL_MSG_HELLO;
++ break;
++ case NET_ACCEL_MSG_SETHW:
++ /* Hardware info message */
++ DPRINTK("got H/W info\n");
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HELLO);
++ err = netfront_accel_vi_init(vnic, &msg->u.hw);
++ if (err == 0)
++ vnic->msg_state = NETFRONT_ACCEL_MSG_HW;
++ break;
++ case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY:
++ VPRINTK("Got mapped buffers back\n");
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
++ err = vnic_add_bufs(vnic, msg);
++ break;
++ case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_ERROR:
++ /* No buffers. Can't use the fast path. */
++ EPRINTK("Got mapped buffers error. Cannot accelerate.\n");
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
++ err = -EIO;
++ break;
++ case NET_ACCEL_MSG_LOCALMAC:
++		/* Should be an add; remove is not currently used */
++ EPRINTK_ON(!(msg->u.localmac.flags & NET_ACCEL_MSG_ADD));
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
++ err = vnic_process_localmac_msg(vnic, msg);
++ break;
++ default:
++ EPRINTK("Huh? Message code is 0x%x\n", msg->id);
++ err = -EPROTO;
++ break;
++ }
++
++ return err;
++}
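++
++/*
++ * The BUG_ON checks above encode the handshake state machine:
++ * msg_state advances NONE -> HELLO -> HW as the hello reply and then
++ * the SETHW message are processed, and only in the HW state are
++ * MAPBUF replies and LOCALMAC messages accepted.
++ */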
++
++
++/* Process an IRQ received from the back end driver */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++void netfront_accel_msg_from_bend(struct work_struct *context)
++#else
++void netfront_accel_msg_from_bend(void *context)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ netfront_accel_vnic *vnic =
++ container_of(context, netfront_accel_vnic, msg_from_bend);
++#else
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
++#endif
++ struct net_accel_msg msg;
++ int err, queue_was_full = 0;
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ /*
++ * This happens when the shared pages have been unmapped but
++ * the workqueue has yet to be flushed
++ */
++ if (!vnic->dom0_state_is_setup)
++ goto unlock_out;
++
++ while ((vnic->shared_page->aflags & NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK)
++ != 0) {
++ if (vnic->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL) {
++ /* We've been told there may now be space. */
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
++ (unsigned long *)&vnic->shared_page->aflags);
++ }
++
++ if (vnic->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUE0FULL) {
++ /*
++ * There will be space at the end of this
++ * function if we can make any.
++ */
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
++ (unsigned long *)&vnic->shared_page->aflags);
++ queue_was_full = 1;
++ }
++
++ if (vnic->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_NETUPDOWN) {
++ DPRINTK("%s: net interface change\n", __FUNCTION__);
++ clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
++ (unsigned long *)&vnic->shared_page->aflags);
++ if (vnic->shared_page->net_dev_up)
++ netfront_accel_interface_up(vnic);
++ else
++ netfront_accel_interface_down(vnic);
++ }
++ }
++
++ /* Pull msg out of shared memory */
++ while ((err = net_accel_msg_recv(vnic->shared_page, &vnic->from_dom0,
++ &msg)) == 0) {
++ err = vnic_process_rx_msg(vnic, &msg);
++
++ if (err != 0)
++ goto done;
++ }
++
++ /*
++ * Send any pending buffer map request messages that we can,
++ * and mark domU->dom0 as full if necessary.
++ */
++ if (vnic->msg_state == NETFRONT_ACCEL_MSG_HW &&
++ vnic->bufpages.page_reqs < vnic->bufpages.max_pages) {
++ if (vnic_send_buffer_requests(vnic, &vnic->bufpages) == -ENOSPC)
++ vnic_set_queue_full(vnic);
++ }
++
++ /*
++ * If there are no messages then this is not an error. It
++ * just means that we've finished processing the queue.
++ */
++ if (err == -ENOENT)
++ err = 0;
++ done:
++ /* We will now have made space in the dom0->domU queue if we can */
++ if (queue_was_full)
++ vnic_set_queue_not_full(vnic);
++
++ if (err != 0) {
++ EPRINTK("%s returned %d\n", __FUNCTION__, err);
++ netfront_accel_set_closing(vnic);
++ }
++
++ unlock_out:
++ mutex_unlock(&vnic->vnic_mutex);
++
++ return;
++}
++
++
++irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
++ VPRINTK("irq %d from device %s\n", irq, vnic->dev->nodename);
++
++ queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);
++
++ return IRQ_HANDLED;
++}
++
++/* Process an interrupt received from the NIC via the backend */
++irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
++ struct net_device *net_dev = vnic->net_dev;
++ unsigned long flags;
++
++ VPRINTK("net irq %d from device %s\n", irq, vnic->dev->nodename);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++);
++
++	BUG_ON(net_dev == NULL);
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ if (vnic->irq_enabled) {
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++
++#if NETFRONT_ACCEL_STATS
++ vnic->stats.poll_schedule_count++;
++ if (vnic->stats.event_count_since_irq >
++ vnic->stats.events_per_irq_max)
++ vnic->stats.events_per_irq_max =
++ vnic->stats.event_count_since_irq;
++ vnic->stats.event_count_since_irq = 0;
++#endif
++ netif_rx_schedule(net_dev);
++	} else {
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.useless_irq_count++);
++ DPRINTK("%s: irq when disabled\n", __FUNCTION__);
++ }
++
++ return IRQ_HANDLED;
++}
++
++
++void netfront_accel_msg_tx_fastpath(netfront_accel_vnic *vnic, const void *mac,
++ u32 ip, u16 port, u8 protocol)
++{
++ unsigned long lock_state;
++ struct net_accel_msg *msg;
++
++ msg = net_accel_msg_start_send(vnic->shared_page, &vnic->to_dom0,
++ &lock_state);
++
++ if (msg == NULL)
++ return;
++
++ net_accel_msg_init(msg, NET_ACCEL_MSG_FASTPATH);
++ msg->u.fastpath.flags = NET_ACCEL_MSG_REMOVE;
++ memcpy(msg->u.fastpath.mac, mac, ETH_ALEN);
++
++ msg->u.fastpath.port = port;
++ msg->u.fastpath.ip = ip;
++ msg->u.fastpath.proto = protocol;
++
++ net_accel_msg_complete_send_notify(vnic->shared_page, &vnic->to_dom0,
++ &lock_state, vnic->msg_channel_irq);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_netfront.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_netfront.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_netfront.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_netfront.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,319 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++
++/* drivers/xen/netfront/netfront.h */
++#include "netfront.h"
++
++#include "accel.h"
++#include "accel_bufs.h"
++#include "accel_util.h"
++#include "accel_msg_iface.h"
++#include "accel_ssr.h"
++
++#ifdef EFX_GCOV
++#include "gcov.h"
++#endif
++
++#define NETFRONT_ACCEL_VNIC_FROM_NETDEV(_nd) \
++	((netfront_accel_vnic *)((struct netfront_info *)netdev_priv(_nd))->accel_priv)
++
++static int netfront_accel_netdev_start_xmit(struct sk_buff *skb,
++ struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ struct netfront_info *np =
++ (struct netfront_info *)netdev_priv(net_dev);
++ int handled, rc;
++ unsigned long flags1, flags2;
++
++ BUG_ON(vnic == NULL);
++
++ /* Take our tx lock and hold for the duration */
++ spin_lock_irqsave(&vnic->tx_lock, flags1);
++
++ if (!vnic->tx_enabled) {
++ rc = 0;
++ goto unlock_out;
++ }
++
++ handled = netfront_accel_vi_tx_post(vnic, skb);
++ if (handled == NETFRONT_ACCEL_STATUS_BUSY) {
++ BUG_ON(vnic->net_dev != net_dev);
++ DPRINTK("%s stopping queue\n", __FUNCTION__);
++
++ /* Netfront's lock protects tx_skb */
++ spin_lock_irqsave(&np->tx_lock, flags2);
++ BUG_ON(vnic->tx_skb != NULL);
++ vnic->tx_skb = skb;
++ netif_stop_queue(net_dev);
++ spin_unlock_irqrestore(&np->tx_lock, flags2);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.queue_stops++);
++ }
++
++ if (handled == NETFRONT_ACCEL_STATUS_CANT)
++ rc = 0;
++ else
++ rc = 1;
++
++unlock_out:
++ spin_unlock_irqrestore(&vnic->tx_lock, flags1);
++
++ return rc;
++}
++
++
++static int netfront_accel_netdev_poll(struct net_device *net_dev, int *budget)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ int rx_allowed = *budget, rx_done;
++
++ BUG_ON(vnic == NULL);
++
++ /* Can check this without lock as modifier excludes polls */
++ if (!vnic->poll_enabled)
++ return 0;
++
++ rx_done = netfront_accel_vi_poll(vnic, rx_allowed);
++ *budget -= rx_done;
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_call_count++);
++
++ VPRINTK("%s: done %d allowed %d\n",
++ __FUNCTION__, rx_done, rx_allowed);
++
++ netfront_accel_ssr_end_of_burst(vnic, &vnic->ssr_state);
++
++ if (rx_done < rx_allowed) {
++ return 0; /* Done */
++ }
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_reschedule_count++);
++
++ return 1; /* More to do. */
++}
++
++
++/*
++ * Process request from netfront to start napi interrupt
++ * mode. (i.e. enable interrupts as it's finished polling)
++ */
++static int netfront_accel_start_napi_interrupts(struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ unsigned long flags;
++
++ BUG_ON(vnic == NULL);
++
++ /*
++ * Can check this without lock as writer excludes poll before
++ * modifying
++ */
++ if (!vnic->poll_enabled)
++ return 0;
++
++ if (!netfront_accel_vi_enable_interrupts(vnic)) {
++ /*
++ * There was something there, tell caller we had
++ * something to do.
++ */
++ return 1;
++ }
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ vnic->irq_enabled = 1;
++ netfront_accel_enable_net_interrupts(vnic);
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++
++ return 0;
++}
++
++
++/*
++ * Process request from netfront to stop napi interrupt
++ * mode. (i.e. disable interrupts as it's starting to poll)
++ */
++static void netfront_accel_stop_napi_interrupts(struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ unsigned long flags;
++
++ BUG_ON(vnic == NULL);
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++
++ if (!vnic->poll_enabled) {
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++ return;
++ }
++
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++}
++
++
++static int netfront_accel_check_ready(struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++
++ BUG_ON(vnic == NULL);
++
++ /* This is protected by netfront's lock */
++ return vnic->tx_skb == NULL;
++}
++
++
++static int netfront_accel_get_stats(struct net_device *net_dev,
++ struct net_device_stats *stats)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ struct netfront_accel_netdev_stats now;
++
++ BUG_ON(vnic == NULL);
++
++ now.fastpath_rx_pkts = vnic->netdev_stats.fastpath_rx_pkts;
++ now.fastpath_rx_bytes = vnic->netdev_stats.fastpath_rx_bytes;
++ now.fastpath_rx_errors = vnic->netdev_stats.fastpath_rx_errors;
++ now.fastpath_tx_pkts = vnic->netdev_stats.fastpath_tx_pkts;
++ now.fastpath_tx_bytes = vnic->netdev_stats.fastpath_tx_bytes;
++ now.fastpath_tx_errors = vnic->netdev_stats.fastpath_tx_errors;
++
++ stats->rx_packets += (now.fastpath_rx_pkts -
++ vnic->stats_last_read.fastpath_rx_pkts);
++ stats->rx_bytes += (now.fastpath_rx_bytes -
++ vnic->stats_last_read.fastpath_rx_bytes);
++ stats->rx_errors += (now.fastpath_rx_errors -
++ vnic->stats_last_read.fastpath_rx_errors);
++ stats->tx_packets += (now.fastpath_tx_pkts -
++ vnic->stats_last_read.fastpath_tx_pkts);
++ stats->tx_bytes += (now.fastpath_tx_bytes -
++ vnic->stats_last_read.fastpath_tx_bytes);
++ stats->tx_errors += (now.fastpath_tx_errors -
++ vnic->stats_last_read.fastpath_tx_errors);
++
++ vnic->stats_last_read = now;
++
++ return 0;
++}
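++
++/*
++ * The snapshot/delta pattern above means each call adds only the
++ * fastpath traffic seen since the previous call, so the accelerated
++ * counters fold into netfront's struct net_device_stats without
++ * double counting.
++ */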
++
++
++struct netfront_accel_hooks accel_hooks = {
++ .new_device = &netfront_accel_probe,
++ .remove = &netfront_accel_remove,
++ .netdev_poll = &netfront_accel_netdev_poll,
++ .start_xmit = &netfront_accel_netdev_start_xmit,
++ .start_napi_irq = &netfront_accel_start_napi_interrupts,
++ .stop_napi_irq = &netfront_accel_stop_napi_interrupts,
++ .check_ready = &netfront_accel_check_ready,
++ .get_stats = &netfront_accel_get_stats
++};
++
++
++unsigned sfc_netfront_max_pages = NETFRONT_ACCEL_DEFAULT_BUF_PAGES;
++module_param_named(max_pages, sfc_netfront_max_pages, uint, 0644);
++MODULE_PARM_DESC(max_pages, "Number of buffer pages to request");
++
++unsigned sfc_netfront_buffer_split = 2;
++module_param_named(buffer_split, sfc_netfront_buffer_split, uint, 0644);
++MODULE_PARM_DESC(buffer_split,
++		 "Fraction of buffers to use for TX (as 1/N), rest for RX");
++
++
++const char *frontend_name = "sfc_netfront";
++
++struct workqueue_struct *netfront_accel_workqueue;
++
++static int __init netfront_accel_init(void)
++{
++ int rc;
++#ifdef EFX_GCOV
++ gcov_provider_init(THIS_MODULE);
++#endif
++
++ /*
++ * If we're running on dom0, netfront hasn't initialised
++ * itself, so we need to keep away
++ */
++ if (is_initial_xendomain())
++ return 0;
++
++ if (!is_pow2(sizeof(struct net_accel_msg)))
++ EPRINTK("%s: bad structure size\n", __FUNCTION__);
++
++ netfront_accel_workqueue = create_workqueue(frontend_name);
++
++ netfront_accel_debugfs_init();
++
++ rc = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
++ frontend_name, &accel_hooks);
++
++ if (rc < 0) {
++ EPRINTK("Xen netfront accelerator version mismatch\n");
++ return -EINVAL;
++ }
++
++ if (rc > 0) {
++ /*
++ * In future may want to add backwards compatibility
++ * and accept certain subsets of previous versions
++ */
++ EPRINTK("Xen netfront accelerator version mismatch\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++module_init(netfront_accel_init);
++
++static void __exit netfront_accel_exit(void)
++{
++ if (is_initial_xendomain())
++ return;
++
++ DPRINTK("%s: unhooking\n", __FUNCTION__);
++
++ /* Unhook from normal netfront */
++ netfront_accelerator_stop(frontend_name);
++
++ DPRINTK("%s: done\n", __FUNCTION__);
++
++ netfront_accel_debugfs_fini();
++
++ flush_workqueue(netfront_accel_workqueue);
++
++ destroy_workqueue(netfront_accel_workqueue);
++
++#ifdef EFX_GCOV
++ gcov_provider_fini(THIS_MODULE);
++#endif
++ return;
++}
++module_exit(netfront_accel_exit);
++
++MODULE_LICENSE("GPL");
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_ssr.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_ssr.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_ssr.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_ssr.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,308 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/socket.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/list.h>
++#include <net/ip.h>
++#include <net/checksum.h>
++
++#include "accel.h"
++#include "accel_util.h"
++#include "accel_bufs.h"
++
++#include "accel_ssr.h"
++
++static inline int list_valid(struct list_head *lh)
++{
++	return lh->next != NULL;
++}
++
++static void netfront_accel_ssr_deliver(struct netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st,
++ struct netfront_accel_ssr_conn *c);
++
++/** Construct a netfront_accel_ssr_state.
++ *
++ * @v st The SSR state (per channel per port)
++ */
++void netfront_accel_ssr_init(struct netfront_accel_ssr_state *st)
++{
++ unsigned i;
++
++ INIT_LIST_HEAD(&st->conns);
++ INIT_LIST_HEAD(&st->free_conns);
++ for (i = 0; i < 8; ++i) {
++ struct netfront_accel_ssr_conn *c =
++ kmalloc(sizeof(*c), GFP_KERNEL);
++ if (c == NULL) break;
++ c->n_in_order_pkts = 0;
++ c->skb = NULL;
++ list_add(&c->link, &st->free_conns);
++	}
++}
++
++
++/** Destructor for a netfront_accel_ssr_state.
++ *
++ * @v vnic The vnic the state belongs to
++ * @v st The SSR state (per channel per port)
++ */
++void netfront_accel_ssr_fini(netfront_accel_vnic *vnic,
++			     struct netfront_accel_ssr_state *st)
++{
++ struct netfront_accel_ssr_conn *c;
++
++	/* Return cleanly if netfront_accel_ssr_init() was not previously called */
++	BUG_ON(list_valid(&st->conns) != list_valid(&st->free_conns));
++	if (!list_valid(&st->conns))
++ return;
++
++	while (!list_empty(&st->free_conns)) {
++ c = list_entry(st->free_conns.prev,
++ struct netfront_accel_ssr_conn, link);
++ list_del(&c->link);
++ BUG_ON(c->skb != NULL);
++ kfree(c);
++ }
++	while (!list_empty(&st->conns)) {
++ c = list_entry(st->conns.prev,
++ struct netfront_accel_ssr_conn, link);
++ list_del(&c->link);
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++ kfree(c);
++ }
++}
++
++
++/** Calculate the IP checksum and deliver to the OS
++ *
++ * @v vnic The vnic
++ * @v st The SSR state (per channel per port)
++ * @v c The SSR connection state
++ */
++static void netfront_accel_ssr_deliver(netfront_accel_vnic *vnic,
++				       struct netfront_accel_ssr_state *st,
++				       struct netfront_accel_ssr_conn *c)
++{
++ BUG_ON(c->skb == NULL);
++
++ /*
++ * If we've chained packets together, recalculate the IP
++ * checksum.
++ */
++ if (skb_shinfo(c->skb)->frag_list) {
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_bursts);
++ c->iph->check = 0;
++ c->iph->check = ip_fast_csum((unsigned char *) c->iph,
++ c->iph->ihl);
++ }
++
++ VPRINTK("%s: %d\n", __FUNCTION__, c->skb->len);
++
++ netif_receive_skb(c->skb);
++ c->skb = NULL;
++}
++
++
++/** Push held skbs down into the network stack.
++ *
++ * @v vnic The vnic
++ * @v st SSR state
++ *
++ * Only called if we are tracking one or more connections.
++ */
++void __netfront_accel_ssr_end_of_burst(netfront_accel_vnic *vnic,
++				       struct netfront_accel_ssr_state *st)
++{
++ struct netfront_accel_ssr_conn *c;
++
++ BUG_ON(list_empty(&st->conns));
++
++ list_for_each_entry(c, &st->conns, link)
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++
++ /* Time-out connections that have received no traffic for 20ms. */
++ c = list_entry(st->conns.prev, struct netfront_accel_ssr_conn,
++ link);
++ if (jiffies - c->last_pkt_jiffies > (HZ / 50 + 1)) {
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_drop_stream);
++ list_del(&c->link);
++ list_add(&c->link, &st->free_conns);
++ }
++}
++
++
++/** Process an SKB and decide whether to dispatch it to the stack now
++ * or later.
++ *
++ * @v vnic The vnic
++ * @v st SSR state
++ * @v skb SKB to examine
++ * @ret rc 0 => deliver SKB to kernel now, otherwise the SKB belongs
++ * to us.
++ */
++int netfront_accel_ssr_skb(struct netfront_accel_vnic *vnic,
++			   struct netfront_accel_ssr_state *st,
++			   struct sk_buff *skb)
++{
++ int data_length, dont_merge;
++ struct netfront_accel_ssr_conn *c;
++ struct iphdr *iph;
++ struct tcphdr *th;
++ unsigned th_seq;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++ BUG_ON(skb->next != NULL);
++
++ /* We're not interested if it isn't TCP over IPv4. */
++ iph = (struct iphdr *) skb->data;
++ if (skb->protocol != htons(ETH_P_IP) ||
++ iph->protocol != IPPROTO_TCP) {
++ return 0;
++ }
++
++ /* Ignore segments that fail csum or are fragmented. */
++ if (unlikely((skb->ip_summed - CHECKSUM_UNNECESSARY) |
++ (iph->frag_off & htons(IP_MF | IP_OFFSET)))) {
++ return 0;
++ }
++
++ th = (struct tcphdr*)(skb->data + iph->ihl * 4);
++ data_length = ntohs(iph->tot_len) - iph->ihl * 4 - th->doff * 4;
++ th_seq = ntohl(th->seq);
++ dont_merge = (data_length == 0) | th->urg | th->syn | th->rst;
++
++ list_for_each_entry(c, &st->conns, link) {
++ if ((c->saddr - iph->saddr) |
++ (c->daddr - iph->daddr) |
++ (c->source - th->source) |
++ (c->dest - th->dest ))
++ continue;
++
++ /* Re-insert at head of list to reduce lookup time. */
++ list_del(&c->link);
++ list_add(&c->link, &st->conns);
++ c->last_pkt_jiffies = jiffies;
++
++ if (unlikely(th_seq - c->next_seq)) {
++ /* Out-of-order, so start counting again. */
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++ c->n_in_order_pkts = 0;
++ c->next_seq = th_seq + data_length;
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_misorder);
++ return 0;
++ }
++ c->next_seq = th_seq + data_length;
++
++ if (++c->n_in_order_pkts < 300) {
++ /* May be in slow-start, so don't merge. */
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_slow_start);
++ return 0;
++ }
++
++ if (unlikely(dont_merge)) {
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++ return 0;
++ }
++
++ if (c->skb) {
++ c->iph->tot_len = ntohs(c->iph->tot_len);
++ c->iph->tot_len += data_length;
++ c->iph->tot_len = htons(c->iph->tot_len);
++ c->th->ack_seq = th->ack_seq;
++ c->th->fin |= th->fin;
++ c->th->psh |= th->psh;
++ c->th->window = th->window;
++
++ /* Remove the headers from this skb. */
++ skb_pull(skb, skb->len - data_length);
++
++ /*
++ * Tack the new skb onto the head skb's frag_list.
++ * This is exactly the format that fragmented IP
++ * datagrams are reassembled into.
++ */
++ BUG_ON(skb->next != 0);
++ if ( ! skb_shinfo(c->skb)->frag_list)
++ skb_shinfo(c->skb)->frag_list = skb;
++ else
++ c->skb_tail->next = skb;
++ c->skb_tail = skb;
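++			/* len counts all bytes and data_len just the
++			 * non-linear part held in the frag_list;
++			 * truesize feeds socket memory accounting. */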
++ c->skb->len += skb->len;
++ c->skb->data_len += skb->len;
++ c->skb->truesize += skb->truesize;
++
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_merges);
++
++ /*
++ * If the next packet might push this super-packet
++ * over the limit for an IP packet, deliver it now.
++ * This is slightly conservative, but close enough.
++ */
++ if (c->skb->len +
++ (PAGE_SIZE / NETFRONT_ACCEL_BUFS_PER_PAGE)
++ > 16384)
++ netfront_accel_ssr_deliver(vnic, st, c);
++
++ return 1;
++ }
++ else {
++ c->iph = iph;
++ c->th = th;
++ c->skb = skb;
++ return 1;
++ }
++ }
++
++ /* We're not yet tracking this connection. */
++
++ if (dont_merge) {
++ return 0;
++ }
++
++ if (list_empty(&st->free_conns)) {
++ c = list_entry(st->conns.prev,
++ struct netfront_accel_ssr_conn,
++ link);
++ if (c->skb) {
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_too_many);
++ return 0;
++ }
++ }
++ else {
++ c = list_entry(st->free_conns.next,
++ struct netfront_accel_ssr_conn,
++ link);
++ }
++ list_del(&c->link);
++ list_add(&c->link, &st->conns);
++ c->saddr = iph->saddr;
++ c->daddr = iph->daddr;
++ c->source = th->source;
++ c->dest = th->dest;
++ c->next_seq = th_seq + data_length;
++ c->n_in_order_pkts = 0;
++ BUG_ON(c->skb != NULL);
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_new_stream);
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_ssr.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_ssr.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_ssr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_ssr.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,88 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_SSR_H
++#define NETFRONT_ACCEL_SSR_H
++
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/list.h>
++
++#include "accel.h"
++
++/** State for Soft Segment Reassembly (SSR). */
++
++struct netfront_accel_ssr_conn {
++ struct list_head link;
++
++ unsigned saddr, daddr;
++ unsigned short source, dest;
++
++ /** Number of in-order packets we've seen with payload. */
++ unsigned n_in_order_pkts;
++
++ /** Next in-order sequence number. */
++ unsigned next_seq;
++
++ /** Time we last saw a packet on this connection. */
++ unsigned long last_pkt_jiffies;
++
++ /** The SKB we are currently holding. If NULL, then all following
++ * fields are undefined.
++ */
++ struct sk_buff *skb;
++
++ /** The tail of the frag_list of SKBs we're holding. Only valid
++ * after at least one merge.
++ */
++ struct sk_buff *skb_tail;
++
++ /** The IP header of the skb we are holding. */
++ struct iphdr *iph;
++
++ /** The TCP header of the skb we are holding. */
++ struct tcphdr *th;
++};
++
++extern void netfront_accel_ssr_init(struct netfront_accel_ssr_state *st);
++extern void netfront_accel_ssr_fini(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st);
++
++extern void
++__netfront_accel_ssr_end_of_burst(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st);
++
++extern int netfront_accel_ssr_skb(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st,
++ struct sk_buff *skb);
++
++static inline void
++netfront_accel_ssr_end_of_burst (netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st) {
++ if ( ! list_empty(&st->conns) )
++ __netfront_accel_ssr_end_of_burst(vnic, st);
++}
++
++#endif /* NETFRONT_ACCEL_SSR_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_tso.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_tso.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_tso.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_tso.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,511 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/pci.h>
++#include <linux/tcp.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++
++#include "accel.h"
++#include "accel_util.h"
++
++#include "accel_tso.h"
++
++#define PTR_DIFF(p1, p2) ((u8*)(p1) - (u8*)(p2))
++#define ETH_HDR_LEN(skb) ((skb)->nh.raw - (skb)->data)
++#define SKB_TCP_OFF(skb) PTR_DIFF ((skb)->h.th, (skb)->data)
++#define SKB_IP_OFF(skb) PTR_DIFF ((skb)->nh.iph, (skb)->data)
++
++/*
++ * Set a maximum number of buffers in each output packet to make life
++ * a little simpler - if this is reached it will just move on to
++ * another packet
++ */
++#define ACCEL_TSO_MAX_BUFFERS (6)
++
++/** TSO State.
++ *
++ * The state used during segmentation. It is put into this data structure
++ * just to make it easy to pass into inline functions.
++ */
++struct netfront_accel_tso_state {
++ /** bytes of data we've yet to segment */
++ unsigned remaining_len;
++
++ /** current sequence number */
++ unsigned seqnum;
++
++ /** remaining space in current packet */
++ unsigned packet_space;
++
++ /** List of packets to be output, containing the buffers and
++ * iovecs to describe each packet
++ */
++ struct netfront_accel_tso_output_packet *output_packets;
++
++ /** Total number of buffers in output_packets */
++ unsigned buffers;
++
++ /** Total number of packets in output_packets */
++ unsigned packets;
++
++ /** Input Fragment Cursor.
++ *
++ * Where we are in the current fragment of the incoming SKB. These
++ * values get updated in place when we split a fragment over
++ * multiple packets.
++ */
++ struct {
++ /** address of current position */
++ void *addr;
++ /** remaining length */
++ unsigned int len;
++ } ifc; /* == ifc Input Fragment Cursor */
++
++ /** Parameters.
++ *
++ * These values are set once at the start of the TSO send and do
++ * not get changed as the routine progresses.
++ */
++ struct {
++ /* the number of bytes of header */
++ unsigned int header_length;
++
++ /* The number of bytes to put in each outgoing segment. */
++ int full_packet_size;
++
++ /* Current IP ID, host endian. */
++ unsigned ip_id;
++
++ /* Max size of each output packet payload */
++ int gso_size;
++ } p;
++};
++
++
++/**
++ * Verify that our various assumptions about sk_buffs and the conditions
++ * under which TSO will be attempted hold true.
++ *
++ * @v skb The sk_buff to check.
++ */
++static inline void tso_check_safe(struct sk_buff *skb) {
++ EPRINTK_ON(skb->protocol != htons (ETH_P_IP));
++ EPRINTK_ON(((struct ethhdr*) skb->data)->h_proto != htons (ETH_P_IP));
++ EPRINTK_ON(skb->nh.iph->protocol != IPPROTO_TCP);
++ EPRINTK_ON((SKB_TCP_OFF(skb)
++ + (skb->h.th->doff << 2u)) > skb_headlen(skb));
++}
++
++
++
++/** Parse the SKB header and initialise state. */
++static inline void tso_start(struct netfront_accel_tso_state *st,
++ struct sk_buff *skb) {
++
++ /*
++ * All ethernet/IP/TCP headers combined size is TCP header size
++ * plus offset of TCP header relative to start of packet.
++ */
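++	/* e.g. a vanilla Ethernet/IPv4/TCP frame with no options:
++	 * 14 + 20 + 20 = 54 bytes. */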
++ st->p.header_length = (skb->h.th->doff << 2u) + SKB_TCP_OFF(skb);
++ st->p.full_packet_size = (st->p.header_length
++ + skb_shinfo(skb)->gso_size);
++ st->p.gso_size = skb_shinfo(skb)->gso_size;
++
++ st->p.ip_id = htons(skb->nh.iph->id);
++ st->seqnum = ntohl(skb->h.th->seq);
++
++ EPRINTK_ON(skb->h.th->urg);
++ EPRINTK_ON(skb->h.th->syn);
++ EPRINTK_ON(skb->h.th->rst);
++
++ st->remaining_len = skb->len - st->p.header_length;
++
++ st->output_packets = NULL;
++ st->buffers = 0;
++ st->packets = 0;
++
++ VPRINTK("Starting new TSO: hl %d ps %d gso %d seq %x len %d\n",
++ st->p.header_length, st->p.full_packet_size, st->p.gso_size,
++ st->seqnum, skb->len);
++}
++
++/**
++ * Add another NIC mapped buffer onto an output packet
++ */
++static inline int tso_start_new_buffer(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_state *st,
++ int first)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_pkt_desc *buf;
++
++ /* Get a mapped packet buffer */
++ buf = netfront_accel_buf_get(vnic->tx_bufs);
++ if (buf == NULL) {
++ DPRINTK("%s: No buffer for TX\n", __FUNCTION__);
++ return -1;
++ }
++
++ /* Store a bit of meta-data at the end */
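++	/* Each buffer is laid out as
++	 *   [ payload: NETFRONT_ACCEL_TSO_BUF_LENGTH bytes ]
++	 *   [ struct netfront_accel_tso_output_packet ]
++	 *   [ struct netfront_accel_tso_buffer ]
++	 * so tso_buf below points past both the payload and the
++	 * output_packet struct. */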
++	tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TSO_BUF_LENGTH
++ + sizeof(struct netfront_accel_tso_output_packet));
++
++ tso_buf->buf = buf;
++
++ tso_buf->length = 0;
++
++ if (first) {
++ struct netfront_accel_tso_output_packet *output_packet
++ = (struct netfront_accel_tso_output_packet *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TSO_BUF_LENGTH);
++ output_packet->next = st->output_packets;
++ st->output_packets = output_packet;
++ tso_buf->next = NULL;
++ st->output_packets->tso_bufs = tso_buf;
++ st->output_packets->tso_bufs_len = 1;
++ } else {
++ tso_buf->next = st->output_packets->tso_bufs;
++ st->output_packets->tso_bufs = tso_buf;
++		st->output_packets->tso_bufs_len++;
++ }
++
++ BUG_ON(st->output_packets->tso_bufs_len > ACCEL_TSO_MAX_BUFFERS);
++
++	st->buffers++;
++
++ /*
++	 * Store the context; NULL for now, the last buffer of the
++	 * packet will get a non-NULL skb later
++ */
++ tso_buf->buf->skb = NULL;
++
++ return 0;
++}
++
++
++/* Generate a new header, and prepare for the new packet.
++ *
++ * @v vnic VNIC
++ * @v skb Socket buffer
++ * @v st TSO state
++ * @ret rc 0 on success, or -1 if failed to alloc header
++ */
++
++static inline
++int tso_start_new_packet(netfront_accel_vnic *vnic,
++ struct sk_buff *skb,
++ struct netfront_accel_tso_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct iphdr *tsoh_iph;
++ struct tcphdr *tsoh_th;
++ unsigned ip_length;
++
++ if (tso_start_new_buffer(vnic, st, 1) < 0) {
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++ return -1;
++ }
++
++ /* This has been set up by tso_start_new_buffer() */
++ tso_buf = st->output_packets->tso_bufs;
++
++ /* Copy in the header */
++ memcpy(tso_buf->buf->pkt_kva, skb->data, st->p.header_length);
++ tso_buf->length = st->p.header_length;
++
++ tsoh_th = (struct tcphdr*)
++ (tso_buf->buf->pkt_kva + SKB_TCP_OFF(skb));
++ tsoh_iph = (struct iphdr*)
++ (tso_buf->buf->pkt_kva + SKB_IP_OFF(skb));
++
++ /* Set to zero to encourage falcon to fill these in */
++ tsoh_th->check = 0;
++ tsoh_iph->check = 0;
++
++ tsoh_th->seq = htonl(st->seqnum);
++ st->seqnum += st->p.gso_size;
++
++ if (st->remaining_len > st->p.gso_size) {
++ /* This packet will not finish the TSO burst. */
++ ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
++ tsoh_th->fin = 0;
++ tsoh_th->psh = 0;
++ } else {
++ /* This packet will be the last in the TSO burst. */
++ ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
++ + st->remaining_len);
++ tsoh_th->fin = skb->h.th->fin;
++ tsoh_th->psh = skb->h.th->psh;
++ }
++
++ tsoh_iph->tot_len = htons(ip_length);
++
++ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
++ tsoh_iph->id = st->p.ip_id++;
++ tsoh_iph->id = htons(tsoh_iph->id);
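++	/* ip_id is kept host-endian in the state and converted per
++	 * packet; the stack is assumed to have reserved one ID per
++	 * segment when it built the super-packet. */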
++
++ st->packet_space = st->p.gso_size;
++
++ st->packets++;
++
++ return 0;
++}
++
++
++
++static inline void tso_get_fragment(struct netfront_accel_tso_state *st,
++ int len, void *addr)
++{
++ st->ifc.len = len;
++ st->ifc.addr = addr;
++ return;
++}
++
++
++static inline void tso_unwind(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_tso_output_packet *output_packet;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ while (st->output_packets != NULL) {
++ output_packet = st->output_packets;
++ st->output_packets = output_packet->next;
++ while (output_packet->tso_bufs != NULL) {
++ tso_buf = output_packet->tso_bufs;
++ output_packet->tso_bufs = tso_buf->next;
++
++			st->buffers--;
++			output_packet->tso_bufs_len--;
++
++ netfront_accel_buf_put(vnic->tx_bufs,
++ tso_buf->buf->buf_id);
++ }
++ }
++ BUG_ON(st->buffers != 0);
++}
++
++
++
++static inline
++void tso_fill_packet_with_fragment(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ int n, space;
++
++ BUG_ON(st->output_packets == NULL);
++ BUG_ON(st->output_packets->tso_bufs == NULL);
++
++ tso_buf = st->output_packets->tso_bufs;
++
++ if (st->ifc.len == 0) return;
++ if (st->packet_space == 0) return;
++ if (tso_buf->length == NETFRONT_ACCEL_TSO_BUF_LENGTH) return;
++
++ n = min(st->ifc.len, st->packet_space);
++
++ space = NETFRONT_ACCEL_TSO_BUF_LENGTH - tso_buf->length;
++ n = min(n, space);
++
++ st->packet_space -= n;
++ st->remaining_len -= n;
++ st->ifc.len -= n;
++
++ memcpy(tso_buf->buf->pkt_kva + tso_buf->length, st->ifc.addr, n);
++
++ tso_buf->length += n;
++
++ BUG_ON(tso_buf->length > NETFRONT_ACCEL_TSO_BUF_LENGTH);
++
++ st->ifc.addr += n;
++
++ return;
++}
++
++
++int netfront_accel_enqueue_skb_tso(netfront_accel_vnic *vnic,
++ struct sk_buff *skb)
++{
++ struct netfront_accel_tso_state state;
++ struct netfront_accel_tso_buffer *tso_buf = NULL;
++ struct netfront_accel_tso_output_packet *reversed_list = NULL;
++ struct netfront_accel_tso_output_packet *tmp_pkt;
++ ef_iovec iovecs[ACCEL_TSO_MAX_BUFFERS];
++ int frag_i, rc, dma_id;
++ skb_frag_t *f;
++
++ tso_check_safe(skb);
++
++ if (skb->ip_summed != CHECKSUM_HW)
++ EPRINTK("Trying to TSO send a packet without HW checksum\n");
++
++ tso_start(&state, skb);
++
++ /*
++ * Setup the first payload fragment. If the skb header area
++ * contains exactly the headers and all payload is in the frag
++	 * list, things are a little simpler
++ */
++ if (skb_headlen(skb) == state.p.header_length) {
++ /* Grab the first payload fragment. */
++ BUG_ON(skb_shinfo(skb)->nr_frags < 1);
++ frag_i = 0;
++ f = &skb_shinfo(skb)->frags[frag_i];
++ tso_get_fragment(&state, f->size,
++ page_address(f->page) + f->page_offset);
++ } else {
++ int hl = state.p.header_length;
++ tso_get_fragment(&state, skb_headlen(skb) - hl,
++ skb->data + hl);
++ frag_i = -1;
++ }
++
++ if (tso_start_new_packet(vnic, skb, &state) < 0) {
++ DPRINTK("%s: out of first start-packet memory\n",
++ __FUNCTION__);
++ goto unwind;
++ }
++
++ while (1) {
++ tso_fill_packet_with_fragment(vnic, &state);
++
++ /* Move onto the next fragment? */
++ if (state.ifc.len == 0) {
++ if (++frag_i >= skb_shinfo(skb)->nr_frags)
++ /* End of payload reached. */
++ break;
++ f = &skb_shinfo(skb)->frags[frag_i];
++ tso_get_fragment(&state, f->size,
++ page_address(f->page) +
++ f->page_offset);
++ }
++
++ /* Start a new buffer? */
++ if ((state.output_packets->tso_bufs->length ==
++ NETFRONT_ACCEL_TSO_BUF_LENGTH) &&
++ tso_start_new_buffer(vnic, &state, 0)) {
++ DPRINTK("%s: out of start-buffer memory\n",
++ __FUNCTION__);
++ goto unwind;
++ }
++
++		/* Start a new packet? */
++ if ((state.packet_space == 0 ||
++ ((state.output_packets->tso_bufs_len >=
++ ACCEL_TSO_MAX_BUFFERS) &&
++ (state.output_packets->tso_bufs->length >=
++ NETFRONT_ACCEL_TSO_BUF_LENGTH))) &&
++ tso_start_new_packet(vnic, skb, &state) < 0) {
++ DPRINTK("%s: out of start-packet memory\n",
++ __FUNCTION__);
++ goto unwind;
++ }
++
++ }
++
++ /* Check for space */
++ if (ef_vi_transmit_space(&vnic->vi) < state.buffers) {
++ DPRINTK("%s: Not enough TX space (%d)\n",
++ __FUNCTION__, state.buffers);
++ goto unwind;
++ }
++
++ /*
++ * Store the skb context in the most recent buffer (i.e. the
++ * last buffer that will be sent)
++ */
++ state.output_packets->tso_bufs->buf->skb = skb;
++
++ /* Reverse the list of packets as we construct it on a stack */
++ while (state.output_packets != NULL) {
++ tmp_pkt = state.output_packets;
++ state.output_packets = tmp_pkt->next;
++ tmp_pkt->next = reversed_list;
++ reversed_list = tmp_pkt;
++ }
++
++ /* Pass off to hardware */
++ while (reversed_list != NULL) {
++ tmp_pkt = reversed_list;
++ reversed_list = tmp_pkt->next;
++
++ BUG_ON(tmp_pkt->tso_bufs_len > ACCEL_TSO_MAX_BUFFERS);
++ BUG_ON(tmp_pkt->tso_bufs_len == 0);
++
++ dma_id = tmp_pkt->tso_bufs->buf->buf_id;
++
++ /*
++		 * Make an iovec of the buffers in the list, reversing
++		 * the buffers as we go, since the list was built
++		 * stack-fashion
++ */
++ tso_buf = tmp_pkt->tso_bufs;
++ for (frag_i = tmp_pkt->tso_bufs_len - 1;
++ frag_i >= 0;
++ frag_i--) {
++ iovecs[frag_i].iov_base = tso_buf->buf->pkt_buff_addr;
++ iovecs[frag_i].iov_len = tso_buf->length;
++ tso_buf = tso_buf->next;
++ }
++
++ rc = ef_vi_transmitv(&vnic->vi, iovecs, tmp_pkt->tso_bufs_len,
++ dma_id);
++ /*
++ * We checked for space already, so it really should
++ * succeed
++ */
++ BUG_ON(rc != 0);
++ }
++
++ /* Track number of tx fastpath stats */
++ vnic->netdev_stats.fastpath_tx_bytes += skb->len;
++ vnic->netdev_stats.fastpath_tx_pkts += state.packets;
++#if NETFRONT_ACCEL_STATS
++ {
++ unsigned n;
++ n = vnic->netdev_stats.fastpath_tx_pkts -
++ vnic->stats.fastpath_tx_completions;
++ if (n > vnic->stats.fastpath_tx_pending_max)
++ vnic->stats.fastpath_tx_pending_max = n;
++ }
++#endif
++
++ return NETFRONT_ACCEL_STATUS_GOOD;
++
++ unwind:
++ tso_unwind(vnic, &state);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++
++ return NETFRONT_ACCEL_STATUS_BUSY;
++}
++
++
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_tso.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_tso.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_tso.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_tso.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,57 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_TSO_H
++#define NETFRONT_ACCEL_TSO_H
++
++#include "accel_bufs.h"
++
++/* Track the buffers used in each output packet */
++struct netfront_accel_tso_buffer {
++ struct netfront_accel_tso_buffer *next;
++ struct netfront_accel_pkt_desc *buf;
++ unsigned length;
++};
++
++/* Track the output packets formed from each input packet */
++struct netfront_accel_tso_output_packet {
++ struct netfront_accel_tso_output_packet *next;
++ struct netfront_accel_tso_buffer *tso_bufs;
++ unsigned tso_bufs_len;
++};
++
++
++/*
++ * Max available space in a buffer for data once meta-data has taken
++ * its place
++ */
++#define NETFRONT_ACCEL_TSO_BUF_LENGTH \
++ ((PAGE_SIZE / NETFRONT_ACCEL_BUFS_PER_PAGE) \
++ - sizeof(struct netfront_accel_tso_buffer) \
++ - sizeof(struct netfront_accel_tso_output_packet))
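++/*
++ * A worked example, assuming PAGE_SIZE is 4096 and
++ * NETFRONT_ACCEL_BUFS_PER_PAGE is 2 (assumptions, not guaranteed
++ * here): each buffer spans 2048 bytes, all of it available for
++ * packet data except the two trailing metadata structs.
++ */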
++
++int netfront_accel_enqueue_skb_tso(netfront_accel_vnic *vnic,
++ struct sk_buff *skb);
++
++#endif /* NETFRONT_ACCEL_TSO_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_vi.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_vi.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_vi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_vi.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1194 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/if_ether.h>
++#include <linux/ip.h>
++#include <net/checksum.h>
++#include <asm/io.h>
++
++#include "accel.h"
++#include "accel_util.h"
++#include "accel_bufs.h"
++#include "accel_tso.h"
++#include "accel_ssr.h"
++#include "netfront.h"
++
++#include "etherfabric/ef_vi.h"
++
++/*
++ * Max available space in a buffer for data once meta-data has taken
++ * its place
++ */
++#define NETFRONT_ACCEL_TX_BUF_LENGTH \
++ ((PAGE_SIZE / NETFRONT_ACCEL_BUFS_PER_PAGE) \
++ - sizeof(struct netfront_accel_tso_buffer))
++
++#define ACCEL_TX_MAX_BUFFERS (6)
++#define ACCEL_VI_POLL_EVENTS (8)
++
++static
++int netfront_accel_vi_init_fini(netfront_accel_vnic *vnic,
++ struct net_accel_msg_hw *hw_msg)
++{
++ struct ef_vi_nic_type nic_type;
++ struct net_accel_hw_falcon_b *hw_info;
++ void *io_kva, *evq_base, *rx_dma_kva, *tx_dma_kva, *doorbell_kva;
++ u32 *evq_gnts;
++ u32 evq_order;
++ int vi_state_size;
++ u8 vi_data[VI_MAPPINGS_SIZE];
++
++ if (hw_msg == NULL)
++ goto fini;
++
++ /* And create the local macs table lock */
++ spin_lock_init(&vnic->table_lock);
++
++ /* Create fastpath table, initial size 8, key length 8 */
++ if (cuckoo_hash_init(&vnic->fastpath_table, 3, 8)) {
++ EPRINTK("failed to allocate fastpath table\n");
++ goto fail_cuckoo;
++ }
++
++ vnic->hw.falcon.type = hw_msg->type;
++
++ switch (hw_msg->type) {
++ case NET_ACCEL_MSG_HWTYPE_FALCON_A:
++ hw_info = &hw_msg->resources.falcon_a.common;
++ /* Need the extra rptr register page on A1 */
++ io_kva = net_accel_map_iomem_page
++ (vnic->dev, hw_msg->resources.falcon_a.evq_rptr_gnt,
++ &vnic->hw.falcon.evq_rptr_mapping);
++ if (io_kva == NULL) {
++ EPRINTK("%s: evq_rptr permission failed\n", __FUNCTION__);
++ goto evq_rptr_fail;
++ }
++
++ vnic->hw.falcon.evq_rptr = io_kva +
++ (hw_info->evq_rptr & (PAGE_SIZE - 1));
++ break;
++ case NET_ACCEL_MSG_HWTYPE_FALCON_B:
++ hw_info = &hw_msg->resources.falcon_b;
++ break;
++ default:
++ goto bad_type;
++ }
++
++ /**** Event Queue ****/
++
++ /* Map the event queue pages */
++ evq_gnts = hw_info->evq_mem_gnts;
++ evq_order = hw_info->evq_order;
++
++ EPRINTK_ON(hw_info->evq_offs != 0);
++
++ DPRINTK("Will map evq %d pages\n", 1 << evq_order);
++
++ evq_base =
++ net_accel_map_grants_contig(vnic->dev, evq_gnts, 1 << evq_order,
++ &vnic->evq_mapping);
++ if (evq_base == NULL) {
++ EPRINTK("%s: evq_base failed\n", __FUNCTION__);
++ goto evq_fail;
++ }
++
++ /**** Doorbells ****/
++ /* Set up the doorbell mappings. */
++ doorbell_kva =
++ net_accel_map_iomem_page(vnic->dev, hw_info->doorbell_gnt,
++ &vnic->hw.falcon.doorbell_mapping);
++ if (doorbell_kva == NULL) {
++ EPRINTK("%s: doorbell permission failed\n", __FUNCTION__);
++ goto doorbell_fail;
++ }
++ vnic->hw.falcon.doorbell = doorbell_kva;
++
++ /* On Falcon_B we get the rptr from the doorbell page */
++ if (hw_msg->type == NET_ACCEL_MSG_HWTYPE_FALCON_B) {
++ vnic->hw.falcon.evq_rptr =
++ (u32 *)((char *)vnic->hw.falcon.doorbell
++ + hw_info->evq_rptr);
++ }
++
++ /**** DMA Queue ****/
++
++ /* Set up the DMA Queues from the message. */
++ tx_dma_kva = net_accel_map_grants_contig
++ (vnic->dev, &(hw_info->txdmaq_gnt), 1,
++ &vnic->hw.falcon.txdmaq_mapping);
++ if (tx_dma_kva == NULL) {
++ EPRINTK("%s: TX dma failed\n", __FUNCTION__);
++ goto tx_dma_fail;
++ }
++
++ rx_dma_kva = net_accel_map_grants_contig
++ (vnic->dev, &(hw_info->rxdmaq_gnt), 1,
++ &vnic->hw.falcon.rxdmaq_mapping);
++ if (rx_dma_kva == NULL) {
++ EPRINTK("%s: RX dma failed\n", __FUNCTION__);
++ goto rx_dma_fail;
++ }
++
++ /* Full confession */
++ DPRINTK("Mapped H/W"
++ " Tx DMAQ grant %x -> %p\n"
++ " Rx DMAQ grant %x -> %p\n"
++ " EVQ grant %x -> %p\n",
++ hw_info->txdmaq_gnt, tx_dma_kva,
++ hw_info->rxdmaq_gnt, rx_dma_kva,
++ evq_gnts[0], evq_base
++ );
++
++ memset(vi_data, 0, sizeof(vi_data));
++
++ /* TODO BUG11305: convert efhw_arch to ef_vi_arch
++ * e.g.
++ * arch = ef_vi_arch_from_efhw_arch(hw_info->nic_arch);
++ * assert(arch >= 0);
++ * nic_type.arch = arch;
++ */
++ nic_type.arch = (unsigned char)hw_info->nic_arch;
++ nic_type.variant = (char)hw_info->nic_variant;
++ nic_type.revision = (unsigned char)hw_info->nic_revision;
++
++ ef_vi_init_mapping_evq(vi_data, nic_type, hw_info->instance,
++ 1 << (evq_order + PAGE_SHIFT), evq_base,
++ (void *)0xdeadbeef);
++
++ ef_vi_init_mapping_vi(vi_data, nic_type, hw_info->rx_capacity,
++ hw_info->tx_capacity, hw_info->instance,
++ doorbell_kva, rx_dma_kva, tx_dma_kva, 0);
++
++ vi_state_size = ef_vi_calc_state_bytes(hw_info->rx_capacity,
++ hw_info->tx_capacity);
++ vnic->vi_state = (ef_vi_state *)kmalloc(vi_state_size, GFP_KERNEL);
++ if (vnic->vi_state == NULL) {
++ EPRINTK("%s: kmalloc for VI state failed\n", __FUNCTION__);
++ goto vi_state_fail;
++ }
++ ef_vi_init(&vnic->vi, vi_data, vnic->vi_state, &vnic->evq_state, 0);
++
++ ef_eventq_state_init(&vnic->vi);
++
++ ef_vi_state_init(&vnic->vi);
++
++ return 0;
++
++fini:
++ kfree(vnic->vi_state);
++ vnic->vi_state = NULL;
++vi_state_fail:
++ net_accel_unmap_grants_contig(vnic->dev, vnic->hw.falcon.rxdmaq_mapping);
++rx_dma_fail:
++ net_accel_unmap_grants_contig(vnic->dev, vnic->hw.falcon.txdmaq_mapping);
++tx_dma_fail:
++ net_accel_unmap_iomem_page(vnic->dev, vnic->hw.falcon.doorbell_mapping);
++ vnic->hw.falcon.doorbell = NULL;
++doorbell_fail:
++ net_accel_unmap_grants_contig(vnic->dev, vnic->evq_mapping);
++evq_fail:
++ if (vnic->hw.falcon.type == NET_ACCEL_MSG_HWTYPE_FALCON_A)
++ net_accel_unmap_iomem_page(vnic->dev,
++ vnic->hw.falcon.evq_rptr_mapping);
++ vnic->hw.falcon.evq_rptr = NULL;
++evq_rptr_fail:
++bad_type:
++ cuckoo_hash_destroy(&vnic->fastpath_table);
++fail_cuckoo:
++ return -EIO;
++}
++
++
++void netfront_accel_vi_ctor(netfront_accel_vnic *vnic)
++{
++ /* Just mark the VI as uninitialised. */
++ vnic->vi_state = NULL;
++}
++
++
++int netfront_accel_vi_init(netfront_accel_vnic *vnic, struct net_accel_msg_hw *hw_msg)
++{
++ BUG_ON(hw_msg == NULL);
++ return netfront_accel_vi_init_fini(vnic, hw_msg);
++}
++
++
++void netfront_accel_vi_dtor(netfront_accel_vnic *vnic)
++{
++ if (vnic->vi_state != NULL)
++ netfront_accel_vi_init_fini(vnic, NULL);
++}
++
++
++static
++void netfront_accel_vi_post_rx(netfront_accel_vnic *vnic, u16 id,
++ netfront_accel_pkt_desc *buf)
++{
++
++ int idx = vnic->rx_dma_batched;
++
++#if 0
++ VPRINTK("Posting buffer %d (0x%08x) for rx at index %d, space is %d\n",
++ id, buf->pkt_buff_addr, idx, ef_vi_receive_space(&vnic->vi));
++#endif
++ /* Set up a virtual buffer descriptor */
++ ef_vi_receive_init(&vnic->vi, buf->pkt_buff_addr, id,
++ /*rx_bytes=max*/0);
++
++ idx++;
++
++ vnic->rx_dma_level++;
++
++ /*
++ * Only push the descriptor to the card if we've reached the
++ * batch size. Otherwise, the descriptors can sit around for
++ * a while. There will be plenty available.
++ */
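++	/* The second clause below flushes immediately while the ring
++	 * is running low, so the NIC is never starved just to save a
++	 * doorbell write. */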
++ if (idx >= NETFRONT_ACCEL_RX_DESC_BATCH ||
++ vnic->rx_dma_level < NETFRONT_ACCEL_RX_DESC_BATCH) {
++#if 0
++ VPRINTK("Flushing %d rx descriptors.\n", idx);
++#endif
++
++ /* Push buffer to hardware */
++ ef_vi_receive_push(&vnic->vi);
++
++ idx = 0;
++ }
++
++ vnic->rx_dma_batched = idx;
++}
++
++
++inline
++void netfront_accel_vi_post_rx_or_free(netfront_accel_vnic *vnic, u16 id,
++ netfront_accel_pkt_desc *buf)
++{
++
++ VPRINTK("%s: %d\n", __FUNCTION__, id);
++
++ if (ef_vi_receive_space(&vnic->vi) <= vnic->rx_dma_batched) {
++ VPRINTK("RX space is full\n");
++ netfront_accel_buf_put(vnic->rx_bufs, id);
++ return;
++ }
++
++ VPRINTK("Completed buffer %d is reposted\n", id);
++ netfront_accel_vi_post_rx(vnic, id, buf);
++
++ /*
++ * Let's see if there's any more to be pushed out to the NIC
++ * while we're here
++ */
++ while (ef_vi_receive_space(&vnic->vi) > vnic->rx_dma_batched) {
++ /* Try to allocate a buffer. */
++ buf = netfront_accel_buf_get(vnic->rx_bufs);
++ if (buf == NULL)
++ break;
++
++ /* Add it to the rx dma queue. */
++ netfront_accel_vi_post_rx(vnic, buf->buf_id, buf);
++ }
++}
++
++
++void netfront_accel_vi_add_bufs(netfront_accel_vnic *vnic, int is_rx)
++{
++
++ while (is_rx &&
++ ef_vi_receive_space(&vnic->vi) > vnic->rx_dma_batched) {
++ netfront_accel_pkt_desc *buf;
++
++ VPRINTK("%s: %d\n", __FUNCTION__, vnic->rx_dma_level);
++
++ /* Try to allocate a buffer. */
++ buf = netfront_accel_buf_get(vnic->rx_bufs);
++
++ if (buf == NULL)
++ break;
++
++ /* Add it to the rx dma queue. */
++ netfront_accel_vi_post_rx(vnic, buf->buf_id, buf);
++ }
++
++ VPRINTK("%s: done\n", __FUNCTION__);
++}
++
++
++struct netfront_accel_multi_state {
++ unsigned remaining_len;
++
++ unsigned buffers;
++
++ struct netfront_accel_tso_buffer *output_buffers;
++
++ /* Where we are in the current fragment of the SKB. */
++ struct {
++ /* address of current position */
++ void *addr;
++ /* remaining length */
++ unsigned int len;
++ } ifc; /* == Input Fragment Cursor */
++};
++
++
++static inline void multi_post_start(struct netfront_accel_multi_state *st,
++ struct sk_buff *skb)
++{
++ st->remaining_len = skb->len;
++ st->output_buffers = NULL;
++ st->buffers = 0;
++ st->ifc.len = skb_headlen(skb);
++ st->ifc.addr = skb->data;
++}
++
++static int multi_post_start_new_buffer(netfront_accel_vnic *vnic,
++ struct netfront_accel_multi_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_pkt_desc *buf;
++
++ /* Get a mapped packet buffer */
++ buf = netfront_accel_buf_get(vnic->tx_bufs);
++ if (buf == NULL) {
++ DPRINTK("%s: No buffer for TX\n", __FUNCTION__);
++ return -1;
++ }
++
++ /* Store a bit of meta-data at the end */
++ tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TX_BUF_LENGTH);
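++	/* The layout here is simpler than the TSO path:
++	 *   [ payload: NETFRONT_ACCEL_TX_BUF_LENGTH bytes ]
++	 *   [ struct netfront_accel_tso_buffer ]
++	 * with no output_packet struct. */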
++
++ tso_buf->buf = buf;
++
++ tso_buf->length = 0;
++
++ tso_buf->next = st->output_buffers;
++ st->output_buffers = tso_buf;
++ st->buffers++;
++
++ BUG_ON(st->buffers >= ACCEL_TX_MAX_BUFFERS);
++
++ /*
++	 * Store the context; NULL for now, the last buffer of the
++	 * packet will get a non-NULL skb later
++ */
++ tso_buf->buf->skb = NULL;
++
++ return 0;
++}
++
++
++static void
++multi_post_fill_buffer_with_fragment(netfront_accel_vnic *vnic,
++ struct netfront_accel_multi_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ unsigned n, space;
++
++ BUG_ON(st->output_buffers == NULL);
++ tso_buf = st->output_buffers;
++
++ if (st->ifc.len == 0) return;
++ if (tso_buf->length == NETFRONT_ACCEL_TX_BUF_LENGTH) return;
++
++ BUG_ON(tso_buf->length > NETFRONT_ACCEL_TX_BUF_LENGTH);
++
++ space = NETFRONT_ACCEL_TX_BUF_LENGTH - tso_buf->length;
++ n = min(st->ifc.len, space);
++
++ memcpy(tso_buf->buf->pkt_kva + tso_buf->length, st->ifc.addr, n);
++
++ st->remaining_len -= n;
++ st->ifc.len -= n;
++ tso_buf->length += n;
++ st->ifc.addr += n;
++
++ BUG_ON(tso_buf->length > NETFRONT_ACCEL_TX_BUF_LENGTH);
++
++ return;
++}
++
++
++static inline void multi_post_unwind(netfront_accel_vnic *vnic,
++ struct netfront_accel_multi_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ while (st->output_buffers != NULL) {
++ tso_buf = st->output_buffers;
++ st->output_buffers = tso_buf->next;
++ st->buffers--;
++ netfront_accel_buf_put(vnic->tx_bufs, tso_buf->buf->buf_id);
++ }
++ BUG_ON(st->buffers != 0);
++}
++
++
++static enum netfront_accel_post_status
++netfront_accel_enqueue_skb_multi(netfront_accel_vnic *vnic, struct sk_buff *skb)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_multi_state state;
++ ef_iovec iovecs[ACCEL_TX_MAX_BUFFERS];
++ skb_frag_t *f;
++ int frag_i, rc, dma_id;
++
++ multi_post_start(&state, skb);
++
++ frag_i = -1;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Set to zero to encourage falcon to work it out for us */
++ *(u16*)(skb->h.raw + skb->csum) = 0;
++ }
++
++ if (multi_post_start_new_buffer(vnic, &state)) {
++ DPRINTK("%s: out of buffers\n", __FUNCTION__);
++ goto unwind;
++ }
++
++ while (1) {
++ multi_post_fill_buffer_with_fragment(vnic, &state);
++
++ /* Move onto the next fragment? */
++ if (state.ifc.len == 0) {
++ if (++frag_i >= skb_shinfo(skb)->nr_frags)
++ /* End of payload reached. */
++ break;
++ f = &skb_shinfo(skb)->frags[frag_i];
++ state.ifc.len = f->size;
++ state.ifc.addr = page_address(f->page) + f->page_offset;
++ }
++
++ /* Start a new buffer? */
++ if ((state.output_buffers->length ==
++ NETFRONT_ACCEL_TX_BUF_LENGTH) &&
++ multi_post_start_new_buffer(vnic, &state)) {
++ DPRINTK("%s: out of buffers\n", __FUNCTION__);
++ goto unwind;
++ }
++ }
++
++ /* Check for space */
++ if (ef_vi_transmit_space(&vnic->vi) < state.buffers) {
++ DPRINTK("%s: Not enough TX space (%d)\n", __FUNCTION__, state.buffers);
++ goto unwind;
++ }
++
++ /* Store the skb in what will be the last buffer's context */
++ state.output_buffers->buf->skb = skb;
++ /* Remember dma_id of what will be the last buffer */
++ dma_id = state.output_buffers->buf->buf_id;
++
++ /*
++	 * Make an iovec of the buffers in the list, reversing the
++	 * buffers as we go, since the list was built stack-fashion
++ */
++ tso_buf = state.output_buffers;
++ for (frag_i = state.buffers-1; frag_i >= 0; frag_i--) {
++ iovecs[frag_i].iov_base = tso_buf->buf->pkt_buff_addr;
++ iovecs[frag_i].iov_len = tso_buf->length;
++ tso_buf = tso_buf->next;
++ }
++
++ rc = ef_vi_transmitv(&vnic->vi, iovecs, state.buffers, dma_id);
++
++ /* Track number of tx fastpath stats */
++ vnic->netdev_stats.fastpath_tx_bytes += skb->len;
++	vnic->netdev_stats.fastpath_tx_pkts++;
++#if NETFRONT_ACCEL_STATS
++ {
++ u32 n;
++ n = vnic->netdev_stats.fastpath_tx_pkts -
++ (u32)vnic->stats.fastpath_tx_completions;
++ if (n > vnic->stats.fastpath_tx_pending_max)
++ vnic->stats.fastpath_tx_pending_max = n;
++ }
++#endif
++ return NETFRONT_ACCEL_STATUS_GOOD;
++
++unwind:
++ multi_post_unwind(vnic, &state);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++
++ return NETFRONT_ACCEL_STATUS_BUSY;
++}
++
++
++static enum netfront_accel_post_status
++netfront_accel_enqueue_skb_single(netfront_accel_vnic *vnic, struct sk_buff *skb)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_pkt_desc *buf;
++ u8 *kva;
++ int rc;
++
++ if (ef_vi_transmit_space(&vnic->vi) < 1) {
++ DPRINTK("%s: No TX space\n", __FUNCTION__);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++ return NETFRONT_ACCEL_STATUS_BUSY;
++ }
++
++ buf = netfront_accel_buf_get(vnic->tx_bufs);
++ if (buf == NULL) {
++ DPRINTK("%s: No buffer for TX\n", __FUNCTION__);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++ return NETFRONT_ACCEL_STATUS_BUSY;
++ }
++
++ /* Track number of tx fastpath stats */
++ vnic->netdev_stats.fastpath_tx_pkts++;
++ vnic->netdev_stats.fastpath_tx_bytes += skb->len;
++
++#if NETFRONT_ACCEL_STATS
++ {
++ u32 n;
++ n = vnic->netdev_stats.fastpath_tx_pkts -
++ (u32)vnic->stats.fastpath_tx_completions;
++ if (n > vnic->stats.fastpath_tx_pending_max)
++ vnic->stats.fastpath_tx_pending_max = n;
++ }
++#endif
++
++ /* Store the context */
++ buf->skb = skb;
++
++ kva = buf->pkt_kva;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Set to zero to encourage falcon to work it out for us */
++ *(u16*)(skb->h.raw + skb->csum) = 0;
++ }
++ NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT
++ (skb, idx, frag_data, frag_len, {
++ /* Copy in payload */
++ VPRINTK("*** Copying %d bytes to %p\n", frag_len, kva);
++ memcpy(kva, frag_data, frag_len);
++ kva += frag_len;
++ });
++
++ VPRINTK("%s: id %d pkt %p kva %p buff_addr 0x%08x\n", __FUNCTION__,
++ buf->buf_id, buf, buf->pkt_kva, buf->pkt_buff_addr);
++
++
++ /* Set up the TSO meta-data for a single buffer/packet */
++ tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TX_BUF_LENGTH);
++ tso_buf->next = NULL;
++ tso_buf->buf = buf;
++ tso_buf->length = skb->len;
++
++ rc = ef_vi_transmit(&vnic->vi, buf->pkt_buff_addr, skb->len,
++ buf->buf_id);
++ /* We checked for space already, so it really should succeed */
++ BUG_ON(rc != 0);
++
++ return NETFRONT_ACCEL_STATUS_GOOD;
++}
++
++
++enum netfront_accel_post_status
++netfront_accel_vi_tx_post(netfront_accel_vnic *vnic, struct sk_buff *skb)
++{
++ struct ethhdr *pkt_eth_hdr;
++ struct iphdr *pkt_ipv4_hdr;
++ int value, try_fastpath;
++
++ /*
++ * This assumes that the data field points to the dest mac
++ * address.
++ */
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(skb->data);
++
++ /*
++	 * NB it is very important that everything that could return
++	 * "CANT" is tested before anything that returns "BUSY": if a
++	 * call returns "BUSY" it is assumed that it won't return
++	 * "CANT" the next time it is tried
++ */
++
++ /*
++ * Do a fastpath send if fast path table lookup returns true.
++ * We do this without the table lock and so may get the wrong
++ * answer, but current opinion is that's not a big problem
++ */
++ try_fastpath = cuckoo_hash_lookup(&vnic->fastpath_table,
++ (cuckoo_hash_key *)(&key), &value);
++
++ if (!try_fastpath) {
++ VPRINTK("try fast path false for mac: " MAC_FMT "\n",
++ MAC_ARG(skb->data));
++
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ /* Check to see if the packet can be sent. */
++ if (skb_headlen(skb) < sizeof(*pkt_eth_hdr) + sizeof(*pkt_ipv4_hdr)) {
++ EPRINTK("%s: Packet header is too small\n", __FUNCTION__);
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ pkt_eth_hdr = (void*)skb->data;
++ pkt_ipv4_hdr = (void*)(pkt_eth_hdr+1);
++
++ if (be16_to_cpu(pkt_eth_hdr->h_proto) != ETH_P_IP) {
++ DPRINTK("%s: Packet is not IPV4 (ether_type=0x%04x)\n", __FUNCTION__,
++ be16_to_cpu(pkt_eth_hdr->h_proto));
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ if (pkt_ipv4_hdr->protocol != IPPROTO_TCP &&
++ pkt_ipv4_hdr->protocol != IPPROTO_UDP) {
++ DPRINTK("%s: Packet is not TCP/UDP (ip_protocol=0x%02x)\n",
++ __FUNCTION__, pkt_ipv4_hdr->protocol);
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ VPRINTK("%s: %d bytes, gso %d\n", __FUNCTION__, skb->len,
++ skb_shinfo(skb)->gso_size);
++
++ if (skb_shinfo(skb)->gso_size) {
++ return netfront_accel_enqueue_skb_tso(vnic, skb);
++ }
++
++ if (skb->len <= NETFRONT_ACCEL_TX_BUF_LENGTH) {
++ return netfront_accel_enqueue_skb_single(vnic, skb);
++ }
++
++ return netfront_accel_enqueue_skb_multi(vnic, skb);
++}
++
++
++/*
++ * Copy the data to required end destination. NB. len is the total new
++ * length of the socket buffer, not the amount of data to copy
++ */
++inline
++int ef_vnic_copy_to_skb(netfront_accel_vnic *vnic, struct sk_buff *skb,
++ struct netfront_accel_pkt_desc *buf, int len)
++{
++ int i, extra = len - skb->len;
++	char c = 0;	/* dummy accumulator for the cache-warming reads */
++ int pkt_stride = vnic->rx_pkt_stride;
++ int skb_stride = vnic->rx_skb_stride;
++ char *skb_start;
++
++ /*
++	 * These reads pull the payload into the cache; we have seen a
++	 * performance benefit from this, but it is disabled by default
++ */
++ skb_start = skb->data;
++ if (pkt_stride) {
++ for (i = 0; i < len; i += pkt_stride) {
++ c += ((volatile char*)(buf->pkt_kva))[i];
++ }
++ }
++ if (skb_stride) {
++ for (i = skb->len; i < len ; i += skb_stride) {
++ c += ((volatile char*)(skb_start))[i];
++ }
++ }
++
++ if (skb_tailroom(skb) >= extra) {
++ memcpy(skb_put(skb, extra), buf->pkt_kva, extra);
++ return 0;
++ }
++
++ return -ENOSPC;
++}
++
++
++static void discard_jumbo_state(netfront_accel_vnic *vnic)
++{
++
++ if (vnic->jumbo_state.skb != NULL) {
++ dev_kfree_skb_any(vnic->jumbo_state.skb);
++
++ vnic->jumbo_state.skb = NULL;
++ }
++ vnic->jumbo_state.in_progress = 0;
++}
++
++
++static void netfront_accel_vi_rx_complete(netfront_accel_vnic *vnic,
++ struct sk_buff *skb)
++{
++ cuckoo_hash_mac_key key;
++ unsigned long flags;
++ int value;
++ struct net_device *net_dev;
++
++
++ key = cuckoo_mac_to_key(skb->data + ETH_ALEN);
++
++ /*
++ * If this is a MAC address that we want to do fast path TX
++ * to, and we don't already, add it to the fastpath table.
++ * The initial lookup is done without the table lock and so
++ * may get the wrong answer, but current opinion is that's not
++ * a big problem
++ */
++ if (is_valid_ether_addr(skb->data + ETH_ALEN) &&
++ !cuckoo_hash_lookup(&vnic->fastpath_table, (cuckoo_hash_key *)&key,
++ &value)) {
++ spin_lock_irqsave(&vnic->table_lock, flags);
++
++ cuckoo_hash_add_check(&vnic->fastpath_table,
++ (cuckoo_hash_key *)&key,
++ 1, 1);
++
++ spin_unlock_irqrestore(&vnic->table_lock, flags);
++ }
++
++ if (compare_ether_addr(skb->data, vnic->mac)) {
++ struct iphdr *ip = (struct iphdr *)(skb->data + ETH_HLEN);
++ u16 port;
++
++ DPRINTK("%s: saw wrong MAC address " MAC_FMT "\n",
++ __FUNCTION__, MAC_ARG(skb->data));
++
++ if (ip->protocol == IPPROTO_TCP) {
++ struct tcphdr *tcp = (struct tcphdr *)
++ ((char *)ip + 4 * ip->ihl);
++ port = tcp->dest;
++ } else {
++ struct udphdr *udp = (struct udphdr *)
++ ((char *)ip + 4 * ip->ihl);
++ EPRINTK_ON(ip->protocol != IPPROTO_UDP);
++ port = udp->dest;
++ }
++
++ netfront_accel_msg_tx_fastpath(vnic, skb->data,
++ ip->daddr, port,
++ ip->protocol);
++ }
++
++ net_dev = vnic->net_dev;
++ skb->dev = net_dev;
++ skb->protocol = eth_type_trans(skb, net_dev);
++ /* CHECKSUM_UNNECESSARY as hardware has done it already */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ if (!netfront_accel_ssr_skb(vnic, &vnic->ssr_state, skb))
++ netif_receive_skb(skb);
++}
++
++
++static int netfront_accel_vi_poll_process_rx(netfront_accel_vnic *vnic,
++ ef_event *ev)
++{
++ struct netfront_accel_bufinfo *bufinfo = vnic->rx_bufs;
++ struct netfront_accel_pkt_desc *buf = NULL;
++ struct sk_buff *skb;
++ int id, len, sop = 0, cont = 0;
++
++ VPRINTK("Rx event.\n");
++ /*
++ * Complete the receive operation, and get the request id of
++ * the buffer
++ */
++ id = ef_vi_receive_done(&vnic->vi, ev);
++
++ if (id < 0 || id >= bufinfo->npages*NETFRONT_ACCEL_BUFS_PER_PAGE) {
++ EPRINTK("Rx packet %d is invalid\n", id);
++ /* Carry on round the loop if more events */
++ goto bad_packet;
++ }
++ /* Get our buffer descriptor */
++ buf = netfront_accel_buf_find(bufinfo, id);
++
++ len = EF_EVENT_RX_BYTES(*ev);
++
++ /* An RX buffer has been removed from the DMA ring. */
++ vnic->rx_dma_level--;
++
++ if (EF_EVENT_TYPE(*ev) == EF_EVENT_TYPE_RX) {
++ sop = EF_EVENT_RX_SOP(*ev);
++ cont = EF_EVENT_RX_CONT(*ev);
++
++ skb = vnic->jumbo_state.skb;
++
++ VPRINTK("Rx packet %d: %d bytes so far; sop %d; cont %d\n",
++ id, len, sop, cont);
++
++ if (sop) {
++ if (!vnic->jumbo_state.in_progress) {
++ vnic->jumbo_state.in_progress = 1;
++ BUG_ON(vnic->jumbo_state.skb != NULL);
++ } else {
++ /*
++				 * This fragment implies the previous one
++				 * was missing its tail, but may itself
++				 * be OK
++ */
++ DPRINTK("sop and in_progress => no tail\n");
++
++ /* Release the socket buffer we already had */
++ discard_jumbo_state(vnic);
++
++ /* Now start processing this fragment */
++ vnic->jumbo_state.in_progress = 1;
++ skb = NULL;
++ }
++ } else if (!vnic->jumbo_state.in_progress) {
++ DPRINTK("!sop and !in_progress => missing head\n");
++ goto missing_head;
++ }
++
++ if (!cont) {
++ /* Update state for next time */
++ vnic->jumbo_state.in_progress = 0;
++ vnic->jumbo_state.skb = NULL;
++ } else if (!vnic->jumbo_state.in_progress) {
++ DPRINTK("cont and !in_progress => missing head\n");
++ goto missing_head;
++ }
++
++ if (skb == NULL) {
++ BUG_ON(!sop);
++
++ if (!cont)
++ skb = alloc_skb(len+NET_IP_ALIGN, GFP_ATOMIC);
++ else
++ skb = alloc_skb(vnic->net_dev->mtu+NET_IP_ALIGN,
++ GFP_ATOMIC);
++
++ if (skb == NULL) {
++ DPRINTK("%s: Couldn't get an rx skb.\n",
++ __FUNCTION__);
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++ /*
++ * Dropping this fragment means we
++ * should discard the rest too
++ */
++ discard_jumbo_state(vnic);
++
++ /* Carry on round the loop if more events */
++ return 0;
++ }
++
++ }
++
++ /* Copy the data to required end destination */
++ if (ef_vnic_copy_to_skb(vnic, skb, buf, len) != 0) {
++ /*
++ * No space in the skb - suggests > MTU packet
++ * received
++ */
++ EPRINTK("%s: Rx packet too large (%d)\n",
++ __FUNCTION__, len);
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++ discard_jumbo_state(vnic);
++ return 0;
++ }
++
++ /* Put the buffer back in the DMA queue. */
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++
++ if (cont) {
++ vnic->jumbo_state.skb = skb;
++
++ return 0;
++ } else {
++ /* Track number of rx fastpath packets */
++ vnic->netdev_stats.fastpath_rx_pkts++;
++ vnic->netdev_stats.fastpath_rx_bytes += len;
++
++ netfront_accel_vi_rx_complete(vnic, skb);
++
++ return 1;
++ }
++ } else {
++ BUG_ON(EF_EVENT_TYPE(*ev) != EF_EVENT_TYPE_RX_DISCARD);
++
++ if (EF_EVENT_RX_DISCARD_TYPE(*ev)
++ == EF_EVENT_RX_DISCARD_TRUNC) {
++ DPRINTK("%s: " EF_EVENT_FMT
++ " buffer %d FRM_TRUNC q_id %d\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
++ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_frm_trunc);
++ } else if (EF_EVENT_RX_DISCARD_TYPE(*ev)
++ == EF_EVENT_RX_DISCARD_OTHER) {
++ DPRINTK("%s: " EF_EVENT_FMT
++ " buffer %d RX_DISCARD_OTHER q_id %d\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
++ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
++ /*
++ * Probably tail of packet for which error has
++ * already been logged, so don't count in
++ * stats
++ */
++ } else {
++ EPRINTK("%s: " EF_EVENT_FMT
++ " buffer %d rx discard type %d q_id %d\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
++ EF_EVENT_RX_DISCARD_TYPE(*ev),
++ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.bad_event_count);
++ }
++ }
++
++ /* discard type drops through here */
++
++bad_packet:
++ /* Release the socket buffer we already had */
++ discard_jumbo_state(vnic);
++
++missing_head:
++ BUG_ON(vnic->jumbo_state.in_progress != 0);
++ BUG_ON(vnic->jumbo_state.skb != NULL);
++
++ if (id >= 0 && id < bufinfo->npages*NETFRONT_ACCEL_BUFS_PER_PAGE)
++ /* Put the buffer back in the DMA queue. */
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++
++ vnic->netdev_stats.fastpath_rx_errors++;
++
++	DPRINTK("%s experienced bad packet/missing fragment error: %d\n",
++ __FUNCTION__, ev->rx.flags);
++
++ return 0;
++}
++
++
++static void netfront_accel_vi_not_busy(netfront_accel_vnic *vnic)
++{
++ struct netfront_info *np = ((struct netfront_info *)
++ netdev_priv(vnic->net_dev));
++ struct sk_buff *skb;
++ int handled;
++ unsigned long flags;
++
++ /*
++ * TODO if we could safely check tx_skb == NULL and return
++ * early without taking the lock, that would obviously help
++ * performance
++ */
++
++ /* Take the netfront lock which protects tx_skb. */
++ spin_lock_irqsave(&np->tx_lock, flags);
++ if (vnic->tx_skb != NULL) {
++ DPRINTK("%s trying to send spare buffer\n", __FUNCTION__);
++
++ skb = vnic->tx_skb;
++ vnic->tx_skb = NULL;
++
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++
++ handled = netfront_accel_vi_tx_post(vnic, skb);
++
++ spin_lock_irqsave(&np->tx_lock, flags);
++
++ if (handled != NETFRONT_ACCEL_STATUS_BUSY) {
++ DPRINTK("%s restarting tx\n", __FUNCTION__);
++ if (netfront_check_queue_ready(vnic->net_dev)) {
++ netif_wake_queue(vnic->net_dev);
++ NETFRONT_ACCEL_STATS_OP
++ (vnic->stats.queue_wakes++);
++ }
++ } else {
++ vnic->tx_skb = skb;
++ }
++
++ /*
++		 * We should never get a CANT here, as the CANT conditions
++		 * were checked before it returned BUSY the first time round
++ */
++ BUG_ON(handled == NETFRONT_ACCEL_STATUS_CANT);
++ }
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++}
++
++
++static void netfront_accel_vi_tx_complete(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_buffer *tso_buf,
++ int is_last)
++{
++ struct netfront_accel_tso_buffer *next;
++
++ /*
++ * We get a single completion for every call to
++ * ef_vi_transmitv so handle any other buffers which are part
++ * of the same packet
++ */
++ while (tso_buf != NULL) {
++ if (tso_buf->buf->skb != NULL) {
++ dev_kfree_skb_any(tso_buf->buf->skb);
++ tso_buf->buf->skb = NULL;
++ }
++
++ next = tso_buf->next;
++
++ netfront_accel_buf_put(vnic->tx_bufs, tso_buf->buf->buf_id);
++
++ tso_buf = next;
++ }
++
++ /*
++	 * If this was the last one in the batch, we try to send any
++	 * pending tx_skb. There should now be buffers and
++	 * descriptors available
++ */
++ if (is_last)
++ netfront_accel_vi_not_busy(vnic);
++}
++
++
++static void netfront_accel_vi_poll_process_tx(netfront_accel_vnic *vnic,
++ ef_event *ev)
++{
++ struct netfront_accel_pkt_desc *buf;
++ struct netfront_accel_tso_buffer *tso_buf;
++ ef_request_id ids[EF_VI_TRANSMIT_BATCH];
++ int i, n_ids;
++ unsigned long flags;
++
++ /* Get the request ids for this tx completion event. */
++ n_ids = ef_vi_transmit_unbundle(&vnic->vi, ev, ids);
++
++ /* Take the tx buffer spin lock and hold for the duration */
++ spin_lock_irqsave(&vnic->tx_lock, flags);
++
++ for (i = 0; i < n_ids; ++i) {
++ VPRINTK("Tx packet %d complete\n", ids[i]);
++ buf = netfront_accel_buf_find(vnic->tx_bufs, ids[i]);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_completions++);
++
++ tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TX_BUF_LENGTH);
++ BUG_ON(tso_buf->buf != buf);
++
++ netfront_accel_vi_tx_complete(vnic, tso_buf, i == (n_ids-1));
++ }
++
++ spin_unlock_irqrestore(&vnic->tx_lock, flags);
++}
++
++
++int netfront_accel_vi_poll(netfront_accel_vnic *vnic, int rx_packets)
++{
++ ef_event ev[ACCEL_VI_POLL_EVENTS];
++ int rx_remain = rx_packets, rc, events, i;
++#if NETFRONT_ACCEL_STATS
++ int n_evs_polled = 0, rx_evs_polled = 0, tx_evs_polled = 0;
++#endif
++ BUG_ON(rx_packets <= 0);
++
++ events = ef_eventq_poll(&vnic->vi, ev,
++ min(rx_remain, ACCEL_VI_POLL_EVENTS));
++ i = 0;
++ NETFRONT_ACCEL_STATS_OP(n_evs_polled += events);
++
++ VPRINTK("%s: %d events\n", __FUNCTION__, events);
++
++ /* Loop over each event */
++ while (events) {
++ VPRINTK("%s: Event "EF_EVENT_FMT", index %lu\n", __FUNCTION__,
++ EF_EVENT_PRI_ARG(ev[i]),
++ (unsigned long)(vnic->vi.evq_state->evq_ptr));
++
++ if ((EF_EVENT_TYPE(ev[i]) == EF_EVENT_TYPE_RX) ||
++ (EF_EVENT_TYPE(ev[i]) == EF_EVENT_TYPE_RX_DISCARD)) {
++ rc = netfront_accel_vi_poll_process_rx(vnic, &ev[i]);
++ rx_remain -= rc;
++ BUG_ON(rx_remain < 0);
++ NETFRONT_ACCEL_STATS_OP(rx_evs_polled++);
++ } else if (EF_EVENT_TYPE(ev[i]) == EF_EVENT_TYPE_TX) {
++ netfront_accel_vi_poll_process_tx(vnic, &ev[i]);
++ NETFRONT_ACCEL_STATS_OP(tx_evs_polled++);
++ } else if (EF_EVENT_TYPE(ev[i]) ==
++ EF_EVENT_TYPE_RX_NO_DESC_TRUNC) {
++ DPRINTK("%s: RX_NO_DESC_TRUNC " EF_EVENT_FMT "\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(ev[i]));
++ discard_jumbo_state(vnic);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.rx_no_desc_trunc++);
++ } else {
++ EPRINTK("Unexpected event " EF_EVENT_FMT "\n",
++ EF_EVENT_PRI_ARG(ev[i]));
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.bad_event_count++);
++ }
++
++ i++;
++
++ /* Carry on round the loop if more events and more space */
++ if (i == events) {
++ if (rx_remain == 0)
++ break;
++
++ events = ef_eventq_poll(&vnic->vi, ev,
++ min(rx_remain,
++ ACCEL_VI_POLL_EVENTS));
++ i = 0;
++ NETFRONT_ACCEL_STATS_OP(n_evs_polled += events);
++ }
++ }
++
++#if NETFRONT_ACCEL_STATS
++ vnic->stats.event_count += n_evs_polled;
++ vnic->stats.event_count_since_irq += n_evs_polled;
++ if (n_evs_polled > vnic->stats.events_per_poll_max)
++ vnic->stats.events_per_poll_max = n_evs_polled;
++ if (rx_evs_polled > vnic->stats.events_per_poll_rx_max)
++ vnic->stats.events_per_poll_rx_max = rx_evs_polled;
++ if (tx_evs_polled > vnic->stats.events_per_poll_tx_max)
++ vnic->stats.events_per_poll_tx_max = tx_evs_polled;
++#endif
++
++ return rx_packets - rx_remain;
++}
++
++
++int netfront_accel_vi_enable_interrupts(netfront_accel_vnic *vnic)
++{
++ u32 sw_evq_ptr;
++
++ VPRINTK("%s: checking for event on %p\n", __FUNCTION__, &vnic->vi.evq_state);
++
++ BUG_ON(vnic == NULL);
++ BUG_ON(vnic->vi.evq_state == NULL);
++
++ /* Do a quick check for an event. */
++ if (ef_eventq_has_event(&vnic->vi)) {
++ VPRINTK("%s: found event\n", __FUNCTION__);
++ return 0;
++ }
++
++ VPRINTK("evq_ptr=0x%08x evq_mask=0x%08x\n",
++ vnic->evq_state.evq_ptr, vnic->vi.evq_mask);
++
++ /* Request a wakeup from the hardware. */
++ sw_evq_ptr = vnic->evq_state.evq_ptr & vnic->vi.evq_mask;
++
++ BUG_ON(vnic->hw.falcon.evq_rptr == NULL);
++
++ VPRINTK("Requesting wakeup at 0x%08x, rptr %p\n", sw_evq_ptr,
++ vnic->hw.falcon.evq_rptr);
++ *(volatile u32 *)(vnic->hw.falcon.evq_rptr) = (sw_evq_ptr >> 3);
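++	/* The shift presumably converts the byte offset into event
++	 * units, each falcon event being 8 bytes wide. */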
++
++ return 1;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/accel_xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_xenbus.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/accel_xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/accel_xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,776 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/stddef.h>
++#include <linux/errno.h>
++
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include <xen/gnttab.h>
++
++#include "accel.h"
++#include "accel_util.h"
++#include "accel_msg_iface.h"
++#include "accel_bufs.h"
++#include "accel_ssr.h"
++/* drivers/xen/netfront/netfront.h */
++#include "netfront.h"
++
++void netfront_accel_set_closing(netfront_accel_vnic *vnic)
++{
++ vnic->frontend_state = XenbusStateClosing;
++ net_accel_update_state(vnic->dev, XenbusStateClosing);
++}
++
++
++static void mac_address_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ netfront_accel_vnic *vnic;
++ struct xenbus_device *dev;
++ int rc;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ vnic = container_of(watch, netfront_accel_vnic,
++ mac_address_watch);
++ dev = vnic->dev;
++
++ rc = net_accel_xen_net_read_mac(dev, vnic->mac);
++
++ if (rc != 0)
++ EPRINTK("%s: failed to read mac (%d)\n", __FUNCTION__, rc);
++}
++
++
++static int setup_mac_address_watch(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ int err;
++
++ DPRINTK("Setting watch on %s/%s\n", dev->nodename, "mac");
++
++ err = xenbus_watch_path2(dev, dev->nodename, "mac",
++ &vnic->mac_address_watch,
++ mac_address_change);
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ return 0;
++ fail:
++ vnic->mac_address_watch.node = NULL;
++ return err;
++}
++
++
++/* Grant access to some pages and publish through xenbus */
++static int make_named_grant(struct xenbus_device *dev, void *page,
++ const char *name, grant_ref_t *gnt_ref)
++{
++ struct xenbus_transaction tr;
++ int err;
++ grant_ref_t gnt;
++
++ gnt = net_accel_grant_page(dev, virt_to_mfn(page), 0);
++ if (gnt < 0)
++ return gnt;
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: transaction start failed %d\n",
++ __FUNCTION__, err);
++ return err;
++ }
++ err = xenbus_printf(tr, dev->nodename, name, "%d", gnt);
++ if (err != 0) {
++ EPRINTK("%s: xenbus_printf failed %d\n", __FUNCTION__,
++ err);
++ xenbus_transaction_end(tr, 1);
++ return err;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("%s: transaction end failed %d\n", __FUNCTION__, err);
++ return err;
++ }
++
++ *gnt_ref = gnt;
++
++ return 0;
++}
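++
++/*
++ * Note: xenbus_transaction_end() returns -EAGAIN when the transaction
++ * conflicted with a concurrent xenstore update and must be retried,
++ * which is why the helpers above and below loop until the transaction
++ * either commits or fails with a real error.
++ */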
++
++
++static int remove_named_grant(struct xenbus_device *dev,
++ const char *name, grant_ref_t gnt_ref)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ net_accel_ungrant_page(gnt_ref);
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: transaction start failed %d\n",
++ __FUNCTION__, err);
++ return err;
++ }
++ err = xenbus_rm(tr, dev->nodename, name);
++ if (err != 0) {
++ EPRINTK("%s: xenbus_rm failed %d\n", __FUNCTION__,
++ err);
++ xenbus_transaction_end(tr, 1);
++ return err;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("%s: transaction end failed %d\n", __FUNCTION__, err);
++ return err;
++ }
++
++ return 0;
++}
++
++
++static
++netfront_accel_vnic *netfront_accel_vnic_ctor(struct net_device *net_dev,
++ struct xenbus_device *dev)
++{
++ struct netfront_info *np =
++ (struct netfront_info *)netdev_priv(net_dev);
++ netfront_accel_vnic *vnic;
++ int err;
++
++ /*
++	 * A bug in earlier versions of the Xen accel plugin system meant
++	 * that you could be probed twice for the same device on suspend
++	 * cancel. Be tolerant of that.
++ */
++ if (np->accel_priv != NULL)
++ return ERR_PTR(-EALREADY);
++
++ /* Alloc mem for state */
++ vnic = kzalloc(sizeof(netfront_accel_vnic), GFP_KERNEL);
++ if (vnic == NULL) {
++ EPRINTK("%s: no memory for vnic state\n", __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ spin_lock_init(&vnic->tx_lock);
++
++ mutex_init(&vnic->vnic_mutex);
++ mutex_lock(&vnic->vnic_mutex);
++
++ /* Store so state can be retrieved from device */
++ BUG_ON(np->accel_priv != NULL);
++ np->accel_priv = vnic;
++ vnic->dev = dev;
++ vnic->net_dev = net_dev;
++ spin_lock_init(&vnic->irq_enabled_lock);
++ netfront_accel_ssr_init(&vnic->ssr_state);
++
++ init_waitqueue_head(&vnic->state_wait_queue);
++ vnic->backend_state = XenbusStateUnknown;
++ vnic->frontend_state = XenbusStateClosed;
++ vnic->removing = 0;
++ vnic->domU_state_is_setup = 0;
++ vnic->dom0_state_is_setup = 0;
++ vnic->poll_enabled = 0;
++ vnic->tx_enabled = 0;
++ vnic->tx_skb = NULL;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ INIT_WORK(&vnic->msg_from_bend, netfront_accel_msg_from_bend);
++#else
++ INIT_WORK(&vnic->msg_from_bend, netfront_accel_msg_from_bend, vnic);
++#endif
++
++ netfront_accel_debugfs_create(vnic);
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ err = net_accel_xen_net_read_mac(dev, vnic->mac);
++ if (err)
++ goto fail_mac;
++
++ /* Setup a watch on the frontend's MAC address */
++ err = setup_mac_address_watch(dev, vnic);
++ if (err)
++ goto fail_mac;
++
++ return vnic;
++
++fail_mac:
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ netfront_accel_debugfs_remove(vnic);
++
++ netfront_accel_ssr_fini(vnic, &vnic->ssr_state);
++
++ EPRINTK_ON(vnic->tx_skb != NULL);
++
++ vnic->frontend_state = XenbusStateUnknown;
++ net_accel_update_state(dev, XenbusStateUnknown);
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ np->accel_priv = NULL;
++ kfree(vnic);
++
++ return ERR_PTR(err);
++}
++
++
++static void netfront_accel_vnic_dtor(netfront_accel_vnic *vnic)
++{
++ struct net_device *net_dev = vnic->net_dev;
++ struct netfront_info *np =
++ (struct netfront_info *)netdev_priv(net_dev);
++
++ /*
++	 * Now that we no longer hold the lock, it is safe to remove
++	 * this watch and synchronise with the completion of any
++	 * outstanding watches.
++ */
++ DPRINTK("%s: unregistering xenbus mac watch\n", __FUNCTION__);
++ unregister_xenbus_watch(&vnic->mac_address_watch);
++ kfree(vnic->mac_address_watch.node);
++
++ flush_workqueue(netfront_accel_workqueue);
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ netfront_accel_debugfs_remove(vnic);
++
++ netfront_accel_ssr_fini(vnic, &vnic->ssr_state);
++
++ EPRINTK_ON(vnic->tx_skb != NULL);
++
++ vnic->frontend_state = XenbusStateUnknown;
++ net_accel_update_state(vnic->dev, XenbusStateUnknown);
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ np->accel_priv = NULL;
++ kfree(vnic);
++}
++
++
++static int vnic_setup_domU_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ struct xenbus_transaction tr;
++ int err;
++ int msgs_per_queue;
++
++ DPRINTK("Setting up domU shared state.\n");
++
++ msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
++
++ /* Allocate buffer state */
++ vnic->tx_bufs = netfront_accel_init_bufs(&vnic->tx_lock);
++ if (vnic->tx_bufs == NULL) {
++ err = -ENOMEM;
++ EPRINTK("%s: Failed to allocate tx buffers\n", __FUNCTION__);
++ goto fail_tx_bufs;
++ }
++
++ vnic->rx_bufs = netfront_accel_init_bufs(NULL);
++ if (vnic->rx_bufs == NULL) {
++ err = -ENOMEM;
++ EPRINTK("%s: Failed to allocate rx buffers\n", __FUNCTION__);
++ goto fail_rx_bufs;
++ }
++
++ /*
++ * This allocates two pages, one for the shared page and one
++ * for the message queue.
++ */
++ vnic->shared_page = (struct net_accel_shared_page *)
++ __get_free_pages(GFP_KERNEL, 1);
++ if (vnic->shared_page == NULL) {
++ EPRINTK("%s: no memory for shared pages\n", __FUNCTION__);
++ err = -ENOMEM;
++ goto fail_shared_page;
++ }
++
++ net_accel_msg_init_queue
++ (&vnic->from_dom0, &vnic->shared_page->queue0,
++ (struct net_accel_msg *)((u8*)vnic->shared_page + PAGE_SIZE),
++ msgs_per_queue);
++
++ net_accel_msg_init_queue
++ (&vnic->to_dom0, &vnic->shared_page->queue1,
++ (struct net_accel_msg *)((u8*)vnic->shared_page +
++ (3 * PAGE_SIZE / 2)),
++ msgs_per_queue);
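++
++	/*
++	 * Resulting layout of the two pages allocated above:
++	 *
++	 *   page 0:            struct net_accel_shared_page (queue0/queue1)
++	 *   page 1, 1st half:  from_dom0 message ring (msgs_per_queue msgs)
++	 *   page 1, 2nd half:  to_dom0 message ring (msgs_per_queue msgs)
++	 */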
++
++ vnic->msg_state = NETFRONT_ACCEL_MSG_NONE;
++
++ err = make_named_grant(dev, vnic->shared_page, "accel-ctrl-page",
++ &vnic->ctrl_page_gnt);
++ if (err) {
++ EPRINTK("couldn't make ctrl-page named grant\n");
++ goto fail_ctrl_page_grant;
++ }
++
++ err = make_named_grant(dev, (u8*)vnic->shared_page + PAGE_SIZE,
++ "accel-msg-page", &vnic->msg_page_gnt);
++ if (err) {
++ EPRINTK("couldn't make msg-page named grant\n");
++ goto fail_msg_page_grant;
++ }
++
++ /* Create xenbus msg event channel */
++ err = bind_listening_port_to_irqhandler
++ (dev->otherend_id, netfront_accel_msg_channel_irq_from_bend,
++ SA_SAMPLE_RANDOM, "vnicctrl", vnic);
++ if (err < 0) {
++ EPRINTK("Couldn't bind msg event channel\n");
++ goto fail_msg_irq;
++ }
++ vnic->msg_channel_irq = err;
++ vnic->msg_channel = irq_to_evtchn_port(vnic->msg_channel_irq);
++
++ /* Create xenbus net event channel */
++ err = bind_listening_port_to_irqhandler
++ (dev->otherend_id, netfront_accel_net_channel_irq_from_bend,
++ SA_SAMPLE_RANDOM, "vnicfront", vnic);
++ if (err < 0) {
++ EPRINTK("Couldn't bind net event channel\n");
++ goto fail_net_irq;
++ }
++ vnic->net_channel_irq = err;
++ vnic->net_channel = irq_to_evtchn_port(vnic->net_channel_irq);
++ /* Want to ensure we don't get interrupts before we're ready */
++ netfront_accel_disable_net_interrupts(vnic);
++
++ DPRINTK("otherend %d has msg ch %u (%u) and net ch %u (%u)\n",
++ dev->otherend_id, vnic->msg_channel, vnic->msg_channel_irq,
++ vnic->net_channel, vnic->net_channel_irq);
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: Transaction start failed %d\n",
++ __FUNCTION__, err);
++ goto fail_transaction;
++ }
++
++ err = xenbus_printf(tr, dev->nodename, "accel-msg-channel",
++ "%u", vnic->msg_channel);
++ if (err != 0) {
++ EPRINTK("%s: event channel xenbus write failed %d\n",
++ __FUNCTION__, err);
++ xenbus_transaction_end(tr, 1);
++ goto fail_transaction;
++ }
++
++ err = xenbus_printf(tr, dev->nodename, "accel-net-channel",
++ "%u", vnic->net_channel);
++ if (err != 0) {
++ EPRINTK("%s: net channel xenbus write failed %d\n",
++ __FUNCTION__, err);
++ xenbus_transaction_end(tr, 1);
++ goto fail_transaction;
++ }
++
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("%s: Transaction end failed %d\n", __FUNCTION__, err);
++ goto fail_transaction;
++ }
++
++ DPRINTK("Completed setting up domU shared state\n");
++
++ return 0;
++
++fail_transaction:
++
++ unbind_from_irqhandler(vnic->net_channel_irq, vnic);
++fail_net_irq:
++
++ unbind_from_irqhandler(vnic->msg_channel_irq, vnic);
++fail_msg_irq:
++
++ remove_named_grant(dev, "accel-ctrl-page", vnic->ctrl_page_gnt);
++fail_msg_page_grant:
++
++ remove_named_grant(dev, "accel-msg-page", vnic->msg_page_gnt);
++fail_ctrl_page_grant:
++
++ free_pages((unsigned long)vnic->shared_page, 1);
++ vnic->shared_page = NULL;
++fail_shared_page:
++
++ netfront_accel_fini_bufs(vnic->rx_bufs);
++fail_rx_bufs:
++
++ netfront_accel_fini_bufs(vnic->tx_bufs);
++fail_tx_bufs:
++
++	/* Undo the memory allocation made when we received the HELLO */
++ netfront_accel_free_buffer_mem(&vnic->bufpages,
++ vnic->rx_bufs,
++ vnic->tx_bufs);
++
++ DPRINTK("Failed to setup domU shared state with code %d\n", err);
++
++ return err;
++}
++
++
++static void vnic_remove_domU_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ struct xenbus_transaction tr;
++
++ /*
++ * Don't remove any watches because we currently hold the
++ * mutex and the watches take the mutex.
++ */
++
++ DPRINTK("%s: removing event channel irq handlers %d %d\n",
++ __FUNCTION__, vnic->net_channel_irq, vnic->msg_channel_irq);
++ do {
++ if (xenbus_transaction_start(&tr) != 0)
++ break;
++ xenbus_rm(tr, dev->nodename, "accel-msg-channel");
++ xenbus_rm(tr, dev->nodename, "accel-net-channel");
++ } while (xenbus_transaction_end(tr, 0) == -EAGAIN);
++
++ unbind_from_irqhandler(vnic->net_channel_irq, vnic);
++ unbind_from_irqhandler(vnic->msg_channel_irq, vnic);
++
++ /* ungrant pages for msg channel */
++ remove_named_grant(dev, "accel-ctrl-page", vnic->ctrl_page_gnt);
++ remove_named_grant(dev, "accel-msg-page", vnic->msg_page_gnt);
++ free_pages((unsigned long)vnic->shared_page, 1);
++ vnic->shared_page = NULL;
++
++ /* ungrant pages for buffers, and free buffer memory */
++ netfront_accel_free_buffer_mem(&vnic->bufpages,
++ vnic->rx_bufs,
++ vnic->tx_bufs);
++ netfront_accel_fini_bufs(vnic->rx_bufs);
++ netfront_accel_fini_bufs(vnic->tx_bufs);
++}
++
++
++static void vnic_setup_dom0_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ DPRINTK("Setting up dom0 shared state\n");
++
++ netfront_accel_vi_ctor(vnic);
++
++ /*
++ * Message processing will be enabled when this function
++ * returns, but we might have missed an interrupt. Schedule a
++ * check just in case.
++ */
++ queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);
++}
++
++
++static void vnic_remove_dom0_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ DPRINTK("Removing dom0 shared state\n");
++
++ vnic_stop_fastpath(vnic);
++
++ netfront_accel_vi_dtor(vnic);
++}
++
++
++/*************************************************************************/
++
++/*
++ * The following code handles accelstate changes between the frontend
++ * and the backend. In response to transitions, it calls the following
++ * functions in matching pairs:
++ *
++ * vnic_setup_domU_shared_state
++ * vnic_remove_domU_shared_state
++ *
++ * vnic_setup_dom0_shared_state
++ * vnic_remove_dom0_shared_state
++ *
++ * Valid state transitions for DomU are as follows:
++ *
++ * Closed->Init on probe or in response to Init from dom0
++ *
++ * Init->Connected in response to Init from dom0
++ * Init->Closing on error, provided dom0 is in Init
++ * Init->Closed on remove or in response to Closing from dom0
++ *
++ * Connected->Closing on error/remove
++ * Connected->Closed in response to Closing from dom0
++ *
++ * Closing->Closed in response to Closing from dom0
++ *
++ */
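++
++/*
++ * Illustration (derived from the table above): a typical successful
++ * bring-up sequence is
++ *
++ *   domU: Closed->Init         (probe)
++ *   dom0: ->Init               (domU sets up domU shared state)
++ *   domU: Init->Connected
++ *   dom0: ->Connected          (domU sets up dom0 shared state)
++ */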
++
++
++/* Function to deal with Xenbus accel state change in backend */
++static void netfront_accel_backend_accel_changed(netfront_accel_vnic *vnic,
++ XenbusState backend_state)
++{
++ struct xenbus_device *dev = vnic->dev;
++ XenbusState frontend_state;
++ int state;
++
++ DPRINTK("%s: changing from %s to %s. nodename %s, otherend %s\n",
++ __FUNCTION__, xenbus_strstate(vnic->backend_state),
++ xenbus_strstate(backend_state), dev->nodename, dev->otherend);
++
++ /*
++ * Ignore duplicate state changes. This can happen if the
++ * backend changes state twice in quick succession and the
++ * first watch fires in the frontend after the second
++ * transition has completed.
++ */
++ if (vnic->backend_state == backend_state)
++ return;
++
++ vnic->backend_state = backend_state;
++ frontend_state = vnic->frontend_state;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ /*
++ * It's possible for us to miss the closed state from
++ * dom0, so do the work here.
++ */
++ if (vnic->domU_state_is_setup) {
++ vnic_remove_domU_shared_state(dev, vnic);
++ vnic->domU_state_is_setup = 0;
++ }
++
++ if (frontend_state != XenbusStateInitialising) {
++ /* Make sure the backend doesn't go away. */
++ frontend_state = XenbusStateInitialising;
++ net_accel_update_state(dev, frontend_state);
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d", &state);
++ backend_state = (XenbusState)state;
++ if (backend_state != XenbusStateInitialising)
++ break;
++ }
++
++ /* Start the new connection. */
++ if (!vnic->removing) {
++ BUG_ON(vnic->domU_state_is_setup);
++ if (vnic_setup_domU_shared_state(dev, vnic) == 0) {
++ vnic->domU_state_is_setup = 1;
++ frontend_state = XenbusStateConnected;
++ } else
++ frontend_state = XenbusStateClosing;
++ }
++ break;
++ case XenbusStateConnected:
++ if (vnic->domU_state_is_setup &&
++ !vnic->dom0_state_is_setup) {
++ vnic_setup_dom0_shared_state(dev, vnic);
++ vnic->dom0_state_is_setup = 1;
++ }
++ break;
++ default:
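++		/* Fall through: unrecognised backend states are treated like Closing */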
++ case XenbusStateClosing:
++ if (vnic->dom0_state_is_setup) {
++ vnic_remove_dom0_shared_state(dev, vnic);
++ vnic->dom0_state_is_setup = 0;
++ }
++ frontend_state = XenbusStateClosed;
++ break;
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ if (vnic->domU_state_is_setup) {
++ vnic_remove_domU_shared_state(dev, vnic);
++ vnic->domU_state_is_setup = 0;
++ }
++ break;
++ }
++
++ if (frontend_state != vnic->frontend_state) {
++ DPRINTK("Switching from state %s (%d) to %s (%d)\n",
++ xenbus_strstate(vnic->frontend_state),
++ vnic->frontend_state,
++ xenbus_strstate(frontend_state), frontend_state);
++ vnic->frontend_state = frontend_state;
++ net_accel_update_state(dev, frontend_state);
++ }
++
++ wake_up(&vnic->state_wait_queue);
++}
++
++
++static void backend_accel_state_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int state;
++ netfront_accel_vnic *vnic;
++ struct xenbus_device *dev;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ vnic = container_of(watch, struct netfront_accel_vnic,
++ backend_accel_watch);
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ dev = vnic->dev;
++
++ state = (int)XenbusStateUnknown;
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d", &state);
++ netfront_accel_backend_accel_changed(vnic, state);
++
++ mutex_unlock(&vnic->vnic_mutex);
++}
++
++
++static int setup_dom0_accel_watch(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ int err;
++
++ DPRINTK("Setting watch on %s/%s\n", dev->otherend, "accelstate");
++
++ err = xenbus_watch_path2(dev, dev->otherend, "accelstate",
++ &vnic->backend_accel_watch,
++ backend_accel_state_change);
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++ return 0;
++ fail:
++ vnic->backend_accel_watch.node = NULL;
++ return err;
++}
++
++
++int netfront_accel_probe(struct net_device *net_dev, struct xenbus_device *dev)
++{
++ netfront_accel_vnic *vnic;
++ int err;
++
++ DPRINTK("Probe passed device %s\n", dev->nodename);
++
++ vnic = netfront_accel_vnic_ctor(net_dev, dev);
++ if (IS_ERR(vnic))
++ return PTR_ERR(vnic);
++
++ /*
++ * Setup a watch on the backend accel state. This sets things
++ * going.
++ */
++ err = setup_dom0_accel_watch(dev, vnic);
++ if (err) {
++ netfront_accel_vnic_dtor(vnic);
++ EPRINTK("%s: probe failed with code %d\n", __FUNCTION__, err);
++ return err;
++ }
++
++ /*
++ * Indicate to the other end that we're ready to start unless
++ * the watch has already fired.
++ */
++ mutex_lock(&vnic->vnic_mutex);
++ VPRINTK("setup success, updating accelstate\n");
++ if (vnic->frontend_state == XenbusStateClosed) {
++ vnic->frontend_state = XenbusStateInitialising;
++ net_accel_update_state(dev, XenbusStateInitialising);
++ }
++ mutex_unlock(&vnic->vnic_mutex);
++
++ DPRINTK("Probe done device %s\n", dev->nodename);
++
++ return 0;
++}
++
++
++int netfront_accel_remove(struct xenbus_device *dev)
++{
++ struct netfront_info *np =
++ (struct netfront_info *)dev->dev.driver_data;
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)np->accel_priv;
++
++ DPRINTK("%s %s\n", __FUNCTION__, dev->nodename);
++
++ BUG_ON(vnic == NULL);
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ /* Reject any attempts to connect. */
++ vnic->removing = 1;
++
++ /* Close any existing connection. */
++ if (vnic->frontend_state == XenbusStateConnected) {
++ vnic->frontend_state = XenbusStateClosing;
++ net_accel_update_state(dev, XenbusStateClosing);
++ }
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ DPRINTK("%s waiting for release of %s\n", __FUNCTION__, dev->nodename);
++
++ /*
++ * Wait for the xenbus watch to release the shared resources.
++ * This indicates that dom0 has made the transition
++ * Closing->Closed or that dom0 was in Closed or Init and no
++ * resources were mapped.
++ */
++ wait_event(vnic->state_wait_queue,
++ !vnic->domU_state_is_setup);
++
++ /*
++	 * Now that we no longer need this watch, it is safe to remove it
++	 * (and so synchronise with its completion if one is outstanding).
++ */
++ DPRINTK("%s: unregistering xenbus accel watch\n",
++ __FUNCTION__);
++ unregister_xenbus_watch(&vnic->backend_accel_watch);
++ kfree(vnic->backend_accel_watch.node);
++
++ netfront_accel_vnic_dtor(vnic);
++
++ DPRINTK("%s done %s\n", __FUNCTION__, dev->nodename);
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon_core.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon_core.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon_core.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon_core.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1075 @@
++
++#define EFVI_FALCON_EXTENDED_P_BAR 1
++
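++/*
++ * The _LBN/_WIDTH pairs below give each field's least significant bit
++ * number and its width in bits. For fields that fit within a single
++ * 64-bit word, a value can be extracted with a helper of roughly this
++ * shape (illustrative, not part of the original header; assumes
++ * width < 64):
++ *
++ *   #define FALCON_FIELD(val, name) \
++ *           (((val) >> name##_LBN) & ((1ULL << name##_WIDTH) - 1))
++ */
++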
++//////////////---- Bus Interface Unit Registers C Header ----//////////////
++#define IOM_IND_ADR_REG_OFST 0x0 // IO-mapped indirect access address register
++ #define IOM_AUTO_ADR_INC_EN_LBN 16
++ #define IOM_AUTO_ADR_INC_EN_WIDTH 1
++ #define IOM_IND_ADR_LBN 0
++ #define IOM_IND_ADR_WIDTH 16
++#define IOM_IND_DAT_REG_OFST 0x4 // IO-mapped indirect access data register
++ #define IOM_IND_DAT_LBN 0
++ #define IOM_IND_DAT_WIDTH 32
++#define ADR_REGION_REG_KER_OFST 0x0 // Address region register
++#define ADR_REGION_REG_OFST 0x0 // Address region register
++ #define ADR_REGION3_LBN 96
++ #define ADR_REGION3_WIDTH 18
++ #define ADR_REGION2_LBN 64
++ #define ADR_REGION2_WIDTH 18
++ #define ADR_REGION1_LBN 32
++ #define ADR_REGION1_WIDTH 18
++ #define ADR_REGION0_LBN 0
++ #define ADR_REGION0_WIDTH 18
++#define INT_EN_REG_KER_OFST 0x10 // Kernel driver Interrupt enable register
++ #define KER_INT_CHAR_LBN 4
++ #define KER_INT_CHAR_WIDTH 1
++ #define KER_INT_KER_LBN 3
++ #define KER_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_EN_KER_LBN 2
++ #define ILL_ADR_ERR_INT_EN_KER_WIDTH 1
++ #define SRM_PERR_INT_EN_KER_LBN 1
++ #define SRM_PERR_INT_EN_KER_WIDTH 1
++ #define DRV_INT_EN_KER_LBN 0
++ #define DRV_INT_EN_KER_WIDTH 1
++#define INT_EN_REG_CHAR_OFST 0x20 // Char Driver interrupt enable register
++ #define CHAR_INT_CHAR_LBN 4
++ #define CHAR_INT_CHAR_WIDTH 1
++ #define CHAR_INT_KER_LBN 3
++ #define CHAR_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_EN_CHAR_LBN 2
++ #define ILL_ADR_ERR_INT_EN_CHAR_WIDTH 1
++ #define SRM_PERR_INT_EN_CHAR_LBN 1
++ #define SRM_PERR_INT_EN_CHAR_WIDTH 1
++ #define DRV_INT_EN_CHAR_LBN 0
++ #define DRV_INT_EN_CHAR_WIDTH 1
++#define INT_ADR_REG_KER_OFST 0x30 // Interrupt host address for Kernel driver
++ #define INT_ADR_KER_LBN 0
++ #define INT_ADR_KER_WIDTH 64
++ #define DRV_INT_KER_LBN 32
++ #define DRV_INT_KER_WIDTH 1
++ #define EV_FF_HALF_INT_KER_LBN 3
++ #define EV_FF_HALF_INT_KER_WIDTH 1
++ #define EV_FF_FULL_INT_KER_LBN 2
++ #define EV_FF_FULL_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_KER_LBN 1
++ #define ILL_ADR_ERR_INT_KER_WIDTH 1
++ #define SRAM_PERR_INT_KER_LBN 0
++ #define SRAM_PERR_INT_KER_WIDTH 1
++#define INT_ADR_REG_CHAR_OFST 0x40 // Interrupt host address for Char driver
++ #define INT_ADR_CHAR_LBN 0
++ #define INT_ADR_CHAR_WIDTH 64
++ #define DRV_INT_CHAR_LBN 32
++ #define DRV_INT_CHAR_WIDTH 1
++ #define EV_FF_HALF_INT_CHAR_LBN 3
++ #define EV_FF_HALF_INT_CHAR_WIDTH 1
++ #define EV_FF_FULL_INT_CHAR_LBN 2
++ #define EV_FF_FULL_INT_CHAR_WIDTH 1
++ #define ILL_ADR_ERR_INT_CHAR_LBN 1
++ #define ILL_ADR_ERR_INT_CHAR_WIDTH 1
++ #define SRAM_PERR_INT_CHAR_LBN 0
++ #define SRAM_PERR_INT_CHAR_WIDTH 1
++#define INT_ISR0_B0_OFST 0x90 // B0 only
++#define INT_ISR1_B0_OFST 0xA0
++#define INT_ACK_REG_KER_A1_OFST 0x50 // Kernel interrupt acknowledge register
++ #define RESERVED_LBN 0
++ #define RESERVED_WIDTH 32
++#define INT_ACK_REG_CHAR_A1_OFST 0x60 // CHAR interrupt acknowledge register
++ #define RESERVED_LBN 0
++ #define RESERVED_WIDTH 32
++//////////////---- Global CSR Registers C Header ----//////////////
++#define STRAP_REG_KER_OFST 0x200 // ASIC strap status register
++#define STRAP_REG_OFST 0x200 // ASIC strap status register
++ #define ONCHIP_SRAM_LBN 16
++ #define ONCHIP_SRAM_WIDTH 0
++ #define STRAP_ISCSI_EN_LBN 3
++ #define STRAP_ISCSI_EN_WIDTH 1
++ #define STRAP_PINS_LBN 0
++ #define STRAP_PINS_WIDTH 3
++#define GPIO_CTL_REG_KER_OFST 0x210 // GPIO control register
++#define GPIO_CTL_REG_OFST 0x210 // GPIO control register
++ #define GPIO_OEN_LBN 24
++ #define GPIO_OEN_WIDTH 4
++ #define GPIO_OUT_LBN 16
++ #define GPIO_OUT_WIDTH 4
++ #define GPIO_IN_LBN 8
++ #define GPIO_IN_WIDTH 4
++ #define GPIO_PWRUP_VALUE_LBN 0
++ #define GPIO_PWRUP_VALUE_WIDTH 4
++#define GLB_CTL_REG_KER_OFST 0x220 // Global control register
++#define GLB_CTL_REG_OFST 0x220 // Global control register
++ #define SWRST_LBN 0
++ #define SWRST_WIDTH 1
++#define FATAL_INTR_REG_KER_OFST 0x230 // Fatal interrupt register for Kernel
++ #define PCI_BUSERR_INT_KER_EN_LBN 43
++ #define PCI_BUSERR_INT_KER_EN_WIDTH 1
++ #define SRAM_OOB_INT_KER_EN_LBN 42
++ #define SRAM_OOB_INT_KER_EN_WIDTH 1
++ #define BUFID_OOB_INT_KER_EN_LBN 41
++ #define BUFID_OOB_INT_KER_EN_WIDTH 1
++ #define MEM_PERR_INT_KER_EN_LBN 40
++ #define MEM_PERR_INT_KER_EN_WIDTH 1
++ #define RBUF_OWN_INT_KER_EN_LBN 39
++ #define RBUF_OWN_INT_KER_EN_WIDTH 1
++ #define TBUF_OWN_INT_KER_EN_LBN 38
++ #define TBUF_OWN_INT_KER_EN_WIDTH 1
++ #define RDESCQ_OWN_INT_KER_EN_LBN 37
++ #define RDESCQ_OWN_INT_KER_EN_WIDTH 1
++ #define TDESCQ_OWN_INT_KER_EN_LBN 36
++ #define TDESCQ_OWN_INT_KER_EN_WIDTH 1
++ #define EVQ_OWN_INT_KER_EN_LBN 35
++ #define EVQ_OWN_INT_KER_EN_WIDTH 1
++ #define EVFF_OFLO_INT_KER_EN_LBN 34
++ #define EVFF_OFLO_INT_KER_EN_WIDTH 1
++ #define ILL_ADR_INT_KER_EN_LBN 33
++ #define ILL_ADR_INT_KER_EN_WIDTH 1
++ #define SRM_PERR_INT_KER_EN_LBN 32
++ #define SRM_PERR_INT_KER_EN_WIDTH 1
++ #define PCI_BUSERR_INT_KER_LBN 11
++ #define PCI_BUSERR_INT_KER_WIDTH 1
++ #define SRAM_OOB_INT_KER_LBN 10
++ #define SRAM_OOB_INT_KER_WIDTH 1
++ #define BUFID_OOB_INT_KER_LBN 9
++ #define BUFID_OOB_INT_KER_WIDTH 1
++ #define MEM_PERR_INT_KER_LBN 8
++ #define MEM_PERR_INT_KER_WIDTH 1
++ #define RBUF_OWN_INT_KER_LBN 7
++ #define RBUF_OWN_INT_KER_WIDTH 1
++ #define TBUF_OWN_INT_KER_LBN 6
++ #define TBUF_OWN_INT_KER_WIDTH 1
++ #define RDESCQ_OWN_INT_KER_LBN 5
++ #define RDESCQ_OWN_INT_KER_WIDTH 1
++ #define TDESCQ_OWN_INT_KER_LBN 4
++ #define TDESCQ_OWN_INT_KER_WIDTH 1
++ #define EVQ_OWN_INT_KER_LBN 3
++ #define EVQ_OWN_INT_KER_WIDTH 1
++ #define EVFF_OFLO_INT_KER_LBN 2
++ #define EVFF_OFLO_INT_KER_WIDTH 1
++ #define ILL_ADR_INT_KER_LBN 1
++ #define ILL_ADR_INT_KER_WIDTH 1
++ #define SRM_PERR_INT_KER_LBN 0
++ #define SRM_PERR_INT_KER_WIDTH 1
++#define FATAL_INTR_REG_OFST 0x240 // Fatal interrupt register for Char
++ #define PCI_BUSERR_INT_CHAR_EN_LBN 43
++ #define PCI_BUSERR_INT_CHAR_EN_WIDTH 1
++ #define SRAM_OOB_INT_CHAR_EN_LBN 42
++ #define SRAM_OOB_INT_CHAR_EN_WIDTH 1
++ #define BUFID_OOB_INT_CHAR_EN_LBN 41
++ #define BUFID_OOB_INT_CHAR_EN_WIDTH 1
++ #define MEM_PERR_INT_CHAR_EN_LBN 40
++ #define MEM_PERR_INT_CHAR_EN_WIDTH 1
++ #define RBUF_OWN_INT_CHAR_EN_LBN 39
++ #define RBUF_OWN_INT_CHAR_EN_WIDTH 1
++ #define TBUF_OWN_INT_CHAR_EN_LBN 38
++ #define TBUF_OWN_INT_CHAR_EN_WIDTH 1
++ #define RDESCQ_OWN_INT_CHAR_EN_LBN 37
++ #define RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define TDESCQ_OWN_INT_CHAR_EN_LBN 36
++ #define TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define EVQ_OWN_INT_CHAR_EN_LBN 35
++ #define EVQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define EVFF_OFLO_INT_CHAR_EN_LBN 34
++ #define EVFF_OFLO_INT_CHAR_EN_WIDTH 1
++ #define ILL_ADR_INT_CHAR_EN_LBN 33
++ #define ILL_ADR_INT_CHAR_EN_WIDTH 1
++ #define SRM_PERR_INT_CHAR_EN_LBN 32
++ #define SRM_PERR_INT_CHAR_EN_WIDTH 1
++ #define FATAL_INTR_REG_EN_BITS 0xffffffffffffffffULL
++ #define PCI_BUSERR_INT_CHAR_LBN 11
++ #define PCI_BUSERR_INT_CHAR_WIDTH 1
++ #define SRAM_OOB_INT_CHAR_LBN 10
++ #define SRAM_OOB_INT_CHAR_WIDTH 1
++ #define BUFID_OOB_INT_CHAR_LBN 9
++ #define BUFID_OOB_INT_CHAR_WIDTH 1
++ #define MEM_PERR_INT_CHAR_LBN 8
++ #define MEM_PERR_INT_CHAR_WIDTH 1
++ #define RBUF_OWN_INT_CHAR_LBN 7
++ #define RBUF_OWN_INT_CHAR_WIDTH 1
++ #define TBUF_OWN_INT_CHAR_LBN 6
++ #define TBUF_OWN_INT_CHAR_WIDTH 1
++ #define RDESCQ_OWN_INT_CHAR_LBN 5
++ #define RDESCQ_OWN_INT_CHAR_WIDTH 1
++ #define TDESCQ_OWN_INT_CHAR_LBN 4
++ #define TDESCQ_OWN_INT_CHAR_WIDTH 1
++ #define EVQ_OWN_INT_CHAR_LBN 3
++ #define EVQ_OWN_INT_CHAR_WIDTH 1
++ #define EVFF_OFLO_INT_CHAR_LBN 2
++ #define EVFF_OFLO_INT_CHAR_WIDTH 1
++ #define ILL_ADR_INT_CHAR_LBN 1
++ #define ILL_ADR_INT_CHAR_WIDTH 1
++ #define SRM_PERR_INT_CHAR_LBN 0
++ #define SRM_PERR_INT_CHAR_WIDTH 1
++#define DP_CTRL_REG_OFST 0x250 // Datapath control register
++ #define FLS_EVQ_ID_LBN 0
++ #define FLS_EVQ_ID_WIDTH 12
++#define MEM_STAT_REG_KER_OFST 0x260 // Memory status register
++#define MEM_STAT_REG_OFST 0x260 // Memory status register
++ #define MEM_PERR_VEC_LBN 53
++ #define MEM_PERR_VEC_WIDTH 38
++ #define MBIST_CORR_LBN 38
++ #define MBIST_CORR_WIDTH 15
++ #define MBIST_ERR_LBN 0
++ #define MBIST_ERR_WIDTH 38
++#define DEBUG_REG_KER_OFST 0x270 // Debug register
++#define DEBUG_REG_OFST 0x270 // Debug register
++ #define DEBUG_BLK_SEL2_LBN 47
++ #define DEBUG_BLK_SEL2_WIDTH 3
++ #define DEBUG_BLK_SEL1_LBN 44
++ #define DEBUG_BLK_SEL1_WIDTH 3
++ #define DEBUG_BLK_SEL0_LBN 41
++ #define DEBUG_BLK_SEL0_WIDTH 3
++ #define MISC_DEBUG_ADDR_LBN 36
++ #define MISC_DEBUG_ADDR_WIDTH 5
++ #define SERDES_DEBUG_ADDR_LBN 31
++ #define SERDES_DEBUG_ADDR_WIDTH 5
++ #define EM_DEBUG_ADDR_LBN 26
++ #define EM_DEBUG_ADDR_WIDTH 5
++ #define SR_DEBUG_ADDR_LBN 21
++ #define SR_DEBUG_ADDR_WIDTH 5
++ #define EV_DEBUG_ADDR_LBN 16
++ #define EV_DEBUG_ADDR_WIDTH 5
++ #define RX_DEBUG_ADDR_LBN 11
++ #define RX_DEBUG_ADDR_WIDTH 5
++ #define TX_DEBUG_ADDR_LBN 6
++ #define TX_DEBUG_ADDR_WIDTH 5
++ #define BIU_DEBUG_ADDR_LBN 1
++ #define BIU_DEBUG_ADDR_WIDTH 5
++ #define DEBUG_EN_LBN 0
++ #define DEBUG_EN_WIDTH 1
++#define DRIVER_REG0_KER_OFST 0x280 // Driver scratch register 0
++#define DRIVER_REG0_OFST 0x280 // Driver scratch register 0
++ #define DRIVER_DW0_LBN 0
++ #define DRIVER_DW0_WIDTH 32
++#define DRIVER_REG1_KER_OFST 0x290 // Driver scratch register 1
++#define DRIVER_REG1_OFST 0x290 // Driver scratch register 1
++ #define DRIVER_DW1_LBN 0
++ #define DRIVER_DW1_WIDTH 32
++#define DRIVER_REG2_KER_OFST 0x2A0 // Driver scratch register 2
++#define DRIVER_REG2_OFST 0x2A0 // Driver scratch register 2
++ #define DRIVER_DW2_LBN 0
++ #define DRIVER_DW2_WIDTH 32
++#define DRIVER_REG3_KER_OFST 0x2B0 // Driver scratch register 3
++#define DRIVER_REG3_OFST 0x2B0 // Driver scratch register 3
++ #define DRIVER_DW3_LBN 0
++ #define DRIVER_DW3_WIDTH 32
++#define DRIVER_REG4_KER_OFST 0x2C0 // Driver scratch register 4
++#define DRIVER_REG4_OFST 0x2C0 // Driver scratch register 4
++ #define DRIVER_DW4_LBN 0
++ #define DRIVER_DW4_WIDTH 32
++#define DRIVER_REG5_KER_OFST 0x2D0 // Driver scratch register 5
++#define DRIVER_REG5_OFST 0x2D0 // Driver scratch register 5
++ #define DRIVER_DW5_LBN 0
++ #define DRIVER_DW5_WIDTH 32
++#define DRIVER_REG6_KER_OFST 0x2E0 // Driver scratch register 6
++#define DRIVER_REG6_OFST 0x2E0 // Driver scratch register 6
++ #define DRIVER_DW6_LBN 0
++ #define DRIVER_DW6_WIDTH 32
++#define DRIVER_REG7_KER_OFST 0x2F0 // Driver scratch register 7
++#define DRIVER_REG7_OFST 0x2F0 // Driver scratch register 7
++ #define DRIVER_DW7_LBN 0
++ #define DRIVER_DW7_WIDTH 32
++#define ALTERA_BUILD_REG_KER_OFST 0x300 // Altera build register
++#define ALTERA_BUILD_REG_OFST 0x300 // Altera build register
++ #define ALTERA_BUILD_VER_LBN 0
++ #define ALTERA_BUILD_VER_WIDTH 32
++
++/* So-called CSR spare register
++ - contains separate parity enable bits for the various internal memory blocks */
++#define MEM_PARITY_ERR_EN_REG_KER 0x310
++#define MEM_PARITY_ALL_BLOCKS_EN_LBN 64
++#define MEM_PARITY_ALL_BLOCKS_EN_WIDTH 38
++#define MEM_PARITY_TX_DATA_EN_LBN 72
++#define MEM_PARITY_TX_DATA_EN_WIDTH 2
++
++//////////////---- Event & Timer Module Registers C Header ----//////////////
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define EVQ_RPTR_REG_KER_OFST 0x11B00 // Event queue read pointer register
++#else
++#define EVQ_RPTR_REG_KER_OFST 0x1B00 // Event queue read pointer register
++#endif
++
++#define EVQ_RPTR_REG_OFST 0xFA0000 // Event queue read pointer register array.
++ #define EVQ_RPTR_LBN 0
++ #define EVQ_RPTR_WIDTH 15
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define EVQ_PTR_TBL_KER_OFST 0x11A00 // Event queue pointer table for kernel access
++#else
++#define EVQ_PTR_TBL_KER_OFST 0x1A00 // Event queue pointer table for kernel access
++#endif
++
++#define EVQ_PTR_TBL_CHAR_OFST 0xF60000 // Event queue pointer table for char direct access
++ #define EVQ_WKUP_OR_INT_EN_LBN 39
++ #define EVQ_WKUP_OR_INT_EN_WIDTH 1
++ #define EVQ_NXT_WPTR_LBN 24
++ #define EVQ_NXT_WPTR_WIDTH 15
++ #define EVQ_EN_LBN 23
++ #define EVQ_EN_WIDTH 1
++ #define EVQ_SIZE_LBN 20
++ #define EVQ_SIZE_WIDTH 3
++ #define EVQ_BUF_BASE_ID_LBN 0
++ #define EVQ_BUF_BASE_ID_WIDTH 20
++#define TIMER_CMD_REG_KER_OFST 0x420 // Timer table for kernel access. Page-mapped
++#define TIMER_CMD_REG_PAGE4_OFST 0x8420 // Timer table for user-level access. Page-mapped. For lowest 1K queues.
++#define TIMER_CMD_REG_PAGE123K_OFST 0x1000420 // Timer table for user-level access. Page-mapped. For upper 3K queues.
++#define TIMER_TBL_OFST 0xF70000 // Timer table for char driver direct access
++ #define TIMER_MODE_LBN 12
++ #define TIMER_MODE_WIDTH 2
++ #define TIMER_VAL_LBN 0
++ #define TIMER_VAL_WIDTH 12
++ #define TIMER_MODE_INT_HLDOFF 2
++ #define EVQ_BUF_SIZE_LBN 0
++ #define EVQ_BUF_SIZE_WIDTH 1
++#define DRV_EV_REG_KER_OFST 0x440 // Driver generated event register
++#define DRV_EV_REG_OFST 0x440 // Driver generated event register
++ #define DRV_EV_QID_LBN 64
++ #define DRV_EV_QID_WIDTH 12
++ #define DRV_EV_DATA_LBN 0
++ #define DRV_EV_DATA_WIDTH 64
++#define EVQ_CTL_REG_KER_OFST 0x450 // Event queue control register
++#define EVQ_CTL_REG_OFST 0x450 // Event queue control register
++ #define RX_EVQ_WAKEUP_MASK_B0_LBN 15
++ #define RX_EVQ_WAKEUP_MASK_B0_WIDTH 6
++ #define EVQ_OWNERR_CTL_LBN 14
++ #define EVQ_OWNERR_CTL_WIDTH 1
++ #define EVQ_FIFO_AF_TH_LBN 8
++ #define EVQ_FIFO_AF_TH_WIDTH 6
++ #define EVQ_FIFO_NOTAF_TH_LBN 0
++ #define EVQ_FIFO_NOTAF_TH_WIDTH 6
++//////////////---- SRAM Module Registers C Header ----//////////////
++#define BUF_TBL_CFG_REG_KER_OFST 0x600 // Buffer table configuration register
++#define BUF_TBL_CFG_REG_OFST 0x600 // Buffer table configuration register
++ #define BUF_TBL_MODE_LBN 3
++ #define BUF_TBL_MODE_WIDTH 1
++#define SRM_RX_DC_CFG_REG_KER_OFST 0x610 // SRAM receive descriptor cache configuration register
++#define SRM_RX_DC_CFG_REG_OFST 0x610 // SRAM receive descriptor cache configuration register
++ #define SRM_RX_DC_BASE_ADR_LBN 0
++ #define SRM_RX_DC_BASE_ADR_WIDTH 21
++#define SRM_TX_DC_CFG_REG_KER_OFST 0x620 // SRAM transmit descriptor cache configuration register
++#define SRM_TX_DC_CFG_REG_OFST 0x620 // SRAM transmit descriptor cache configuration register
++ #define SRM_TX_DC_BASE_ADR_LBN 0
++ #define SRM_TX_DC_BASE_ADR_WIDTH 21
++#define SRM_CFG_REG_KER_OFST 0x630 // SRAM configuration register
++#define SRM_CFG_REG_OFST 0x630 // SRAM configuration register
++ #define SRAM_OOB_ADR_INTEN_LBN 5
++ #define SRAM_OOB_ADR_INTEN_WIDTH 1
++ #define SRAM_OOB_BUF_INTEN_LBN 4
++ #define SRAM_OOB_BUF_INTEN_WIDTH 1
++ #define SRAM_BT_INIT_EN_LBN 3
++ #define SRAM_BT_INIT_EN_WIDTH 1
++ #define SRM_NUM_BANK_LBN 2
++ #define SRM_NUM_BANK_WIDTH 1
++ #define SRM_BANK_SIZE_LBN 0
++ #define SRM_BANK_SIZE_WIDTH 2
++#define BUF_TBL_UPD_REG_KER_OFST 0x650 // Buffer table update register
++#define BUF_TBL_UPD_REG_OFST 0x650 // Buffer table update register
++ #define BUF_UPD_CMD_LBN 63
++ #define BUF_UPD_CMD_WIDTH 1
++ #define BUF_CLR_CMD_LBN 62
++ #define BUF_CLR_CMD_WIDTH 1
++ #define BUF_CLR_END_ID_LBN 32
++ #define BUF_CLR_END_ID_WIDTH 20
++ #define BUF_CLR_START_ID_LBN 0
++ #define BUF_CLR_START_ID_WIDTH 20
++#define SRM_UPD_EVQ_REG_KER_OFST 0x660 // SRAM update event queue register
++#define SRM_UPD_EVQ_REG_OFST 0x660 // SRAM update event queue register
++ #define SRM_UPD_EVQ_ID_LBN 0
++ #define SRM_UPD_EVQ_ID_WIDTH 12
++#define SRAM_PARITY_REG_KER_OFST 0x670 // SRAM parity register.
++#define SRAM_PARITY_REG_OFST 0x670 // SRAM parity register.
++ #define FORCE_SRAM_PERR_LBN 0
++ #define FORCE_SRAM_PERR_WIDTH 1
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define BUF_HALF_TBL_KER_OFST 0x18000 // Buffer table in half buffer table mode direct access by kernel driver
++#else
++#define BUF_HALF_TBL_KER_OFST 0x8000 // Buffer table in half buffer table mode direct access by kernel driver
++#endif
++
++
++#define BUF_HALF_TBL_OFST 0x800000 // Buffer table in half buffer table mode direct access by char driver
++ #define BUF_ADR_HBUF_ODD_LBN 44
++ #define BUF_ADR_HBUF_ODD_WIDTH 20
++ #define BUF_OWNER_ID_HBUF_ODD_LBN 32
++ #define BUF_OWNER_ID_HBUF_ODD_WIDTH 12
++ #define BUF_ADR_HBUF_EVEN_LBN 12
++ #define BUF_ADR_HBUF_EVEN_WIDTH 20
++ #define BUF_OWNER_ID_HBUF_EVEN_LBN 0
++ #define BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
++
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define BUF_FULL_TBL_KER_OFST 0x18000 // Buffer table in full buffer table mode direct access by kernel driver
++#else
++#define BUF_FULL_TBL_KER_OFST 0x8000 // Buffer table in full buffer table mode direct access by kernel driver
++#endif
++
++
++
++
++#define BUF_FULL_TBL_OFST 0x800000 // Buffer table in full buffer table mode direct access by char driver
++ #define IP_DAT_BUF_SIZE_LBN 50
++ #define IP_DAT_BUF_SIZE_WIDTH 1
++ #define BUF_ADR_REGION_LBN 48
++ #define BUF_ADR_REGION_WIDTH 2
++ #define BUF_ADR_FBUF_LBN 14
++ #define BUF_ADR_FBUF_WIDTH 34
++ #define BUF_OWNER_ID_FBUF_LBN 0
++ #define BUF_OWNER_ID_FBUF_WIDTH 14
++#define SRM_DBG_REG_OFST 0x3000000 // SRAM debug access
++ #define SRM_DBG_LBN 0
++ #define SRM_DBG_WIDTH 64
++//////////////---- RX Datapath Registers C Header ----//////////////
++
++#define RX_CFG_REG_KER_OFST 0x800 // Receive configuration register
++#define RX_CFG_REG_OFST 0x800 // Receive configuration register
++
++#if !defined(FALCON_64K_RXFIFO) && !defined(FALCON_PRE_02020029)
++# if !defined(FALCON_128K_RXFIFO)
++# define FALCON_128K_RXFIFO
++# endif
++#endif
++
++#if defined(FALCON_128K_RXFIFO)
++
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 48
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 47
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 46
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 45
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 44
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 43
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_RDW_PATCH_EN_LBN 42 /* Non head of line blocking */
++ #define RX_RDW_PATCH_EN_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 39
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 3
++ #define RX_OWNERR_CTL_B0_LBN 38
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 33
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 28
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 19
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 10
++ #define RX_XON_MAC_TH_B0_WIDTH 9
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 9
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#elif !defined(FALCON_PRE_02020029)
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 46
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 45
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 44
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 43
++	#define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 42
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 41
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 37
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 3
++ #define RX_OWNERR_CTL_B0_LBN 36
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 31
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 26
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 17
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 9
++ #define RX_XON_MAC_TH_B0_WIDTH 8
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 8
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#else
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 44
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 43
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 42
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 41
++	#define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 40
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 35
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 35
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 2
++ #define RX_OWNERR_CTL_B0_LBN 34
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 29
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 24
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 15
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 8
++ #define RX_XON_MAC_TH_B0_WIDTH 7
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 7
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#endif
++
++/* A0/A1 */
++ #define RX_PUSH_EN_A1_LBN 35
++ #define RX_PUSH_EN_A1_WIDTH 1
++ #define RX_PCI_BURST_SIZE_A1_LBN 31
++ #define RX_PCI_BURST_SIZE_A1_WIDTH 3
++ #define RX_OWNERR_CTL_A1_LBN 30
++ #define RX_OWNERR_CTL_A1_WIDTH 1
++ #define RX_XON_TX_TH_A1_LBN 25
++ #define RX_XON_TX_TH_A1_WIDTH 5
++ #define RX_XOFF_TX_TH_A1_LBN 20
++ #define RX_XOFF_TX_TH_A1_WIDTH 5
++ #define RX_USR_BUF_SIZE_A1_LBN 11
++ #define RX_USR_BUF_SIZE_A1_WIDTH 9
++ #define RX_XON_MAC_TH_A1_LBN 6
++ #define RX_XON_MAC_TH_A1_WIDTH 5
++ #define RX_XOFF_MAC_TH_A1_LBN 1
++ #define RX_XOFF_MAC_TH_A1_WIDTH 5
++ #define RX_XOFF_MAC_EN_A1_LBN 0
++ #define RX_XOFF_MAC_EN_A1_WIDTH 1
++
++#define RX_FILTER_CTL_REG_OFST 0x810 // Receive filter control registers
++ #define SCATTER_ENBL_NO_MATCH_Q_B0_LBN 40
++ #define SCATTER_ENBL_NO_MATCH_Q_B0_WIDTH 1
++ #define UDP_FULL_SRCH_LIMIT_LBN 32
++ #define UDP_FULL_SRCH_LIMIT_WIDTH 8
++ #define NUM_KER_LBN 24
++ #define NUM_KER_WIDTH 2
++ #define UDP_WILD_SRCH_LIMIT_LBN 16
++ #define UDP_WILD_SRCH_LIMIT_WIDTH 8
++ #define TCP_WILD_SRCH_LIMIT_LBN 8
++ #define TCP_WILD_SRCH_LIMIT_WIDTH 8
++ #define TCP_FULL_SRCH_LIMIT_LBN 0
++ #define TCP_FULL_SRCH_LIMIT_WIDTH 8
++#define RX_FLUSH_DESCQ_REG_KER_OFST 0x820 // Receive flush descriptor queue register
++#define RX_FLUSH_DESCQ_REG_OFST 0x820 // Receive flush descriptor queue register
++ #define RX_FLUSH_DESCQ_CMD_LBN 24
++ #define RX_FLUSH_DESCQ_CMD_WIDTH 1
++ #define RX_FLUSH_EVQ_ID_LBN 12
++ #define RX_FLUSH_EVQ_ID_WIDTH 12
++ #define RX_FLUSH_DESCQ_LBN 0
++ #define RX_FLUSH_DESCQ_WIDTH 12
++#define RX_DESC_UPD_REG_KER_OFST 0x830 // Kernel receive descriptor update register. Page-mapped
++#define RX_DESC_UPD_REG_PAGE4_OFST 0x8830 // Char & user receive descriptor update register. Page-mapped. For lowest 1K queues.
++#define RX_DESC_UPD_REG_PAGE123K_OFST 0x1000830 // Char & user receive descriptor update register. Page-mapped. For upper 3K queues.
++ #define RX_DESC_WPTR_LBN 96
++ #define RX_DESC_WPTR_WIDTH 12
++ #define RX_DESC_PUSH_CMD_LBN 95
++ #define RX_DESC_PUSH_CMD_WIDTH 1
++ #define RX_DESC_LBN 0
++ #define RX_DESC_WIDTH 64
++ #define RX_KER_DESC_LBN 0
++ #define RX_KER_DESC_WIDTH 64
++ #define RX_USR_DESC_LBN 0
++ #define RX_USR_DESC_WIDTH 32
++#define RX_DC_CFG_REG_KER_OFST 0x840 // Receive descriptor cache configuration register
++#define RX_DC_CFG_REG_OFST 0x840 // Receive descriptor cache configuration register
++ #define RX_DC_SIZE_LBN 0
++ #define RX_DC_SIZE_WIDTH 2
++#define RX_DC_PF_WM_REG_KER_OFST 0x850 // Receive descriptor cache pre-fetch watermark register
++#define RX_DC_PF_WM_REG_OFST 0x850 // Receive descriptor cache pre-fetch watermark register
++ #define RX_DC_PF_LWM_LO_LBN 0
++ #define RX_DC_PF_LWM_LO_WIDTH 6
++
++#define RX_RSS_TKEY_B0_OFST 0x860 // RSS Toeplitz hash key (B0 only)
++
++#define RX_NODESC_DROP_REG 0x880
++ #define RX_NODESC_DROP_CNT_LBN 0
++ #define RX_NODESC_DROP_CNT_WIDTH 16
++
++#define XM_TX_CFG_REG_OFST 0x1230
++ #define XM_AUTO_PAD_LBN 5
++ #define XM_AUTO_PAD_WIDTH 1
++
++#define RX_FILTER_TBL0_OFST 0xF00000 // Receive filter table - even entries
++ #define RSS_EN_0_B0_LBN 110
++ #define RSS_EN_0_B0_WIDTH 1
++ #define SCATTER_EN_0_B0_LBN 109
++ #define SCATTER_EN_0_B0_WIDTH 1
++ #define TCP_UDP_0_LBN 108
++ #define TCP_UDP_0_WIDTH 1
++ #define RXQ_ID_0_LBN 96
++ #define RXQ_ID_0_WIDTH 12
++ #define DEST_IP_0_LBN 64
++ #define DEST_IP_0_WIDTH 32
++ #define DEST_PORT_TCP_0_LBN 48
++ #define DEST_PORT_TCP_0_WIDTH 16
++ #define SRC_IP_0_LBN 16
++ #define SRC_IP_0_WIDTH 32
++ #define SRC_TCP_DEST_UDP_0_LBN 0
++ #define SRC_TCP_DEST_UDP_0_WIDTH 16
++#define RX_FILTER_TBL1_OFST 0xF00010 // Receive filter table - odd entries
++ #define RSS_EN_1_B0_LBN 110
++ #define RSS_EN_1_B0_WIDTH 1
++ #define SCATTER_EN_1_B0_LBN 109
++ #define SCATTER_EN_1_B0_WIDTH 1
++ #define TCP_UDP_1_LBN 108
++ #define TCP_UDP_1_WIDTH 1
++ #define RXQ_ID_1_LBN 96
++ #define RXQ_ID_1_WIDTH 12
++ #define DEST_IP_1_LBN 64
++ #define DEST_IP_1_WIDTH 32
++ #define DEST_PORT_TCP_1_LBN 48
++ #define DEST_PORT_TCP_1_WIDTH 16
++ #define SRC_IP_1_LBN 16
++ #define SRC_IP_1_WIDTH 32
++ #define SRC_TCP_DEST_UDP_1_LBN 0
++ #define SRC_TCP_DEST_UDP_1_WIDTH 16
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define RX_DESC_PTR_TBL_KER_OFST 0x11800 // Receive descriptor pointer kernel access
++#else
++#define RX_DESC_PTR_TBL_KER_OFST 0x1800 // Receive descriptor pointer kernel access
++#endif
++
++
++#define RX_DESC_PTR_TBL_OFST 0xF40000 // Receive descriptor pointer table
++ #define RX_ISCSI_DDIG_EN_LBN 88
++ #define RX_ISCSI_DDIG_EN_WIDTH 1
++ #define RX_ISCSI_HDIG_EN_LBN 87
++ #define RX_ISCSI_HDIG_EN_WIDTH 1
++ #define RX_DESC_PREF_ACT_LBN 86
++ #define RX_DESC_PREF_ACT_WIDTH 1
++ #define RX_DC_HW_RPTR_LBN 80
++ #define RX_DC_HW_RPTR_WIDTH 6
++ #define RX_DESCQ_HW_RPTR_LBN 68
++ #define RX_DESCQ_HW_RPTR_WIDTH 12
++ #define RX_DESCQ_SW_WPTR_LBN 56
++ #define RX_DESCQ_SW_WPTR_WIDTH 12
++ #define RX_DESCQ_BUF_BASE_ID_LBN 36
++ #define RX_DESCQ_BUF_BASE_ID_WIDTH 20
++ #define RX_DESCQ_EVQ_ID_LBN 24
++ #define RX_DESCQ_EVQ_ID_WIDTH 12
++ #define RX_DESCQ_OWNER_ID_LBN 10
++ #define RX_DESCQ_OWNER_ID_WIDTH 14
++ #define RX_DESCQ_LABEL_LBN 5
++ #define RX_DESCQ_LABEL_WIDTH 5
++ #define RX_DESCQ_SIZE_LBN 3
++ #define RX_DESCQ_SIZE_WIDTH 2
++ #define RX_DESCQ_TYPE_LBN 2
++ #define RX_DESCQ_TYPE_WIDTH 1
++ #define RX_DESCQ_JUMBO_LBN 1
++ #define RX_DESCQ_JUMBO_WIDTH 1
++ #define RX_DESCQ_EN_LBN 0
++ #define RX_DESCQ_EN_WIDTH 1
++
++
++#define RX_RSS_INDIR_TBL_B0_OFST 0xFB0000 // RSS indirection table (B0 only)
++ #define RX_RSS_INDIR_ENT_B0_LBN 0
++ #define RX_RSS_INDIR_ENT_B0_WIDTH 6
++
++//////////////---- TX Datapath Registers C Header ----//////////////
++#define TX_FLUSH_DESCQ_REG_KER_OFST 0xA00 // Transmit flush descriptor queue register
++#define TX_FLUSH_DESCQ_REG_OFST 0xA00 // Transmit flush descriptor queue register
++ #define TX_FLUSH_DESCQ_CMD_LBN 12
++ #define TX_FLUSH_DESCQ_CMD_WIDTH 1
++ #define TX_FLUSH_DESCQ_LBN 0
++ #define TX_FLUSH_DESCQ_WIDTH 12
++#define TX_DESC_UPD_REG_KER_OFST 0xA10 // Kernel transmit descriptor update register. Page-mapped
++#define TX_DESC_UPD_REG_PAGE4_OFST 0x8A10 // Char & user transmit descriptor update register. Page-mapped
++#define TX_DESC_UPD_REG_PAGE123K_OFST 0x1000A10 // Char & user transmit descriptor update register. Page-mapped
++ #define TX_DESC_WPTR_LBN 96
++ #define TX_DESC_WPTR_WIDTH 12
++ #define TX_DESC_PUSH_CMD_LBN 95
++ #define TX_DESC_PUSH_CMD_WIDTH 1
++ #define TX_DESC_LBN 0
++ #define TX_DESC_WIDTH 95
++ #define TX_KER_DESC_LBN 0
++ #define TX_KER_DESC_WIDTH 64
++ #define TX_USR_DESC_LBN 0
++ #define TX_USR_DESC_WIDTH 64
++#define TX_DC_CFG_REG_KER_OFST 0xA20 // Transmit descriptor cache configuration register
++#define TX_DC_CFG_REG_OFST 0xA20 // Transmit descriptor cache configuration register
++ #define TX_DC_SIZE_LBN 0
++ #define TX_DC_SIZE_WIDTH 2
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define TX_DESC_PTR_TBL_KER_OFST 0x11900 // Transmit descriptor pointer.
++#else
++#define TX_DESC_PTR_TBL_KER_OFST 0x1900 // Transmit descriptor pointer.
++#endif
++
++
++#define TX_DESC_PTR_TBL_OFST 0xF50000 // Transmit descriptor pointer
++ #define TX_NON_IP_DROP_DIS_B0_LBN 91
++ #define TX_NON_IP_DROP_DIS_B0_WIDTH 1
++ #define TX_IP_CHKSM_DIS_B0_LBN 90
++ #define TX_IP_CHKSM_DIS_B0_WIDTH 1
++ #define TX_TCP_CHKSM_DIS_B0_LBN 89
++ #define TX_TCP_CHKSM_DIS_B0_WIDTH 1
++ #define TX_DESCQ_EN_LBN 88
++ #define TX_DESCQ_EN_WIDTH 1
++ #define TX_ISCSI_DDIG_EN_LBN 87
++ #define TX_ISCSI_DDIG_EN_WIDTH 1
++ #define TX_ISCSI_HDIG_EN_LBN 86
++ #define TX_ISCSI_HDIG_EN_WIDTH 1
++ #define TX_DC_HW_RPTR_LBN 80
++ #define TX_DC_HW_RPTR_WIDTH 6
++ #define TX_DESCQ_HW_RPTR_LBN 68
++ #define TX_DESCQ_HW_RPTR_WIDTH 12
++ #define TX_DESCQ_SW_WPTR_LBN 56
++ #define TX_DESCQ_SW_WPTR_WIDTH 12
++ #define TX_DESCQ_BUF_BASE_ID_LBN 36
++ #define TX_DESCQ_BUF_BASE_ID_WIDTH 20
++ #define TX_DESCQ_EVQ_ID_LBN 24
++ #define TX_DESCQ_EVQ_ID_WIDTH 12
++ #define TX_DESCQ_OWNER_ID_LBN 10
++ #define TX_DESCQ_OWNER_ID_WIDTH 14
++ #define TX_DESCQ_LABEL_LBN 5
++ #define TX_DESCQ_LABEL_WIDTH 5
++ #define TX_DESCQ_SIZE_LBN 3
++ #define TX_DESCQ_SIZE_WIDTH 2
++ #define TX_DESCQ_TYPE_LBN 1
++ #define TX_DESCQ_TYPE_WIDTH 2
++ #define TX_DESCQ_FLUSH_LBN 0
++ #define TX_DESCQ_FLUSH_WIDTH 1
++#define TX_CFG_REG_KER_OFST 0xA50 // Transmit configuration register
++#define TX_CFG_REG_OFST 0xA50 // Transmit configuration register
++ #define TX_IP_ID_P1_OFS_LBN 32
++ #define TX_IP_ID_P1_OFS_WIDTH 15
++ #define TX_IP_ID_P0_OFS_LBN 16
++ #define TX_IP_ID_P0_OFS_WIDTH 15
++ #define TX_TURBO_EN_LBN 3
++ #define TX_TURBO_EN_WIDTH 1
++ #define TX_OWNERR_CTL_LBN 2
++ #define TX_OWNERR_CTL_WIDTH 2
++ #define TX_NON_IP_DROP_DIS_LBN 1
++ #define TX_NON_IP_DROP_DIS_WIDTH 1
++ #define TX_IP_ID_REP_EN_LBN 0
++ #define TX_IP_ID_REP_EN_WIDTH 1
++#define TX_RESERVED_REG_KER_OFST 0xA80 // Transmit reserved register
++#define TX_RESERVED_REG_OFST 0xA80 // Transmit reserved register
++ #define TX_CSR_PUSH_EN_LBN 89
++ #define TX_CSR_PUSH_EN_WIDTH 1
++ #define TX_RX_SPACER_LBN 64
++ #define TX_RX_SPACER_WIDTH 8
++ #define TX_SW_EV_EN_LBN 59
++ #define TX_SW_EV_EN_WIDTH 1
++ #define TX_RX_SPACER_EN_LBN 57
++ #define TX_RX_SPACER_EN_WIDTH 1
++ #define TX_CSR_PREF_WD_TMR_LBN 24
++ #define TX_CSR_PREF_WD_TMR_WIDTH 16
++ #define TX_CSR_ONLY1TAG_LBN 21
++ #define TX_CSR_ONLY1TAG_WIDTH 1
++ #define TX_PREF_THRESHOLD_LBN 19
++ #define TX_PREF_THRESHOLD_WIDTH 2
++ #define TX_ONE_PKT_PER_Q_LBN 18
++ #define TX_ONE_PKT_PER_Q_WIDTH 1
++ #define TX_DIS_NON_IP_EV_LBN 17
++ #define TX_DIS_NON_IP_EV_WIDTH 1
++ #define TX_DMA_SPACER_LBN 8
++ #define TX_DMA_SPACER_WIDTH 8
++ #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
++ #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
++ #define TX_TCP_DIS_A1_LBN 7
++ #define TX_TCP_DIS_A1_WIDTH 1
++ #define TX_IP_DIS_A1_LBN 6
++ #define TX_IP_DIS_A1_WIDTH 1
++ #define TX_MAX_CPL_LBN 2
++ #define TX_MAX_CPL_WIDTH 2
++ #define TX_MAX_PREF_LBN 0
++ #define TX_MAX_PREF_WIDTH 2
++#define TX_VLAN_REG_OFST 0xAE0 // Transmit VLAN tag register
++ #define TX_VLAN_EN_LBN 127
++ #define TX_VLAN_EN_WIDTH 1
++ #define TX_VLAN7_PORT1_EN_LBN 125
++ #define TX_VLAN7_PORT1_EN_WIDTH 1
++ #define TX_VLAN7_PORT0_EN_LBN 124
++ #define TX_VLAN7_PORT0_EN_WIDTH 1
++ #define TX_VLAN7_LBN 112
++ #define TX_VLAN7_WIDTH 12
++ #define TX_VLAN6_PORT1_EN_LBN 109
++ #define TX_VLAN6_PORT1_EN_WIDTH 1
++ #define TX_VLAN6_PORT0_EN_LBN 108
++ #define TX_VLAN6_PORT0_EN_WIDTH 1
++ #define TX_VLAN6_LBN 96
++ #define TX_VLAN6_WIDTH 12
++ #define TX_VLAN5_PORT1_EN_LBN 93
++ #define TX_VLAN5_PORT1_EN_WIDTH 1
++ #define TX_VLAN5_PORT0_EN_LBN 92
++ #define TX_VLAN5_PORT0_EN_WIDTH 1
++ #define TX_VLAN5_LBN 80
++ #define TX_VLAN5_WIDTH 12
++ #define TX_VLAN4_PORT1_EN_LBN 77
++ #define TX_VLAN4_PORT1_EN_WIDTH 1
++ #define TX_VLAN4_PORT0_EN_LBN 76
++ #define TX_VLAN4_PORT0_EN_WIDTH 1
++ #define TX_VLAN4_LBN 64
++ #define TX_VLAN4_WIDTH 12
++ #define TX_VLAN3_PORT1_EN_LBN 61
++ #define TX_VLAN3_PORT1_EN_WIDTH 1
++ #define TX_VLAN3_PORT0_EN_LBN 60
++ #define TX_VLAN3_PORT0_EN_WIDTH 1
++ #define TX_VLAN3_LBN 48
++ #define TX_VLAN3_WIDTH 12
++ #define TX_VLAN2_PORT1_EN_LBN 45
++ #define TX_VLAN2_PORT1_EN_WIDTH 1
++ #define TX_VLAN2_PORT0_EN_LBN 44
++ #define TX_VLAN2_PORT0_EN_WIDTH 1
++ #define TX_VLAN2_LBN 32
++ #define TX_VLAN2_WIDTH 12
++ #define TX_VLAN1_PORT1_EN_LBN 29
++ #define TX_VLAN1_PORT1_EN_WIDTH 1
++ #define TX_VLAN1_PORT0_EN_LBN 28
++ #define TX_VLAN1_PORT0_EN_WIDTH 1
++ #define TX_VLAN1_LBN 16
++ #define TX_VLAN1_WIDTH 12
++ #define TX_VLAN0_PORT1_EN_LBN 13
++ #define TX_VLAN0_PORT1_EN_WIDTH 1
++ #define TX_VLAN0_PORT0_EN_LBN 12
++ #define TX_VLAN0_PORT0_EN_WIDTH 1
++ #define TX_VLAN0_LBN 0
++ #define TX_VLAN0_WIDTH 12
++#define TX_FIL_CTL_REG_OFST 0xAF0 // Transmit filter control register
++ #define TX_MADR1_FIL_EN_LBN 65
++ #define TX_MADR1_FIL_EN_WIDTH 1
++ #define TX_MADR0_FIL_EN_LBN 64
++ #define TX_MADR0_FIL_EN_WIDTH 1
++ #define TX_IPFIL31_PORT1_EN_LBN 63
++ #define TX_IPFIL31_PORT1_EN_WIDTH 1
++ #define TX_IPFIL31_PORT0_EN_LBN 62
++ #define TX_IPFIL31_PORT0_EN_WIDTH 1
++ #define TX_IPFIL30_PORT1_EN_LBN 61
++ #define TX_IPFIL30_PORT1_EN_WIDTH 1
++ #define TX_IPFIL30_PORT0_EN_LBN 60
++ #define TX_IPFIL30_PORT0_EN_WIDTH 1
++ #define TX_IPFIL29_PORT1_EN_LBN 59
++ #define TX_IPFIL29_PORT1_EN_WIDTH 1
++ #define TX_IPFIL29_PORT0_EN_LBN 58
++ #define TX_IPFIL29_PORT0_EN_WIDTH 1
++ #define TX_IPFIL28_PORT1_EN_LBN 57
++ #define TX_IPFIL28_PORT1_EN_WIDTH 1
++ #define TX_IPFIL28_PORT0_EN_LBN 56
++ #define TX_IPFIL28_PORT0_EN_WIDTH 1
++ #define TX_IPFIL27_PORT1_EN_LBN 55
++ #define TX_IPFIL27_PORT1_EN_WIDTH 1
++ #define TX_IPFIL27_PORT0_EN_LBN 54
++ #define TX_IPFIL27_PORT0_EN_WIDTH 1
++ #define TX_IPFIL26_PORT1_EN_LBN 53
++ #define TX_IPFIL26_PORT1_EN_WIDTH 1
++ #define TX_IPFIL26_PORT0_EN_LBN 52
++ #define TX_IPFIL26_PORT0_EN_WIDTH 1
++ #define TX_IPFIL25_PORT1_EN_LBN 51
++ #define TX_IPFIL25_PORT1_EN_WIDTH 1
++ #define TX_IPFIL25_PORT0_EN_LBN 50
++ #define TX_IPFIL25_PORT0_EN_WIDTH 1
++ #define TX_IPFIL24_PORT1_EN_LBN 49
++ #define TX_IPFIL24_PORT1_EN_WIDTH 1
++ #define TX_IPFIL24_PORT0_EN_LBN 48
++ #define TX_IPFIL24_PORT0_EN_WIDTH 1
++ #define TX_IPFIL23_PORT1_EN_LBN 47
++ #define TX_IPFIL23_PORT1_EN_WIDTH 1
++ #define TX_IPFIL23_PORT0_EN_LBN 46
++ #define TX_IPFIL23_PORT0_EN_WIDTH 1
++ #define TX_IPFIL22_PORT1_EN_LBN 45
++ #define TX_IPFIL22_PORT1_EN_WIDTH 1
++ #define TX_IPFIL22_PORT0_EN_LBN 44
++ #define TX_IPFIL22_PORT0_EN_WIDTH 1
++ #define TX_IPFIL21_PORT1_EN_LBN 43
++ #define TX_IPFIL21_PORT1_EN_WIDTH 1
++ #define TX_IPFIL21_PORT0_EN_LBN 42
++ #define TX_IPFIL21_PORT0_EN_WIDTH 1
++ #define TX_IPFIL20_PORT1_EN_LBN 41
++ #define TX_IPFIL20_PORT1_EN_WIDTH 1
++ #define TX_IPFIL20_PORT0_EN_LBN 40
++ #define TX_IPFIL20_PORT0_EN_WIDTH 1
++ #define TX_IPFIL19_PORT1_EN_LBN 39
++ #define TX_IPFIL19_PORT1_EN_WIDTH 1
++ #define TX_IPFIL19_PORT0_EN_LBN 38
++ #define TX_IPFIL19_PORT0_EN_WIDTH 1
++ #define TX_IPFIL18_PORT1_EN_LBN 37
++ #define TX_IPFIL18_PORT1_EN_WIDTH 1
++ #define TX_IPFIL18_PORT0_EN_LBN 36
++ #define TX_IPFIL18_PORT0_EN_WIDTH 1
++ #define TX_IPFIL17_PORT1_EN_LBN 35
++ #define TX_IPFIL17_PORT1_EN_WIDTH 1
++ #define TX_IPFIL17_PORT0_EN_LBN 34
++ #define TX_IPFIL17_PORT0_EN_WIDTH 1
++ #define TX_IPFIL16_PORT1_EN_LBN 33
++ #define TX_IPFIL16_PORT1_EN_WIDTH 1
++ #define TX_IPFIL16_PORT0_EN_LBN 32
++ #define TX_IPFIL16_PORT0_EN_WIDTH 1
++ #define TX_IPFIL15_PORT1_EN_LBN 31
++ #define TX_IPFIL15_PORT1_EN_WIDTH 1
++ #define TX_IPFIL15_PORT0_EN_LBN 30
++ #define TX_IPFIL15_PORT0_EN_WIDTH 1
++ #define TX_IPFIL14_PORT1_EN_LBN 29
++ #define TX_IPFIL14_PORT1_EN_WIDTH 1
++ #define TX_IPFIL14_PORT0_EN_LBN 28
++ #define TX_IPFIL14_PORT0_EN_WIDTH 1
++ #define TX_IPFIL13_PORT1_EN_LBN 27
++ #define TX_IPFIL13_PORT1_EN_WIDTH 1
++ #define TX_IPFIL13_PORT0_EN_LBN 26
++ #define TX_IPFIL13_PORT0_EN_WIDTH 1
++ #define TX_IPFIL12_PORT1_EN_LBN 25
++ #define TX_IPFIL12_PORT1_EN_WIDTH 1
++ #define TX_IPFIL12_PORT0_EN_LBN 24
++ #define TX_IPFIL12_PORT0_EN_WIDTH 1
++ #define TX_IPFIL11_PORT1_EN_LBN 23
++ #define TX_IPFIL11_PORT1_EN_WIDTH 1
++ #define TX_IPFIL11_PORT0_EN_LBN 22
++ #define TX_IPFIL11_PORT0_EN_WIDTH 1
++ #define TX_IPFIL10_PORT1_EN_LBN 21
++ #define TX_IPFIL10_PORT1_EN_WIDTH 1
++ #define TX_IPFIL10_PORT0_EN_LBN 20
++ #define TX_IPFIL10_PORT0_EN_WIDTH 1
++ #define TX_IPFIL9_PORT1_EN_LBN 19
++ #define TX_IPFIL9_PORT1_EN_WIDTH 1
++ #define TX_IPFIL9_PORT0_EN_LBN 18
++ #define TX_IPFIL9_PORT0_EN_WIDTH 1
++ #define TX_IPFIL8_PORT1_EN_LBN 17
++ #define TX_IPFIL8_PORT1_EN_WIDTH 1
++ #define TX_IPFIL8_PORT0_EN_LBN 16
++ #define TX_IPFIL8_PORT0_EN_WIDTH 1
++ #define TX_IPFIL7_PORT1_EN_LBN 15
++ #define TX_IPFIL7_PORT1_EN_WIDTH 1
++ #define TX_IPFIL7_PORT0_EN_LBN 14
++ #define TX_IPFIL7_PORT0_EN_WIDTH 1
++ #define TX_IPFIL6_PORT1_EN_LBN 13
++ #define TX_IPFIL6_PORT1_EN_WIDTH 1
++ #define TX_IPFIL6_PORT0_EN_LBN 12
++ #define TX_IPFIL6_PORT0_EN_WIDTH 1
++ #define TX_IPFIL5_PORT1_EN_LBN 11
++ #define TX_IPFIL5_PORT1_EN_WIDTH 1
++ #define TX_IPFIL5_PORT0_EN_LBN 10
++ #define TX_IPFIL5_PORT0_EN_WIDTH 1
++ #define TX_IPFIL4_PORT1_EN_LBN 9
++ #define TX_IPFIL4_PORT1_EN_WIDTH 1
++ #define TX_IPFIL4_PORT0_EN_LBN 8
++ #define TX_IPFIL4_PORT0_EN_WIDTH 1
++ #define TX_IPFIL3_PORT1_EN_LBN 7
++ #define TX_IPFIL3_PORT1_EN_WIDTH 1
++ #define TX_IPFIL3_PORT0_EN_LBN 6
++ #define TX_IPFIL3_PORT0_EN_WIDTH 1
++ #define TX_IPFIL2_PORT1_EN_LBN 5
++ #define TX_IPFIL2_PORT1_EN_WIDTH 1
++ #define TX_IPFIL2_PORT0_EN_LBN 4
++ #define TX_IPFIL2_PORT0_EN_WIDTH 1
++ #define TX_IPFIL1_PORT1_EN_LBN 3
++ #define TX_IPFIL1_PORT1_EN_WIDTH 1
++ #define TX_IPFIL1_PORT0_EN_LBN 2
++ #define TX_IPFIL1_PORT0_EN_WIDTH 1
++ #define TX_IPFIL0_PORT1_EN_LBN 1
++ #define TX_IPFIL0_PORT1_EN_WIDTH 1
++ #define TX_IPFIL0_PORT0_EN_LBN 0
++ #define TX_IPFIL0_PORT0_EN_WIDTH 1
++#define TX_IPFIL_TBL_OFST 0xB00 // Transmit IP source address filter table
++ #define TX_IPFIL_MASK_LBN 32
++ #define TX_IPFIL_MASK_WIDTH 32
++ #define TX_IP_SRC_ADR_LBN 0
++ #define TX_IP_SRC_ADR_WIDTH 32
++#define TX_PACE_REG_A1_OFST 0xF80000 // Transmit pace control register
++#define TX_PACE_REG_B0_OFST 0xA90 // Transmit pace control register
++ #define TX_PACE_SB_AF_LBN 19
++ #define TX_PACE_SB_AF_WIDTH 10
++ #define TX_PACE_SB_NOTAF_LBN 9
++ #define TX_PACE_SB_NOTAF_WIDTH 10
++ #define TX_PACE_FB_BASE_LBN 5
++ #define TX_PACE_FB_BASE_WIDTH 4
++ #define TX_PACE_BIN_TH_LBN 0
++ #define TX_PACE_BIN_TH_WIDTH 5
++#define TX_PACE_TBL_A1_OFST 0xF80040 // Transmit pacing table
++#define TX_PACE_TBL_FIRST_QUEUE_A1 4
++#define TX_PACE_TBL_B0_OFST 0xF80000 // Transmit pacing table
++#define TX_PACE_TBL_FIRST_QUEUE_B0 0
++ #define TX_PACE_LBN 0
++ #define TX_PACE_WIDTH 5
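++
++// Note on conventions (inferred from the helpers in ef_vi_falcon.h):
++// each _LBN value gives a field's lowest bit number within its
++// register and each _WIDTH value the field's width in bits. E.g.
++// TX_PACE (LBN 0, WIDTH 5) occupies bits 4:0, so a pace value v would
++// be placed with ((v & 0x1f) << 0).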
++
++//////////////---- EE/Flash Registers C Header ----//////////////
++#define EE_SPI_HCMD_REG_KER_OFST 0x100 // SPI host command register
++#define EE_SPI_HCMD_REG_OFST 0x100 // SPI host command register
++ #define EE_SPI_HCMD_CMD_EN_LBN 31
++ #define EE_SPI_HCMD_CMD_EN_WIDTH 1
++ #define EE_WR_TIMER_ACTIVE_LBN 28
++ #define EE_WR_TIMER_ACTIVE_WIDTH 1
++ #define EE_SPI_HCMD_SF_SEL_LBN 24
++ #define EE_SPI_HCMD_SF_SEL_WIDTH 1
++ #define EE_SPI_HCMD_DABCNT_LBN 16
++ #define EE_SPI_HCMD_DABCNT_WIDTH 5
++ #define EE_SPI_HCMD_READ_LBN 15
++ #define EE_SPI_HCMD_READ_WIDTH 1
++ #define EE_SPI_HCMD_DUBCNT_LBN 12
++ #define EE_SPI_HCMD_DUBCNT_WIDTH 2
++ #define EE_SPI_HCMD_ADBCNT_LBN 8
++ #define EE_SPI_HCMD_ADBCNT_WIDTH 2
++ #define EE_SPI_HCMD_ENC_LBN 0
++ #define EE_SPI_HCMD_ENC_WIDTH 8
++#define EE_SPI_HADR_REG_KER_OFST 0x110 // SPI host address register
++#define EE_SPI_HADR_REG_OFST 0x110 // SPI host address register
++ #define EE_SPI_HADR_DUBYTE_LBN 24
++ #define EE_SPI_HADR_DUBYTE_WIDTH 8
++ #define EE_SPI_HADR_ADR_LBN 0
++ #define EE_SPI_HADR_ADR_WIDTH 24
++#define EE_SPI_HDATA_REG_KER_OFST 0x120 // SPI host data register
++#define EE_SPI_HDATA_REG_OFST 0x120 // SPI host data register
++ #define EE_SPI_HDATA3_LBN 96
++ #define EE_SPI_HDATA3_WIDTH 32
++ #define EE_SPI_HDATA2_LBN 64
++ #define EE_SPI_HDATA2_WIDTH 32
++ #define EE_SPI_HDATA1_LBN 32
++ #define EE_SPI_HDATA1_WIDTH 32
++ #define EE_SPI_HDATA0_LBN 0
++ #define EE_SPI_HDATA0_WIDTH 32
++#define EE_BASE_PAGE_REG_KER_OFST 0x130 // Expansion ROM base mirror register
++#define EE_BASE_PAGE_REG_OFST 0x130 // Expansion ROM base mirror register
++ #define EE_EXP_ROM_WINDOW_BASE_LBN 16
++ #define EE_EXP_ROM_WINDOW_BASE_WIDTH 13
++ #define EE_EXPROM_MASK_LBN 0
++ #define EE_EXPROM_MASK_WIDTH 13
++#define EE_VPD_CFG0_REG_KER_OFST 0x140 // SPI/VPD configuration register
++#define EE_VPD_CFG0_REG_OFST 0x140 // SPI/VPD configuration register
++ #define EE_SF_FASTRD_EN_LBN 127
++ #define EE_SF_FASTRD_EN_WIDTH 1
++ #define EE_SF_CLOCK_DIV_LBN 120
++ #define EE_SF_CLOCK_DIV_WIDTH 7
++ #define EE_VPD_WIP_POLL_LBN 119
++ #define EE_VPD_WIP_POLL_WIDTH 1
++ #define EE_VPDW_LENGTH_LBN 80
++ #define EE_VPDW_LENGTH_WIDTH 15
++ #define EE_VPDW_BASE_LBN 64
++ #define EE_VPDW_BASE_WIDTH 15
++ #define EE_VPD_WR_CMD_EN_LBN 56
++ #define EE_VPD_WR_CMD_EN_WIDTH 8
++ #define EE_VPD_BASE_LBN 32
++ #define EE_VPD_BASE_WIDTH 24
++ #define EE_VPD_LENGTH_LBN 16
++ #define EE_VPD_LENGTH_WIDTH 13
++ #define EE_VPD_AD_SIZE_LBN 8
++ #define EE_VPD_AD_SIZE_WIDTH 5
++ #define EE_VPD_ACCESS_ON_LBN 5
++ #define EE_VPD_ACCESS_ON_WIDTH 1
++#define EE_VPD_SW_CNTL_REG_KER_OFST 0x150 // VPD access SW control register
++#define EE_VPD_SW_CNTL_REG_OFST 0x150 // VPD access SW control register
++ #define EE_VPD_CYCLE_PENDING_LBN 31
++ #define EE_VPD_CYCLE_PENDING_WIDTH 1
++ #define EE_VPD_CYC_WRITE_LBN 28
++ #define EE_VPD_CYC_WRITE_WIDTH 1
++ #define EE_VPD_CYC_ADR_LBN 0
++ #define EE_VPD_CYC_ADR_WIDTH 15
++#define EE_VPD_SW_DATA_REG_KER_OFST 0x160 // VPD access SW data register
++#define EE_VPD_SW_DATA_REG_OFST 0x160 // VPD access SW data register
++ #define EE_VPD_CYC_DAT_LBN 0
++ #define EE_VPD_CYC_DAT_WIDTH 32
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,43 @@
++//////////////---- Descriptors C Headers ----//////////////
++// Receive Kernel IP Descriptor
++ #define RX_KER_BUF_SIZE_LBN 48
++ #define RX_KER_BUF_SIZE_WIDTH 14
++ #define RX_KER_BUF_REGION_LBN 46
++ #define RX_KER_BUF_REGION_WIDTH 2
++ #define RX_KER_BUF_REGION0_DECODE 0
++ #define RX_KER_BUF_REGION1_DECODE 1
++ #define RX_KER_BUF_REGION2_DECODE 2
++ #define RX_KER_BUF_REGION3_DECODE 3
++ #define RX_KER_BUF_ADR_LBN 0
++ #define RX_KER_BUF_ADR_WIDTH 46
++// Receive User IP Descriptor
++ #define RX_USR_2BYTE_OFS_LBN 20
++ #define RX_USR_2BYTE_OFS_WIDTH 12
++ #define RX_USR_BUF_ID_LBN 0
++ #define RX_USR_BUF_ID_WIDTH 20
++// Transmit Kernel IP Descriptor
++ #define TX_KER_PORT_LBN 63
++ #define TX_KER_PORT_WIDTH 1
++ #define TX_KER_CONT_LBN 62
++ #define TX_KER_CONT_WIDTH 1
++ #define TX_KER_BYTE_CNT_LBN 48
++ #define TX_KER_BYTE_CNT_WIDTH 14
++ #define TX_KER_BUF_REGION_LBN 46
++ #define TX_KER_BUF_REGION_WIDTH 2
++ #define TX_KER_BUF_REGION0_DECODE 0
++ #define TX_KER_BUF_REGION1_DECODE 1
++ #define TX_KER_BUF_REGION2_DECODE 2
++ #define TX_KER_BUF_REGION3_DECODE 3
++ #define TX_KER_BUF_ADR_LBN 0
++ #define TX_KER_BUF_ADR_WIDTH 46
++// Transmit User IP Descriptor
++ #define TX_USR_PORT_LBN 47
++ #define TX_USR_PORT_WIDTH 1
++ #define TX_USR_CONT_LBN 46
++ #define TX_USR_CONT_WIDTH 1
++ #define TX_USR_BYTE_CNT_LBN 33
++ #define TX_USR_BYTE_CNT_WIDTH 13
++ #define TX_USR_BUF_ID_LBN 13
++ #define TX_USR_BUF_ID_WIDTH 20
++ #define TX_USR_BYTE_OFS_LBN 0
++ #define TX_USR_BYTE_OFS_WIDTH 13
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon_event.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon_event.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon_event.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon_event.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,123 @@
++//////////////---- Events Format C Header ----//////////////
++//////////////---- Event entry ----//////////////
++ #define EV_CODE_LBN 60
++ #define EV_CODE_WIDTH 4
++ #define RX_IP_EV_DECODE 0
++ #define TX_IP_EV_DECODE 2
++ #define DRIVER_EV_DECODE 5
++ #define GLOBAL_EV_DECODE 6
++ #define DRV_GEN_EV_DECODE 7
++ #define EV_DATA_LBN 0
++ #define EV_DATA_WIDTH 60
++//////////////---- Receive IP events for both Kernel & User event queues ----//////////////
++ #define RX_EV_PKT_OK_LBN 56
++ #define RX_EV_PKT_OK_WIDTH 1
++ #define RX_EV_BUF_OWNER_ID_ERR_LBN 54
++ #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++ #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
++ #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
++ #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
++ #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
++ #define RX_EV_ETH_CRC_ERR_LBN 50
++ #define RX_EV_ETH_CRC_ERR_WIDTH 1
++ #define RX_EV_FRM_TRUNC_LBN 49
++ #define RX_EV_FRM_TRUNC_WIDTH 1
++ #define RX_EV_DRIB_NIB_LBN 48
++ #define RX_EV_DRIB_NIB_WIDTH 1
++ #define RX_EV_TOBE_DISC_LBN 47
++ #define RX_EV_TOBE_DISC_WIDTH 1
++ #define RX_EV_PKT_TYPE_LBN 44
++ #define RX_EV_PKT_TYPE_WIDTH 3
++ #define RX_EV_PKT_TYPE_ETH_DECODE 0
++ #define RX_EV_PKT_TYPE_LLC_DECODE 1
++ #define RX_EV_PKT_TYPE_JUMBO_DECODE 2
++ #define RX_EV_PKT_TYPE_VLAN_DECODE 3
++ #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
++ #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
++ #define RX_EV_HDR_TYPE_LBN 42
++ #define RX_EV_HDR_TYPE_WIDTH 2
++ #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
++ #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
++ #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
++ #define RX_EV_HDR_TYPE_NON_IP_DECODE 3
++ #define RX_EV_DESC_Q_EMPTY_LBN 41
++ #define RX_EV_DESC_Q_EMPTY_WIDTH 1
++ #define RX_EV_MCAST_HASH_MATCH_LBN 40
++ #define RX_EV_MCAST_HASH_MATCH_WIDTH 1
++ #define RX_EV_MCAST_PKT_LBN 39
++ #define RX_EV_MCAST_PKT_WIDTH 1
++ #define RX_EV_Q_LABEL_LBN 32
++ #define RX_EV_Q_LABEL_WIDTH 5
++ #define RX_JUMBO_CONT_LBN 31
++ #define RX_JUMBO_CONT_WIDTH 1
++ #define RX_SOP_LBN 15
++ #define RX_SOP_WIDTH 1
++ #define RX_PORT_LBN 30
++ #define RX_PORT_WIDTH 1
++ #define RX_EV_BYTE_CNT_LBN 16
++ #define RX_EV_BYTE_CNT_WIDTH 14
++ #define RX_iSCSI_PKT_OK_LBN 14
++ #define RX_iSCSI_PKT_OK_WIDTH 1
++ #define RX_ISCSI_DDIG_ERR_LBN 13
++ #define RX_ISCSI_DDIG_ERR_WIDTH 1
++ #define RX_ISCSI_HDIG_ERR_LBN 12
++ #define RX_ISCSI_HDIG_ERR_WIDTH 1
++ #define RX_EV_DESC_PTR_LBN 0
++ #define RX_EV_DESC_PTR_WIDTH 12
++//////////////---- Transmit IP events for both Kernel & User event queues ----//////////////
++ #define TX_EV_PKT_ERR_LBN 38
++ #define TX_EV_PKT_ERR_WIDTH 1
++ #define TX_EV_PKT_TOO_BIG_LBN 37
++ #define TX_EV_PKT_TOO_BIG_WIDTH 1
++ #define TX_EV_Q_LABEL_LBN 32
++ #define TX_EV_Q_LABEL_WIDTH 5
++ #define TX_EV_PORT_LBN 16
++ #define TX_EV_PORT_WIDTH 1
++ #define TX_EV_WQ_FF_FULL_LBN 15
++ #define TX_EV_WQ_FF_FULL_WIDTH 1
++ #define TX_EV_BUF_OWNER_ID_ERR_LBN 14
++ #define TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++ #define TX_EV_COMP_LBN 12
++ #define TX_EV_COMP_WIDTH 1
++ #define TX_EV_DESC_PTR_LBN 0
++ #define TX_EV_DESC_PTR_WIDTH 12
++//////////////---- Char or Kernel driver events ----//////////////
++ #define DRIVER_EV_SUB_CODE_LBN 56
++ #define DRIVER_EV_SUB_CODE_WIDTH 4
++ #define TX_DESCQ_FLS_DONE_EV_DECODE 0x0
++ #define RX_DESCQ_FLS_DONE_EV_DECODE 0x1
++ #define EVQ_INIT_DONE_EV_DECODE 0x2
++ #define EVQ_NOT_EN_EV_DECODE 0x3
++ #define RX_DESCQ_FLSFF_OVFL_EV_DECODE 0x4
++ #define SRM_UPD_DONE_EV_DECODE 0x5
++ #define WAKE_UP_EV_DECODE 0x6
++ #define TX_PKT_NON_TCP_UDP_DECODE 0x9
++ #define TIMER_EV_DECODE 0xA
++ #define RX_DSC_ERROR_EV_DECODE 0xE
++ #define DRIVER_EV_TX_DESCQ_ID_LBN 0
++ #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
++ #define DRIVER_EV_RX_DESCQ_ID_LBN 0
++ #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
++ #define DRIVER_EV_EVQ_ID_LBN 0
++ #define DRIVER_EV_EVQ_ID_WIDTH 12
++ #define DRIVER_TMR_ID_LBN 0
++ #define DRIVER_TMR_ID_WIDTH 12
++ #define DRIVER_EV_SRM_UPD_LBN 0
++ #define DRIVER_EV_SRM_UPD_WIDTH 2
++ #define SRM_CLR_EV_DECODE 0
++ #define SRM_UPD_EV_DECODE 1
++ #define SRM_ILLCLR_EV_DECODE 2
++//////////////---- Global events. Sent to both event queues 0 and 4. ----//////////////
++ #define XFP_PHY_INTR_LBN 10
++ #define XFP_PHY_INTR_WIDTH 1
++ #define XG_PHY_INTR_LBN 9
++ #define XG_PHY_INTR_WIDTH 1
++ #define G_PHY1_INTR_LBN 8
++ #define G_PHY1_INTR_WIDTH 1
++ #define G_PHY0_INTR_LBN 7
++ #define G_PHY0_INTR_WIDTH 1
++//////////////---- Driver generated events ----//////////////
++ #define DRV_GEN_EV_CODE_LBN 60
++ #define DRV_GEN_EV_CODE_WIDTH 4
++ #define DRV_GEN_EV_DATA_LBN 0
++ #define DRV_GEN_EV_DATA_WIDTH 60
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_falcon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_falcon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,172 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author slp
++ * \brief Falcon-specific definitions
++ * \date 2004/08
++ */
++
++#ifndef __EF_VI_FALCON_H__
++#define __EF_VI_FALCON_H__
++
++#define EFHW_4K 0x00001000u
++#define EFHW_8K 0x00002000u
++
++/* include the autogenerated register definitions */
++
++#include "ef_vi_falcon_core.h"
++#include "ef_vi_falcon_desc.h"
++#include "ef_vi_falcon_event.h"
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Helpers to turn bit shifts into dword shifts and check that the bit fields
++ * haven't overflown the dword etc. Aim is to preserve consistency with the
++ * autogenerated headers - once stable we could hard code.
++ *
++ *---------------------------------------------------------------------------*/
++
++/* mask constructors */
++#define __FALCON_MASK(WIDTH,T) ((((T)1) << (WIDTH)) - 1)
++#define __EFVI_MASK32(WIDTH) __FALCON_MASK((WIDTH),uint32_t)
++#define __EFVI_MASK64(WIDTH) __FALCON_MASK((WIDTH),uint64_t)
++
++#define __EFVI_FALCON_MASKFIELD32(LBN, WIDTH) ((uint32_t) \
++ (__EFVI_MASK32(WIDTH) << (LBN)))
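++
++/* For example, a field with LBN 12 and WIDTH 4 gives
++ * __EFVI_MASK32(4) == 0xf and
++ * __EFVI_FALCON_MASKFIELD32(12, 4) == 0x0000f000. */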
++
++/* constructors for fields which span the first and second dwords */
++#define __LW(LBN) (32 - (LBN))
++#define LOW(v, LBN, WIDTH) ((uint32_t) \
++ (((v) & __EFVI_MASK64(__LW((LBN)))) << (LBN)))
++#define HIGH(v, LBN, WIDTH) ((uint32_t)(((v) >> __LW((LBN))) & \
++ __EFVI_MASK64((WIDTH - __LW((LBN))))))
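++/* E.g. RX_KER_BUF_ADR (LBN 0, WIDTH 46) spans the dword boundary:
++ * LOW(v, 0, 46) keeps the low __LW(0) = 32 bits of v, and
++ * HIGH(v, 0, 46) takes the remaining 46 - 32 = 14 bits above them. */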
++/* constructors for fields within the second dword */
++#define __DW2(LBN) ((LBN) - 32)
++
++/* constructors for fields which span the second and third dwords */
++#define __LW2(LBN) (64 - (LBN))
++#define LOW2(v, LBN, WIDTH) ((uint32_t) \
++ (((v) & __EFVI_MASK64(__LW2((LBN)))) << ((LBN) - 32)))
++#define HIGH2(v, LBN, WIDTH) ((uint32_t) \
++ (((v) >> __LW2((LBN))) & __EFVI_MASK64((WIDTH - __LW2((LBN))))))
++
++/* constructors for fields within the third dword */
++#define __DW3(LBN) ((LBN) - 64)
++
++
++/* constructors for fields which span the third and fourth dwords */
++#define __LW3(LBN) (96 - (LBN))
++#define LOW3(v, LBN, WIDTH) ((uint32_t) \
++ (((v) & __EFVI_MASK64(__LW3((LBN)))) << ((LBN) - 64)))
++#define HIGH3(v, LBN, WIDTH) ((uint32_t) \
++ (((v) >> __LW3((LBN))) & __EFVI_MASK64((WIDTH - __LW3((LBN))))))
++
++/* constructors for fields within the fourth dword */
++#define __DW4(LBN) ((LBN) - 96)
++
++/* checks that the autogenerated headers are consistent with our model */
++#define WIDTHCHCK(a, b) ef_assert((a) == (b))
++#define RANGECHCK(v, WIDTH) \
++ ef_assert(((uint64_t)(v) & ~(__EFVI_MASK64((WIDTH)))) == 0)
++
++/* fields within the first dword */
++#define DWCHCK(LBN, WIDTH) ef_assert(((LBN) >= 0) && (((LBN)+(WIDTH)) <= 32))
++
++/* fields which span the first and second dwords */
++#define LWCHK(LBN, WIDTH) ef_assert(WIDTH >= __LW(LBN))
++
++/*----------------------------------------------------------------------------
++ *
++ * Buffer virtual addresses (4K buffers)
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Form a buffer virtual address from buffer ID and offset. If the offset
++** is larger than the buffer size, then the buffer indexed will be
++** calculated appropriately. It is the responsibility of the caller to
++** ensure that they have valid buffers programmed at that address.
++*/
++#define EFVI_FALCON_VADDR_4K_S (12)
++#define EFVI_FALCON_VADDR_M 0xfffff /* post shift mask */
++
++
++#define EFVI_FALCON_BUFFER_4K_ADDR(id,off) \
++ (((id) << EFVI_FALCON_VADDR_4K_S) + (off))
++
++#define EFVI_FALCON_BUFFER_4K_PAGE(vaddr) \
++ (((vaddr) >> EFVI_FALCON_VADDR_4K_S) & EFVI_FALCON_VADDR_M)
++
++#define EFVI_FALCON_BUFFER_4K_OFF(vaddr) \
++ ((vaddr) & __EFVI_MASK32(EFVI_FALCON_VADDR_4K_S))
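++
++/* Worked example: buffer id 3 at offset 0x10:
++ * EFVI_FALCON_BUFFER_4K_ADDR(3, 0x10) == (3 << 12) + 0x10 == 0x3010,
++ * EFVI_FALCON_BUFFER_4K_PAGE(0x3010) == 3,
++ * EFVI_FALCON_BUFFER_4K_OFF(0x3010) == 0x10. */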
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Masks
++ *
++ *---------------------------------------------------------------------------*/
++
++#define EFVI_FALCON_CLOCK_ASIC_HZ (125000)
++#define EFVI_FALCON_CLOCK_FPGA_HZ (62500)
++#define EFVI_FALCON_CLOCK_HZ EFVI_FALCON_CLOCK_ASIC_HZ
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Timers
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Event-queue timer granularity, measured in us.
++   Given by: 4096 * 3 cycles * clock period */
++
++#define EFVI_FALCON_EVQTIMER_PERIOD_US ((4096 * 3 * 1000) / EFVI_FALCON_CLOCK_HZ)
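++/* With the ASIC value above this evaluates to
++ * (4096 * 3 * 1000) / 125000 = 98 us (integer division); the FPGA
++ * value gives 196 us. The *_HZ constants therefore appear to be in
++ * kHz (125000 kHz = 125 MHz), given the factor of 1000 here. */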
++
++/* mode bits */
++#define EFVI_FALCON_TIMER_MODE_DIS 0 /* disabled */
++#define EFVI_FALCON_TIMER_MODE_RUN 1 /* started counting right away */
++#define EFVI_FALCON_TIMER_MODE_HOLD 2 /* trigger mode (user queues) */
++
++#define EFVI_FALCON_EVQTIMER_HOLD (EFVI_FALCON_TIMER_MODE_HOLD << TIMER_MODE_LBN)
++#define EFVI_FALCON_EVQTIMER_RUN (EFVI_FALCON_TIMER_MODE_RUN << TIMER_MODE_LBN)
++#define EFVI_FALCON_EVQTIMER_DISABLE (EFVI_FALCON_TIMER_MODE_DIS << TIMER_MODE_LBN)
++
++
++/* ---- efhw_event_t helpers --- */
++
++#define EFVI_FALCON_EVENT_CODE(evp) \
++ ((evp)->u64 & EFVI_FALCON_EVENT_CODE_MASK)
++
++#define EFVI_FALCON_EVENT_SW_DATA_MASK 0x0000ffff
++
++#define __EFVI_FALCON_OPEN_MASK(WIDTH) ((((uint64_t)1) << (WIDTH)) - 1)
++
++#define EFVI_FALCON_EVENT_CODE_MASK \
++ (__EFVI_FALCON_OPEN_MASK(EV_CODE_WIDTH) << EV_CODE_LBN)
++
++
++#endif /* __EF_VI_FALCON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_internal.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_internal.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/ef_vi_internal.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/ef_vi_internal.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,256 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Really-and-truly-honestly internal stuff for libef.
++ * \date 2004/06/13
++ */
++
++/*! \cidoxg_include_ci_ul */
++#ifndef __CI_EF_VI_INTERNAL_H__
++#define __CI_EF_VI_INTERNAL_H__
++
++
++/* These flags share space with enum ef_vi_flags. */
++#define EF_VI_BUG5692_WORKAROUND 0x10000
++
++
++/* ***********************************************************************
++ * COMPILATION CONTROL FLAGS (see ef_vi.h for "workaround" controls)
++ */
++
++#define EF_VI_DO_MAGIC_CHECKS 1
++
++
++/**********************************************************************
++ * Headers
++ */
++
++#include <etherfabric/ef_vi.h>
++#include "sysdep.h"
++#include "ef_vi_falcon.h"
++
++
++/**********************************************************************
++ * Debugging.
++ */
++
++#ifndef NDEBUG
++
++# define _ef_assert(exp, file, line) BUG_ON(!(exp))
++
++# define _ef_assert2(exp, x, y, file, line) do { \
++ if (unlikely(!(exp))) \
++ BUG(); \
++ } while (0)
++
++#else
++
++# define _ef_assert(exp, file, line)
++# define _ef_assert2(e, x, y, file, line)
++
++#endif
++
++#define ef_assert(a) do{ _ef_assert((a),__FILE__,__LINE__); } while(0)
++#define ef_assert_equal(a,b) _ef_assert2((a)==(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_eq ef_assert_equal
++#define ef_assert_lt(a,b) _ef_assert2((a)<(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_le(a,b) _ef_assert2((a)<=(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_nequal(a,b) _ef_assert2((a)!=(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_ne ef_assert_nequal
++#define ef_assert_ge(a,b) _ef_assert2((a)>=(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_gt(a,b) _ef_assert2((a)>(b),(a),(b),__FILE__,__LINE__)
++
++/**********************************************************************
++ * Debug checks. ******************************************************
++ **********************************************************************/
++
++#ifdef NDEBUG
++# define EF_VI_MAGIC_SET(p, type)
++# define EF_VI_CHECK_VI(p)
++# define EF_VI_CHECK_EVENT_Q(p)
++# define EF_VI_CHECK_IOBUFSET(p)
++# define EF_VI_CHECK_FILTER(p)
++# define EF_VI_CHECK_SHMBUF(p)
++# define EF_VI_CHECK_PT_EP(p)
++#else
++# define EF_VI 0x3
++# define EF_EPLOCK 0x6
++# define EF_IOBUFSET 0x9
++# define EF_FILTER 0xa
++# define EF_SHMBUF 0x11
++
++# define EF_VI_MAGIC(p, type) \
++ (((unsigned)(type) << 28) | \
++ (((unsigned)(intptr_t)(p)) & 0x0fffffffu))
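++/* E.g. with p == (void*) 0x12345678 and type EF_VI (0x3),
++ * EF_VI_MAGIC(p, EF_VI) == 0x32345678: the object type sits in the
++ * top nibble and the low 28 bits echo the object's own address. */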
++
++# if !EF_VI_DO_MAGIC_CHECKS
++# define EF_VI_MAGIC_SET(p, type)
++# define EF_VI_MAGIC_CHECK(p, type)
++# else
++# define EF_VI_MAGIC_SET(p, type) \
++ do { \
++ (p)->magic = EF_VI_MAGIC((p), (type)); \
++ } while (0)
++
++# define EF_VI_MAGIC_OKAY(p, type) \
++ ((p)->magic == EF_VI_MAGIC((p), (type)))
++
++# define EF_VI_MAGIC_CHECK(p, type) \
++ ef_assert(EF_VI_MAGIC_OKAY((p), (type)))
++
++#endif /* EF_VI_DO_MAGIC_CHECKS */
++
++# define EF_VI_CHECK_VI(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_VI);
++
++# define EF_VI_CHECK_EVENT_Q(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_VI); \
++ ef_assert((p)->evq_base); \
++ ef_assert((p)->evq_mask);
++
++# define EF_VI_CHECK_PT_EP(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_VI); \
++ ef_assert((p)->ep_state);
++
++# define EF_VI_CHECK_IOBUFSET(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_IOBUFSET)
++
++# define EF_VI_CHECK_FILTER(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_FILTER);
++
++# define EF_VI_CHECK_SHMBUF(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_SHMBUF);
++
++#endif
++
++#ifndef NDEBUG
++# define EF_DRIVER_MAGIC 0x00f00ba4
++# define EF_ASSERT_THIS_DRIVER_VALID(driver) \
++ do{ ef_assert(driver); \
++ EF_VI_MAGIC_CHECK((driver), EF_DRIVER_MAGIC); \
++ ef_assert((driver)->init); }while(0)
++
++# define EF_ASSERT_DRIVER_VALID() EF_ASSERT_THIS_DRIVER_VALID(&ci_driver)
++#else
++# define EF_ASSERT_THIS_DRIVER_VALID(driver)
++# define EF_ASSERT_DRIVER_VALID()
++#endif
++
++
++/* *************************************
++ * Power of 2 FIFO
++ */
++
++#define EF_VI_FIFO2_M(f, x) ((x) & ((f)->fifo_mask))
++#define ef_vi_fifo2_valid(f) ((f) && (f)->fifo && (f)->fifo_mask > 0 && \
++ (f)->fifo_rd_i <= (f)->fifo_mask && \
++ (f)->fifo_wr_i <= (f)->fifo_mask && \
++ EF_VI_IS_POW2((f)->fifo_mask+1u))
++
++#define ef_vi_fifo2_init(f, cap) \
++ do{ ef_assert(EF_VI_IS_POW2((cap) + 1)); \
++ (f)->fifo_rd_i = (f)->fifo_wr_i = 0u; \
++ (f)->fifo_mask = (cap); \
++ }while(0)
++
++#define ef_vi_fifo2_is_empty(f) ((f)->fifo_rd_i == (f)->fifo_wr_i)
++#define ef_vi_fifo2_capacity(f) ((f)->fifo_mask)
++#define ef_vi_fifo2_buf_size(f) ((f)->fifo_mask + 1u)
++#define ef_vi_fifo2_end(f) ((f)->fifo + ef_vi_fifo2_buf_size(f))
++#define ef_vi_fifo2_peek(f) ((f)->fifo[(f)->fifo_rd_i])
++#define ef_vi_fifo2_poke(f) ((f)->fifo[(f)->fifo_wr_i])
++#define ef_vi_fifo2_num(f) EF_VI_FIFO2_M((f),(f)->fifo_wr_i-(f)->fifo_rd_i)
++
++#define ef_vi_fifo2_wr_prev(f) \
++ do{ (f)->fifo_wr_i = EF_VI_FIFO2_M((f), (f)->fifo_wr_i - 1u); }while(0)
++#define ef_vi_fifo2_wr_next(f) \
++ do{ (f)->fifo_wr_i = EF_VI_FIFO2_M((f), (f)->fifo_wr_i + 1u); }while(0)
++#define ef_vi_fifo2_rd_adv(f, n) \
++ do{ (f)->fifo_rd_i = EF_VI_FIFO2_M((f), (f)->fifo_rd_i + (n)); }while(0)
++#define ef_vi_fifo2_rd_prev(f) \
++ do{ (f)->fifo_rd_i = EF_VI_FIFO2_M((f), (f)->fifo_rd_i - 1u); }while(0)
++#define ef_vi_fifo2_rd_next(f) \
++ do{ (f)->fifo_rd_i = EF_VI_FIFO2_M((f), (f)->fifo_rd_i + 1u); }while(0)
++
++#define ef_vi_fifo2_put(f, v) \
++ do{ ef_vi_fifo2_poke(f) = (v); ef_vi_fifo2_wr_next(f); }while(0)
++#define ef_vi_fifo2_get(f, pv) \
++ do{ *(pv) = ef_vi_fifo2_peek(f); ef_vi_fifo2_rd_next(f); }while(0)
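++
++/* A minimal usage sketch (illustration only, never compiled; the
++ * element type and the struct holding the fields are hypothetical --
++ * the macros above only require members named fifo, fifo_mask,
++ * fifo_rd_i and fifo_wr_i): */
++#if 0
++struct demo_fifo {
++	int*     fifo;       /* storage for fifo_mask + 1 entries */
++	unsigned fifo_mask;  /* capacity; must be (a power of 2) - 1 */
++	unsigned fifo_rd_i;
++	unsigned fifo_wr_i;
++};
++
++static int demo_fifo_roundtrip(struct demo_fifo* f, int* storage)
++{
++	int v;
++	f->fifo = storage;        /* storage must hold 16 entries */
++	ef_vi_fifo2_init(f, 15);  /* ok: 15 + 1 is a power of 2 */
++	ef_vi_fifo2_put(f, 42);   /* poke slot, advance write index */
++	ef_vi_fifo2_get(f, &v);   /* peek slot, advance read index */
++	return v;                 /* 42 */
++}
++#endif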
++
++
++/* *********************************************************************
++ * Eventq handling
++ */
++
++typedef union {
++ uint64_t u64;
++ struct {
++ uint32_t a;
++ uint32_t b;
++ } opaque;
++} ef_vi_event;
++
++
++#define EF_VI_EVENT_OFFSET(q, i) \
++ (((q)->evq_state->evq_ptr - (i) * sizeof(ef_vi_event)) & (q)->evq_mask)
++
++#define EF_VI_EVENT_PTR(q, i) \
++ ((ef_vi_event*) ((q)->evq_base + EF_VI_EVENT_OFFSET((q), (i))))
++
++/* *********************************************************************
++ * Miscellaneous goodies
++ */
++#ifdef NDEBUG
++# define EF_VI_DEBUG(x)
++#else
++# define EF_VI_DEBUG(x) x
++#endif
++
++#define EF_VI_ROUND_UP(i, align) (((i)+(align)-1u) & ~((align)-1u))
++#define EF_VI_ALIGN_FWD(p, align) (((p)+(align)-1u) & ~((align)-1u))
++#define EF_VI_ALIGN_BACK(p, align) ((p) & ~((align)-1u))
++#define EF_VI_PTR_ALIGN_BACK(p, align) \
++ ((char*)EF_VI_ALIGN_BACK(((intptr_t)(p)), ((intptr_t)(align))))
++#define EF_VI_IS_POW2(x) ((x) && ! ((x) & ((x) - 1)))
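++/* Worked examples (align must be a power of 2):
++ * EF_VI_ROUND_UP(100, 64) == (163 & ~63) == 128,
++ * EF_VI_ALIGN_BACK(100, 64) == 64,
++ * EF_VI_IS_POW2(64) != 0, EF_VI_IS_POW2(96) == 0,
++ * EF_VI_IS_POW2(0) == 0. */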
++
++
++/* ********************************************************************
++ */
++
++extern void falcon_vi_init(ef_vi*, void* vvis ) EF_VI_HF;
++extern void ef_eventq_state_init(ef_vi* evq) EF_VI_HF;
++extern void __ef_init(void) EF_VI_HF;
++
++
++#endif /* __CI_EF_VI_INTERNAL_H__ */
++
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/etherfabric/ef_vi.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/etherfabric/ef_vi.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/etherfabric/ef_vi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/etherfabric/ef_vi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,665 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \brief Virtual Interface
++ * \date 2007/05/16
++ */
++
++#ifndef __EFAB_EF_VI_H__
++#define __EFAB_EF_VI_H__
++
++
++/**********************************************************************
++ * Primitive types ****************************************************
++ **********************************************************************/
++
++/* We standardise on the types from stdint.h and synthesise these types
++ * for compilers/platforms that don't provide them */
++
++# include <linux/types.h>
++# define EF_VI_ALIGN(x) __attribute__ ((aligned (x)))
++# define ef_vi_inline static inline
++
++
++
++/**********************************************************************
++ * Types **************************************************************
++ **********************************************************************/
++
++typedef uint32_t ef_eventq_ptr;
++
++typedef uint64_t ef_addr;
++typedef char* ef_vi_ioaddr_t;
++
++/**********************************************************************
++ * ef_event ***********************************************************
++ **********************************************************************/
++
++/*! \i_ef_vi A DMA request identifier.
++**
++** This is an integer token specified by the transport and associated
++** with a DMA request. It is returned to the VI user with DMA completion
++** events. It is typically used to identify the buffer associated with
++** the transfer.
++*/
++typedef int ef_request_id;
++
++typedef union {
++ uint64_t u64[1];
++ uint32_t u32[2];
++} ef_vi_qword;
++
++typedef ef_vi_qword ef_hw_event;
++
++#define EF_REQUEST_ID_BITS 16u
++#define EF_REQUEST_ID_MASK ((1u << EF_REQUEST_ID_BITS) - 1u)
++
++/*! \i_ef_event An [ef_event] is a token that identifies something that
++** has happened. Examples include packets received, packets transmitted
++** and errors.
++*/
++typedef union {
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ } generic;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ unsigned len :16;
++ unsigned flags :16;
++ } rx;
++ struct { /* This *must* have same layout as [rx]. */
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ unsigned len :16;
++ unsigned flags :16;
++ unsigned subtype :16;
++ } rx_discard;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ } tx;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ unsigned subtype :16;
++ } tx_error;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ unsigned q_id :16;
++ } rx_no_desc_trunc;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ unsigned data;
++ } sw;
++} ef_event;
++
++
++#define EF_EVENT_TYPE(e) ((e).generic.type)
++enum {
++ /** Good data was received. */
++ EF_EVENT_TYPE_RX,
++ /** Packets have been sent. */
++ EF_EVENT_TYPE_TX,
++ /** Data received and buffer consumed, but something is wrong. */
++ EF_EVENT_TYPE_RX_DISCARD,
++ /** Transmit of packet failed. */
++ EF_EVENT_TYPE_TX_ERROR,
++ /** Received packet was truncated due to lack of descriptors. */
++ EF_EVENT_TYPE_RX_NO_DESC_TRUNC,
++ /** Software generated event. */
++ EF_EVENT_TYPE_SW,
++ /** Event queue overflow. */
++ EF_EVENT_TYPE_OFLOW,
++};
++
++#define EF_EVENT_RX_BYTES(e) ((e).rx.len)
++#define EF_EVENT_RX_Q_ID(e) ((e).rx.q_id)
++#define EF_EVENT_RX_CONT(e) ((e).rx.flags & EF_EVENT_FLAG_CONT)
++#define EF_EVENT_RX_SOP(e) ((e).rx.flags & EF_EVENT_FLAG_SOP)
++#define EF_EVENT_RX_ISCSI_OKAY(e) ((e).rx.flags & EF_EVENT_FLAG_ISCSI_OK)
++#define EF_EVENT_FLAG_SOP 0x1
++#define EF_EVENT_FLAG_CONT 0x2
++#define EF_EVENT_FLAG_ISCSI_OK 0x4
++
++#define EF_EVENT_TX_Q_ID(e) ((e).tx.q_id)
++
++#define EF_EVENT_RX_DISCARD_Q_ID(e) ((e).rx_discard.q_id)
++#define EF_EVENT_RX_DISCARD_LEN(e) ((e).rx_discard.len)
++#define EF_EVENT_RX_DISCARD_TYPE(e) ((e).rx_discard.subtype)
++enum {
++ EF_EVENT_RX_DISCARD_CSUM_BAD,
++ EF_EVENT_RX_DISCARD_CRC_BAD,
++ EF_EVENT_RX_DISCARD_TRUNC,
++ EF_EVENT_RX_DISCARD_RIGHTS,
++ EF_EVENT_RX_DISCARD_OTHER,
++};
++
++#define EF_EVENT_TX_ERROR_Q_ID(e) ((e).tx_error.q_id)
++#define EF_EVENT_TX_ERROR_TYPE(e) ((e).tx_error.subtype)
++enum {
++ EF_EVENT_TX_ERROR_RIGHTS,
++ EF_EVENT_TX_ERROR_OFLOW,
++ EF_EVENT_TX_ERROR_2BIG,
++ EF_EVENT_TX_ERROR_BUS,
++};
++
++#define EF_EVENT_RX_NO_DESC_TRUNC_Q_ID(e) ((e).rx_no_desc_trunc.q_id)
++
++#define EF_EVENT_SW_DATA_MASK 0xffff
++#define EF_EVENT_SW_DATA(e) ((e).sw.data)
++
++#define EF_EVENT_FMT "[ev:%x:%08x:%08x]"
++#define EF_EVENT_PRI_ARG(e) (unsigned) (e).generic.type, \
++ (unsigned) (e).generic.ev.u32[1], \
++ (unsigned) (e).generic.ev.u32[0]
++
++#define EF_GET_HW_EV(e) ((e).generic.ev)
++#define EF_GET_HW_EV_PTR(e) (&(e).generic.ev)
++#define EF_GET_HW_EV_U64(e) ((e).generic.ev.u64[0])
++
++
++/* ***************** */
++
++/*! Used by netif shared state. Must use types of explicit size. */
++typedef struct {
++ uint16_t rx_last_desc_ptr; /* for RX duplicates */
++ uint8_t bad_sop; /* bad SOP detected */
++ uint8_t frag_num; /* next fragment #, 0=>SOP */
++} ef_rx_dup_state_t;
++
++
++/* Max number of ports on any SF NIC. */
++#define EFAB_DMAQS_PER_EVQ_MAX 32
++
++typedef struct {
++ ef_eventq_ptr evq_ptr;
++ int32_t trashed;
++ ef_rx_dup_state_t rx_dup_state[EFAB_DMAQS_PER_EVQ_MAX];
++} ef_eventq_state;
++
++
++/*! \i_ef_base [ef_iovec] is similar to the standard [struct iovec]. An
++** array of these is used to designate a scatter/gather list of I/O
++** buffers.
++*/
++typedef struct {
++ ef_addr iov_base EF_VI_ALIGN(8);
++ unsigned iov_len;
++} ef_iovec;
++
++/* Falcon constants */
++#define TX_EV_DESC_PTR_LBN 0
++
++/**********************************************************************
++ * ef_iobufset ********************************************************
++ **********************************************************************/
++
++/*! \i_ef_bufs An [ef_iobufset] is a collection of buffers to be used
++** with the NIC.
++*/
++typedef struct ef_iobufset {
++ unsigned magic;
++ unsigned bufs_mmap_bytes;
++ unsigned bufs_handle;
++ int bufs_ptr_off;
++ ef_addr bufs_addr;
++ unsigned bufs_size; /* size rounded to pow2 */
++ int bufs_num;
++ int faultonaccess;
++} ef_iobufset;
++
++
++/**********************************************************************
++ * ef_vi **************************************************************
++ **********************************************************************/
++
++enum ef_vi_flags {
++ EF_VI_RX_SCATTER = 0x1,
++ EF_VI_ISCSI_RX_HDIG = 0x2,
++ EF_VI_ISCSI_TX_HDIG = 0x4,
++ EF_VI_ISCSI_RX_DDIG = 0x8,
++ EF_VI_ISCSI_TX_DDIG = 0x10,
++ EF_VI_TX_PHYS_ADDR = 0x20,
++ EF_VI_RX_PHYS_ADDR = 0x40,
++ EF_VI_TX_IP_CSUM_DIS = 0x80,
++ EF_VI_TX_TCPUDP_CSUM_DIS= 0x100,
++ EF_VI_TX_TCPUDP_ONLY = 0x200,
++ /* Flags in range 0xXXXX0000 are for internal use. */
++};
++
++typedef struct {
++ uint32_t added;
++ uint32_t removed;
++} ef_vi_txq_state;
++
++typedef struct {
++ uint32_t added;
++ uint32_t removed;
++} ef_vi_rxq_state;
++
++typedef struct {
++ uint32_t mask;
++ void* doorbell;
++ void* descriptors;
++ uint16_t* ids;
++ unsigned misalign_mask;
++} ef_vi_txq;
++
++typedef struct {
++ uint32_t mask;
++ void* doorbell;
++ void* descriptors;
++ uint16_t* ids;
++} ef_vi_rxq;
++
++typedef struct {
++ ef_eventq_state evq;
++ ef_vi_txq_state txq;
++ ef_vi_rxq_state rxq;
++ /* Followed by request id fifos. */
++} ef_vi_state;
++
++/*! \i_ef_vi A virtual interface.
++**
++** An [ef_vi] represents a virtual interface on a specific NIC. A
++** virtual interface is a collection of an event queue and two DMA queues
++** used to pass Ethernet frames between the transport implementation and
++** the network.
++*/
++typedef struct ef_vi {
++ unsigned magic;
++
++ unsigned vi_resource_id;
++ unsigned vi_resource_handle_hack;
++ unsigned vi_i;
++
++ char* vi_mem_mmap_ptr;
++ int vi_mem_mmap_bytes;
++ char* vi_io_mmap_ptr;
++ int vi_io_mmap_bytes;
++
++ ef_eventq_state* evq_state;
++ char* evq_base;
++ unsigned evq_mask;
++ ef_vi_ioaddr_t evq_timer_reg;
++
++ ef_vi_txq vi_txq;
++ ef_vi_rxq vi_rxq;
++ ef_vi_state* ep_state;
++ enum ef_vi_flags vi_flags;
++} ef_vi;
++
++
++enum ef_vi_arch {
++ EF_VI_ARCH_FALCON,
++};
++
++
++struct ef_vi_nic_type {
++ unsigned char arch;
++ char variant;
++ unsigned char revision;
++};
++
++
++/* This structure is opaque to the client & used to pass mapping data
++ * from the resource manager to the ef_vi library for ef_vi_init().
++ */
++struct vi_mappings {
++ uint32_t signature;
++# define VI_MAPPING_VERSION 0x02 /*Byte: Increment me if struct altered*/
++# define VI_MAPPING_SIGNATURE (0xBA1150 + VI_MAPPING_VERSION)
++
++ struct ef_vi_nic_type nic_type;
++
++ int vi_instance;
++
++ unsigned evq_bytes;
++ char* evq_base;
++ ef_vi_ioaddr_t evq_timer_reg;
++
++ unsigned rx_queue_capacity;
++ ef_vi_ioaddr_t rx_dma_ef1;
++ char* rx_dma_falcon;
++ ef_vi_ioaddr_t rx_bell;
++
++ unsigned tx_queue_capacity;
++ ef_vi_ioaddr_t tx_dma_ef1;
++ char* tx_dma_falcon;
++ ef_vi_ioaddr_t tx_bell;
++};
++/* This is used by clients to allocate a suitably sized buffer for the
++ * resource manager to fill & ef_vi_init() to use. */
++#define VI_MAPPINGS_SIZE (sizeof(struct vi_mappings))
++
++
++/**********************************************************************
++ * ef_config **********************************************************
++ **********************************************************************/
++
++struct ef_config_t {
++ int log; /* debug logging level */
++};
++
++extern struct ef_config_t ef_config;
++
++
++/**********************************************************************
++ * ef_vi **************************************************************
++ **********************************************************************/
++
++/* Initialise [data_area] with information required to initialise an ef_vi.
++ * In the following, an unused param should be set to NULL. Note that
++ * [iobuf_mmap] is required for falcon at user level but must be NULL for
++ * the normal driver.
++ *
++ * \param data_area [in,out] required, must ref at least VI_MAPPINGS_SIZE
++ * bytes
++ * \param rxq_capacity [in] number of descriptors in RX DMA queue. Specify
++ * 0 for no RX queue.
++ * \param txq_capacity [in] number of descriptors in TX DMA queue. Specify
++ * 0 for no TX queue.
++ * \param instance [in] VI instance number
++ * \param io_mmap [in] ef1, required
++ * falcon, required
++ * \param iobuf_mmap [in] ef1, UL: unused
++ * falcon, UL: required
++ */
++extern void ef_vi_init_mapping_vi(void* data_area, struct ef_vi_nic_type,
++ unsigned rxq_capacity,
++ unsigned txq_capacity, int instance,
++ void* io_mmap, void* iobuf_mmap_rx,
++ void* iobuf_mmap_tx, enum ef_vi_flags);
++
++
++extern void ef_vi_init_mapping_evq(void* data_area, struct ef_vi_nic_type,
++ int instance, unsigned evq_bytes,
++ void* base, void* timer_reg);
++
++ef_vi_inline unsigned ef_vi_resource_id(ef_vi* vi)
++{
++ return vi->vi_resource_id;
++}
++
++ef_vi_inline enum ef_vi_flags ef_vi_flags(ef_vi* vi)
++{
++ return vi->vi_flags;
++}
++
++
++/**********************************************************************
++ * Receive interface **************************************************
++ **********************************************************************/
++
++/*! \i_ef_vi Returns the amount of space in the RX descriptor ring.
++**
++** \return the amount of space in the queue.
++*/
++ef_vi_inline int ef_vi_receive_space(ef_vi* vi)
++{
++ ef_vi_rxq_state* qs = &vi->ep_state->rxq;
++ return vi->vi_rxq.mask - (qs->added - qs->removed);
++}
++
++
++/*! \i_ef_vi Returns the fill level of the RX descriptor ring.
++**
++** \return the fill level of the queue.
++*/
++ef_vi_inline int ef_vi_receive_fill_level(ef_vi* vi)
++{
++ ef_vi_rxq_state* qs = &vi->ep_state->rxq;
++ return qs->added - qs->removed;
++}
++
++
++ef_vi_inline int ef_vi_receive_capacity(ef_vi* vi)
++{
++ return vi->vi_rxq.mask;
++}
++
++/*! \i_ef_vi Complete a receive operation.
++**
++** When a receive completion event is received, it should be passed to
++** this function. The request-id for the buffer that the packet was
++** delivered to is returned.
++**
++** After this function returns, more space may be available in the
++** receive queue.
++*/
++extern ef_request_id ef_vi_receive_done(const ef_vi*, const ef_event*);
++
++/*! \i_ef_vi Return request ID indicated by a receive event
++ */
++ef_vi_inline ef_request_id ef_vi_receive_request_id(const ef_vi* vi,
++ const ef_event* ef_ev)
++{
++ const ef_vi_qword* ev = EF_GET_HW_EV_PTR(*ef_ev);
++ return ev->u32[0] & vi->vi_rxq.mask;
++}
++
++
++/*! \i_ef_vi Form a receive descriptor.
++**
++** If \c initial_rx_bytes is zero, use a reception size at least as large
++** as an MTU.
++*/
++extern int ef_vi_receive_init(ef_vi* vi, ef_addr addr, ef_request_id dma_id,
++ int initial_rx_bytes);
++
++/*! \i_ef_vi Submit initialised receive descriptors to the NIC. */
++extern void ef_vi_receive_push(ef_vi* vi);
++
++/*! \i_ef_vi Post a buffer on the receive queue.
++**
++** \return 0 on success, or -EAGAIN if the receive queue is full
++*/
++extern int ef_vi_receive_post(ef_vi*, ef_addr addr,
++ ef_request_id dma_id);
++
++/**********************************************************************
++ * Transmit interface *************************************************
++ **********************************************************************/
++
++/*! \i_ef_vi Return the amount of space (in descriptors) in the transmit
++** queue.
++**
++** \return the amount of space in the queue (in descriptors)
++*/
++ef_vi_inline int ef_vi_transmit_space(ef_vi* vi)
++{
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ return vi->vi_txq.mask - (qs->added - qs->removed);
++}
++
++
++/*! \i_ef_vi Returns the fill level of the TX descriptor ring.
++**
++** \return the fill level of the queue.
++*/
++ef_vi_inline int ef_vi_transmit_fill_level(ef_vi* vi)
++{
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ return qs->added - qs->removed;
++}
++
++
++/*! \i_ef_vi Returns the total capacity of the TX descriptor ring.
++**
++** \return the capacity of the queue.
++*/
++ef_vi_inline int ef_vi_transmit_capacity(ef_vi* vi)
++{
++ return vi->vi_txq.mask;
++}
++
++
++/*! \i_ef_vi Transmit a packet.
++**
++** \param bytes must be greater than ETH_ZLEN.
++** \return -EAGAIN if the transmit queue is full, or 0 on success
++*/
++extern int ef_vi_transmit(ef_vi*, ef_addr, int bytes, ef_request_id dma_id);
++
++/*! \i_ef_vi Transmit a packet using a gather list.
++**
++** \param iov_len must be greater than zero
++** \param iov the first element must have non-zero length (others need not)
++**
++** \return -EAGAIN if the queue is full, or 0 on success
++*/
++extern int ef_vi_transmitv(ef_vi*, const ef_iovec* iov, int iov_len,
++ ef_request_id dma_id);
++
++/*! \i_ef_vi Initialise a DMA request.
++**
++** \return -EAGAIN if the queue is full, or 0 on success
++*/
++extern int ef_vi_transmit_init(ef_vi*, ef_addr, int bytes,
++ ef_request_id dma_id);
++
++/*! \i_ef_vi Initialise a DMA request.
++**
++** \return -EAGAIN if the queue is full, or 0 on success
++*/
++extern int ef_vi_transmitv_init(ef_vi*, const ef_iovec*, int iov_len,
++ ef_request_id dma_id);
++
++/*! \i_ef_vi Submit DMA requests to the NIC.
++**
++** The DMA requests must have been initialised using
++** ef_vi_transmit_init() or ef_vi_transmitv_init().
++*/
++extern void ef_vi_transmit_push(ef_vi*);
++
++
++/*! \i_ef_vi Maximum number of transmit completions per transmit event. */
++#define EF_VI_TRANSMIT_BATCH 64
++
++/*! \i_ef_vi Determine the set of [ef_request_id]s for each DMA request
++** which has been completed by a given transmit completion
++** event.
++**
++** \param ids must point to an array of length EF_VI_TRANSMIT_BATCH
++** \return the number of valid [ef_request_id]s (can be zero)
++*/
++extern int ef_vi_transmit_unbundle(ef_vi* ep, const ef_event*,
++ ef_request_id* ids);
++
++
++/*! \i_ef_event Returns true if ef_eventq_poll() will return event(s). */
++extern int ef_eventq_has_event(ef_vi* vi);
++
++/*! \i_ef_event Returns true if the event queue contains more than
++** [look_ahead] events.
++**
++** This looks ahead in the event queue, so has the property that it will
++** not ping-pong a cache-line when it is called concurrently with events
++** being delivered.
++*/
++extern int ef_eventq_has_many_events(ef_vi* evq, int look_ahead);
++
++/*! Type of function to handle unknown events arriving on event queue
++** Return CI_TRUE iff the event has been handled.
++*/
++typedef int/*bool*/ ef_event_handler_fn(void* priv, ef_vi* evq, ef_event* ev);
++
++/*! Standard poll exception routine */
++extern int/*bool*/ ef_eventq_poll_exception(void* priv, ef_vi* evq,
++ ef_event* ev);
++
++/*! \i_ef_event Retrieve events from the event queue, handle RX/TX events
++** and pass any others to an exception handler function
++**
++** \return The number of events retrieved.
++*/
++extern int ef_eventq_poll_evs(ef_vi* evq, ef_event* evs, int evs_len,
++ ef_event_handler_fn *exception, void *expt_priv);
++
++/*! \i_ef_event Retrieve events from the event queue.
++**
++** \return The number of events retrieved.
++*/
++ef_vi_inline int ef_eventq_poll(ef_vi* evq, ef_event* evs, int evs_len)
++{
++ return ef_eventq_poll_evs(evq, evs, evs_len,
++ &ef_eventq_poll_exception, (void*)0);
++}
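++
++/* A usage sketch (illustration only, never compiled; "demo_poll" and
++ * the commented-out "handle_rx" are hypothetical, not part of this
++ * API): a typical loop drains events and unbundles TX completions
++ * into request ids. */
++#if 0
++static void demo_poll(ef_vi* vi)
++{
++	ef_event evs[16];
++	ef_request_id ids[EF_VI_TRANSMIT_BATCH];
++	int i, n = ef_eventq_poll(vi, evs, 16);
++	for( i = 0; i < n; ++i )
++		switch( EF_EVENT_TYPE(evs[i]) ) {
++		case EF_EVENT_TYPE_RX:
++			/* handle_rx(vi, EF_EVENT_RX_Q_ID(evs[i]),
++			 *           EF_EVENT_RX_BYTES(evs[i])); */
++			break;
++		case EF_EVENT_TYPE_TX:
++			ef_vi_transmit_unbundle(vi, &evs[i], ids);
++			break;
++		default:
++			break;
++		}
++}
++#endif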
++
++/*! \i_ef_event Returns the capacity of an event queue. */
++ef_vi_inline int ef_eventq_capacity(ef_vi* vi)
++{
++ return (vi->evq_mask + 1u) / sizeof(ef_hw_event);
++}
++
++/* Returns the instance ID of [vi] */
++ef_vi_inline unsigned ef_vi_instance(ef_vi* vi)
++{ return vi->vi_i; }
++
++
++/**********************************************************************
++ * Initialisation *****************************************************
++ **********************************************************************/
++
++/*! Return size of state buffer of an initialised VI. */
++extern int ef_vi_state_bytes(ef_vi*);
++
++/*! Return size of buffer needed for VI state given sizes of RX and TX
++** DMA queues. Queue sizes must be legal sizes (power of 2), or 0 (no
++** queue).
++*/
++extern int ef_vi_calc_state_bytes(int rxq_size, int txq_size);
++
++/*! Initialise [ef_vi] from the provided resources. [vvis] must have been
++** created by ef_make_vi_data() & remains owned by the caller.
++*/
++extern void ef_vi_init(ef_vi*, void* vi_info, ef_vi_state* state,
++ ef_eventq_state* evq_state, enum ef_vi_flags);
++
++extern void ef_vi_state_init(ef_vi*);
++extern void ef_eventq_state_init(ef_vi*);
++
++/*! Convert an efhw device arch to an ef_vi_arch, or return -1 if not
++** recognised.
++*/
++extern int ef_vi_arch_from_efhw_arch(int efhw_arch);
++
++
++#endif /* __EFAB_EF_VI_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/falcon_event.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/falcon_event.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/falcon_event.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/falcon_event.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,346 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Routine to poll event queues.
++ * \date 2003/03/04
++ */
++
++/*! \cidoxg_lib_ef */
++#include "ef_vi_internal.h"
++
++/* Be worried about this on byteswapped machines */
++/* Due to crazy chipsets, we see the event words being written in
++** arbitrary order (bug4539). So the test for the presence of an event
++** must ensure that both halves have changed from the null value.
++*/
++# define EF_VI_IS_EVENT(evp) \
++ ( (((evp)->opaque.a != (uint32_t)-1) && \
++ ((evp)->opaque.b != (uint32_t)-1)) )
++
++
++#ifdef NDEBUG
++# define IS_DEBUG 0
++#else
++# define IS_DEBUG 1
++#endif
++
++
++/*! Check for RX events with inconsistent SOP/CONT
++**
++** Returns true if this event should be discarded
++*/
++ef_vi_inline int ef_eventq_is_rx_sop_cont_bad_efab(ef_vi* vi,
++ const ef_vi_qword* ev)
++{
++ ef_rx_dup_state_t* rx_dup_state;
++ uint8_t* bad_sop;
++
++ unsigned label = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ unsigned sop = QWORD_TEST_BIT(RX_SOP, *ev);
++
++ ef_assert(vi);
++ ef_assert_lt(label, EFAB_DMAQS_PER_EVQ_MAX);
++
++ rx_dup_state = &vi->evq_state->rx_dup_state[label];
++ bad_sop = &rx_dup_state->bad_sop;
++
++ if( ! ((vi->vi_flags & EF_VI_BUG5692_WORKAROUND) || IS_DEBUG) ) {
++ *bad_sop = (*bad_sop && !sop);
++ }
++ else {
++ unsigned cont = QWORD_TEST_BIT(RX_JUMBO_CONT, *ev);
++ uint8_t *frag_num = &rx_dup_state->frag_num;
++
++ /* bad_sop should latch till the next sop */
++ *bad_sop = (*bad_sop && !sop) || ( !!sop != (*frag_num==0) );
++
++ /* we do not check the number of bytes relative to the
++ * fragment number and size of the user rx buffer here
++ * because we don't know the size of the user rx
++ * buffer - we probably should perform this check in
++ * the nearest code calling this though.
++ */
++ *frag_num = cont ? (*frag_num + 1) : 0;
++ }
++
++ return *bad_sop;
++}
++
++
++ef_vi_inline int falcon_rx_check_dup(ef_vi* evq, ef_event* ev_out,
++ const ef_vi_qword* ev)
++{
++ unsigned q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ unsigned desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
++ ef_rx_dup_state_t* rx_dup_state = &evq->evq_state->rx_dup_state[q_id];
++
++ if(likely( desc_ptr != rx_dup_state->rx_last_desc_ptr )) {
++ rx_dup_state->rx_last_desc_ptr = desc_ptr;
++ return 0;
++ }
++
++ rx_dup_state->rx_last_desc_ptr = desc_ptr;
++ rx_dup_state->bad_sop = 1;
++#ifndef NDEBUG
++ rx_dup_state->frag_num = 0;
++#endif
++ BUG_ON(!QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev));
++ BUG_ON( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev));
++ BUG_ON(QWORD_GET_U(RX_EV_BYTE_CNT, *ev) != 0);
++ ev_out->rx_no_desc_trunc.type = EF_EVENT_TYPE_RX_NO_DESC_TRUNC;
++ ev_out->rx_no_desc_trunc.q_id = q_id;
++ return 1;
++}
++
++
++ef_vi_inline void falcon_rx_event(ef_event* ev_out, const ef_vi_qword* ev)
++{
++ if(likely( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev) )) {
++ ev_out->rx.type = EF_EVENT_TYPE_RX;
++ ev_out->rx.q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ ev_out->rx.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
++ if( QWORD_TEST_BIT(RX_SOP, *ev) )
++ ev_out->rx.flags = EF_EVENT_FLAG_SOP;
++ else
++ ev_out->rx.flags = 0;
++ if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
++ ev_out->rx.flags |= EF_EVENT_FLAG_CONT;
++ if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
++ ev_out->rx.flags |= EF_EVENT_FLAG_ISCSI_OK;
++ }
++ else {
++ ev_out->rx_discard.type = EF_EVENT_TYPE_RX_DISCARD;
++ ev_out->rx_discard.q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ ev_out->rx_discard.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
++#if 1 /* hack for ptloop compatibility: ?? TODO purge */
++ if( QWORD_TEST_BIT(RX_SOP, *ev) )
++ ev_out->rx_discard.flags = EF_EVENT_FLAG_SOP;
++ else
++ ev_out->rx_discard.flags = 0;
++ if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
++ ev_out->rx_discard.flags |= EF_EVENT_FLAG_CONT;
++ if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
++ ev_out->rx_discard.flags |= EF_EVENT_FLAG_ISCSI_OK;
++#endif
++ /* Order matters here: more fundamental errors first. */
++ if( QWORD_TEST_BIT(RX_EV_BUF_OWNER_ID_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_RIGHTS;
++ else if( QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_TRUNC;
++ else if( QWORD_TEST_BIT(RX_EV_ETH_CRC_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_CRC_BAD;
++ else if( QWORD_TEST_BIT(RX_EV_IP_HDR_CHKSUM_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_CSUM_BAD;
++ else if( QWORD_TEST_BIT(RX_EV_TCP_UDP_CHKSUM_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_CSUM_BAD;
++ else
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_OTHER;
++ }
++}
++
++
++ef_vi_inline void falcon_tx_event(ef_event* ev_out, const ef_vi_qword* ev)
++{
++ /* Danger danger! No matter what we ask for wrt batching, we
++ ** will get a batched event every 16 descriptors, and we also
++ ** get dma-queue-empty events. i.e. Duplicates are expected.
++ **
++ ** In addition, if it's been requested in the descriptor, we
++ ** get an event per descriptor. (We don't currently request
++ ** this).
++ */
++ if(likely( QWORD_TEST_BIT(TX_EV_COMP, *ev) )) {
++ ev_out->tx.type = EF_EVENT_TYPE_TX;
++ ev_out->tx.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
++ }
++ else {
++ ev_out->tx_error.type = EF_EVENT_TYPE_TX_ERROR;
++ ev_out->tx_error.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
++ if(likely( QWORD_TEST_BIT(TX_EV_BUF_OWNER_ID_ERR, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_RIGHTS;
++ else if(likely( QWORD_TEST_BIT(TX_EV_WQ_FF_FULL, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_OFLOW;
++ else if(likely( QWORD_TEST_BIT(TX_EV_PKT_TOO_BIG, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_2BIG;
++ else if(likely( QWORD_TEST_BIT(TX_EV_PKT_ERR, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_BUS;
++ }
++}
++
++
++static void mark_bad(ef_event* ev)
++{
++ ev->generic.ev.u64[0] &= ~((uint64_t) 1u << RX_EV_PKT_OK_LBN);
++}
++
++
++int ef_eventq_poll_evs(ef_vi* evq, ef_event* evs, int evs_len,
++ ef_event_handler_fn *exception, void *expt_priv)
++{
++ int evs_len_orig = evs_len;
++
++ EF_VI_CHECK_EVENT_Q(evq);
++ ef_assert(evs);
++ ef_assert_gt(evs_len, 0);
++
++ if(unlikely( EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, 1)) ))
++ goto overflow;
++
++ do {
++ { /* Read the event out of the ring, then fiddle with
++ * the copied version. The reason is that the ring is
++ * likely to get pushed out of cache by another
++ * event being delivered by hardware. */
++ ef_vi_event* ev = EF_VI_EVENT_PTR(evq, 0);
++ if( ! EF_VI_IS_EVENT(ev) )
++ break;
++ evs->generic.ev.u64[0] = cpu_to_le64 (ev->u64);
++ evq->evq_state->evq_ptr += sizeof(ef_vi_event);
++ ev->u64 = (uint64_t)(int64_t) -1;
++ }
++
++ /* Ugly: Exploit the fact that event code lies in top
++ * bits of event. */
++ ef_assert_ge(EV_CODE_LBN, 32u);
++ switch( evs->generic.ev.u32[1] >> (EV_CODE_LBN - 32u) ) {
++ case RX_IP_EV_DECODE:
++ /* Look for duplicate desc_ptr: it signals
++ * that a jumbo frame was truncated because we
++ * ran out of descriptors. */
++ if(unlikely( falcon_rx_check_dup
++ (evq, evs, &evs->generic.ev) )) {
++ --evs_len;
++ ++evs;
++ break;
++ }
++ else {
++				/* Cope with FalconA1 bugs where RX
++				 * gives inconsistent RX events.  Mark
++				 * events as bad until SOP becomes
++				 * consistent again.  Note that
++				 * ef_eventq_is_rx_sop_cont_bad_efab()
++				 * has side effects, so order is
++				 * important.
++				 */
++ if(unlikely
++ (ef_eventq_is_rx_sop_cont_bad_efab
++ (evq, &evs->generic.ev) )) {
++ mark_bad(evs);
++ }
++ }
++ falcon_rx_event(evs, &evs->generic.ev);
++ --evs_len;
++ ++evs;
++ break;
++
++ case TX_IP_EV_DECODE:
++ falcon_tx_event(evs, &evs->generic.ev);
++ --evs_len;
++ ++evs;
++ break;
++
++ default:
++ break;
++ }
++ } while( evs_len );
++
++ return evs_len_orig - evs_len;
++
++
++ overflow:
++ evs->generic.type = EF_EVENT_TYPE_OFLOW;
++ evs->generic.ev.u64[0] = (uint64_t)((int64_t)-1);
++ return 1;
++}
++
++
++int/*bool*/ ef_eventq_poll_exception(void* priv, ef_vi* evq, ef_event* ev)
++{
++ int /*bool*/ handled = 0;
++
++ switch( ev->generic.ev.u32[1] >> (EV_CODE_LBN - 32u) ) {
++ case DRIVER_EV_DECODE:
++ if( QWORD_GET_U(DRIVER_EV_SUB_CODE, ev->generic.ev) ==
++ EVQ_INIT_DONE_EV_DECODE )
++ /* EVQ initialised event: ignore. */
++ handled = 1;
++ break;
++ }
++ return handled;
++}
++
++
++void ef_eventq_iterate(ef_vi* vi,
++ void (*fn)(void* arg, ef_vi*, int rel_pos,
++ int abs_pos, void* event),
++ void* arg, int stop_at_end)
++{
++ int i, size_evs = (vi->evq_mask + 1) / sizeof(ef_vi_event);
++
++ for( i = 0; i < size_evs; ++i ) {
++ ef_vi_event* e = EF_VI_EVENT_PTR(vi, -i);
++ if( EF_VI_IS_EVENT(e) )
++ fn(arg, vi, i,
++ EF_VI_EVENT_OFFSET(vi, -i) / sizeof(ef_vi_event),
++ e);
++ else if( stop_at_end )
++ break;
++ }
++}
++
++
++int ef_eventq_has_event(ef_vi* vi)
++{
++ return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, 0));
++}
++
++
++int ef_eventq_has_many_events(ef_vi* vi, int look_ahead)
++{
++ ef_assert_ge(look_ahead, 0);
++ return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, -look_ahead));
++}
++
++
++int ef_eventq_has_rx_event(ef_vi* vi)
++{
++ ef_vi_event* ev;
++ int i, n_evs = 0;
++
++ for( i = 0; EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, i)); --i ) {
++ ev = EF_VI_EVENT_PTR(vi, i);
++ if( EFVI_FALCON_EVENT_CODE(ev) == EF_EVENT_TYPE_RX ) n_evs++;
++ }
++ return n_evs;
++}
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/falcon_vi.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/falcon_vi.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/falcon_vi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/falcon_vi.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,465 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr, stg
++ * \brief Falcon-specific VI
++ * \date 2006/11/30
++ */
++
++#include "ef_vi_internal.h"
++
++
++#define EFVI_FALCON_DMA_TX_FRAG 1
++
++
++/* TX descriptor for both physical and virtual packet transfers */
++typedef union {
++ uint32_t dword[2];
++} ef_vi_falcon_dma_tx_buf_desc;
++typedef ef_vi_falcon_dma_tx_buf_desc ef_vi_falcon_dma_tx_phys_desc;
++
++
++/* RX descriptor for physical addressed transfers */
++typedef union {
++ uint32_t dword[2];
++} ef_vi_falcon_dma_rx_phys_desc;
++
++
++/* RX descriptor for virtual packet transfers */
++typedef struct {
++ uint32_t dword[1];
++} ef_vi_falcon_dma_rx_buf_desc;
++
++/* Buffer table index */
++typedef uint32_t ef_vi_buffer_addr_t;
++
++ef_vi_inline int64_t dma_addr_to_u46(int64_t src_dma_addr)
++{
++ return (src_dma_addr & __FALCON_MASK(46, int64_t));
++}
++
++/*! Set up a physical address based descriptor with a specified length */
++ef_vi_inline void
++__falcon_dma_rx_calc_ip_phys(ef_vi_dma_addr_t dest_pa,
++ ef_vi_falcon_dma_rx_phys_desc *desc,
++ int bytes)
++{
++ int region = 0; /* TODO fixme */
++ int64_t dest = dma_addr_to_u46(dest_pa); /* lower 46 bits */
++
++ DWCHCK(__DW2(RX_KER_BUF_SIZE_LBN), RX_KER_BUF_SIZE_WIDTH);
++ DWCHCK(__DW2(RX_KER_BUF_REGION_LBN),RX_KER_BUF_REGION_WIDTH);
++
++ LWCHK(RX_KER_BUF_ADR_LBN, RX_KER_BUF_ADR_WIDTH);
++
++ RANGECHCK(bytes, RX_KER_BUF_SIZE_WIDTH);
++ RANGECHCK(region, RX_KER_BUF_REGION_WIDTH);
++
++ ef_assert(desc);
++
++ desc->dword[1] = ((bytes << __DW2(RX_KER_BUF_SIZE_LBN)) |
++ (region << __DW2(RX_KER_BUF_REGION_LBN)) |
++ (HIGH(dest,
++ RX_KER_BUF_ADR_LBN,
++ RX_KER_BUF_ADR_WIDTH)));
++
++ desc->dword[0] = LOW(dest,
++ RX_KER_BUF_ADR_LBN,
++ RX_KER_BUF_ADR_WIDTH);
++}
++
++/*! Set up a virtual buffer descriptor for an IPMODE transfer */
++ef_vi_inline void
++__falcon_dma_tx_calc_ip_buf(unsigned buf_id, unsigned buf_ofs, unsigned bytes,
++ int port, int frag,
++ ef_vi_falcon_dma_tx_buf_desc *desc)
++{
++ DWCHCK(__DW2(TX_USR_PORT_LBN), TX_USR_PORT_WIDTH);
++ DWCHCK(__DW2(TX_USR_CONT_LBN), TX_USR_CONT_WIDTH);
++ DWCHCK(__DW2(TX_USR_BYTE_CNT_LBN), TX_USR_BYTE_CNT_WIDTH);
++ LWCHK(RX_KER_BUF_ADR_LBN, RX_KER_BUF_ADR_WIDTH);
++ DWCHCK(TX_USR_BYTE_OFS_LBN, TX_USR_BYTE_OFS_WIDTH);
++
++ RANGECHCK(bytes, TX_USR_BYTE_CNT_WIDTH);
++ RANGECHCK(port, TX_USR_PORT_WIDTH);
++ RANGECHCK(frag, TX_USR_CONT_WIDTH);
++ RANGECHCK(buf_id, TX_USR_BUF_ID_WIDTH);
++ RANGECHCK(buf_ofs, TX_USR_BYTE_OFS_WIDTH);
++
++ ef_assert(desc);
++
++ desc->dword[1] = ((port << __DW2(TX_USR_PORT_LBN)) |
++ (frag << __DW2(TX_USR_CONT_LBN)) |
++ (bytes << __DW2(TX_USR_BYTE_CNT_LBN)) |
++ (HIGH(buf_id,
++ TX_USR_BUF_ID_LBN,
++ TX_USR_BUF_ID_WIDTH)));
++
++ desc->dword[0] = ((LOW(buf_id,
++ TX_USR_BUF_ID_LBN,
++ (TX_USR_BUF_ID_WIDTH))) |
++ (buf_ofs << TX_USR_BYTE_OFS_LBN));
++}
++
++ef_vi_inline void
++falcon_dma_tx_calc_ip_buf_4k(unsigned buf_vaddr, unsigned bytes,
++ int port, int frag,
++ ef_vi_falcon_dma_tx_buf_desc *desc)
++{
++ /* TODO FIXME [buf_vaddr] consists of the buffer index in the
++ ** high bits, and an offset in the low bits. Assumptions
++	** permeate the code that these can be rolled into one 32bit
++ ** value, so this is currently preserved for Falcon. But we
++ ** should change to support 8K pages
++ */
++ unsigned buf_id = EFVI_FALCON_BUFFER_4K_PAGE(buf_vaddr);
++ unsigned buf_ofs = EFVI_FALCON_BUFFER_4K_OFF(buf_vaddr);
++
++ __falcon_dma_tx_calc_ip_buf( buf_id, buf_ofs, bytes, port, frag, desc);
++}
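++
++/* Illustrative example (not part of the driver): under the 4K layout
++ * described in the comment above, a buffer virtual address such as
++ * 0x00012345 would be expected to split as
++ *
++ *   buf_id  = 0x00012345 >> 12   = 0x12   (buffer table index)
++ *   buf_ofs = 0x00012345 & 0xfff = 0x345  (offset within the 4K page)
++ *
++ * assuming EFVI_FALCON_BUFFER_4K_PAGE/_OFF shift and mask by the 4K
++ * page size; their exact definitions live elsewhere in this patch. */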
++
++ef_vi_inline void
++falcon_dma_tx_calc_ip_buf(unsigned buf_vaddr, unsigned bytes, int port,
++ int frag, ef_vi_falcon_dma_tx_buf_desc *desc)
++{
++ falcon_dma_tx_calc_ip_buf_4k(buf_vaddr, bytes, port, frag, desc);
++}
++
++/*! Set up a virtual buffer based descriptor */
++ef_vi_inline void
++__falcon_dma_rx_calc_ip_buf(unsigned buf_id, unsigned buf_ofs,
++ ef_vi_falcon_dma_rx_buf_desc *desc)
++{
++ /* check alignment of buffer offset and pack */
++ ef_assert((buf_ofs & 0x1) == 0);
++
++ buf_ofs >>= 1;
++
++ DWCHCK(RX_USR_2BYTE_OFS_LBN, RX_USR_2BYTE_OFS_WIDTH);
++ DWCHCK(RX_USR_BUF_ID_LBN, RX_USR_BUF_ID_WIDTH);
++
++ RANGECHCK(buf_ofs, RX_USR_2BYTE_OFS_WIDTH);
++ RANGECHCK(buf_id, RX_USR_BUF_ID_WIDTH);
++
++ ef_assert(desc);
++
++ desc->dword[0] = ((buf_ofs << RX_USR_2BYTE_OFS_LBN) |
++ (buf_id << RX_USR_BUF_ID_LBN));
++}
++
++ef_vi_inline void
++falcon_dma_rx_calc_ip_buf_4k(unsigned buf_vaddr,
++ ef_vi_falcon_dma_rx_buf_desc *desc)
++{
++ /* TODO FIXME [buf_vaddr] consists of the buffer index in the
++ ** high bits, and an offset in the low bits. Assumptions
++ ** permeate the code that these can be rolled into one 32bit
++ ** value, so this is currently preserved for Falcon. But we
++ ** should change to support 8K pages
++ */
++ unsigned buf_id = EFVI_FALCON_BUFFER_4K_PAGE(buf_vaddr);
++ unsigned buf_ofs = EFVI_FALCON_BUFFER_4K_OFF(buf_vaddr);
++
++ __falcon_dma_rx_calc_ip_buf(buf_id, buf_ofs, desc);
++}
++
++ef_vi_inline void
++falcon_dma_rx_calc_ip_buf(unsigned buf_vaddr,
++ ef_vi_falcon_dma_rx_buf_desc *desc)
++{
++ falcon_dma_rx_calc_ip_buf_4k(buf_vaddr, desc);
++}
++
++
++ef_vi_inline ef_vi_dma_addr_t ef_physaddr(ef_addr efaddr)
++{
++ return (ef_vi_dma_addr_t) efaddr;
++}
++
++
++/*! Convert between an ef_addr and a buffer table index
++** Assert that this was not a physical address
++*/
++ef_vi_inline ef_vi_buffer_addr_t ef_bufaddr(ef_addr efaddr)
++{
++ ef_assert(efaddr < ((uint64_t)1 << 32) );
++
++ return (ef_vi_buffer_addr_t) efaddr;
++}
++
++
++/*! Set up a physical address based descriptor for an IPMODE transfer */
++ef_vi_inline void
++falcon_dma_tx_calc_ip_phys(ef_vi_dma_addr_t src_dma_addr, unsigned bytes,
++ int port, int frag,
++ ef_vi_falcon_dma_tx_phys_desc *desc)
++{
++
++ int region = 0; /* FIXME */
++ int64_t src = dma_addr_to_u46(src_dma_addr); /* lower 46 bits */
++
++ DWCHCK(__DW2(TX_KER_PORT_LBN), TX_KER_PORT_WIDTH);
++ DWCHCK(__DW2(TX_KER_CONT_LBN), TX_KER_CONT_WIDTH);
++ DWCHCK(__DW2(TX_KER_BYTE_CNT_LBN), TX_KER_BYTE_CNT_WIDTH);
++ DWCHCK(__DW2(TX_KER_BUF_REGION_LBN),TX_KER_BUF_REGION_WIDTH);
++
++ LWCHK(TX_KER_BUF_ADR_LBN, TX_KER_BUF_ADR_WIDTH);
++
++ RANGECHCK(port, TX_KER_PORT_WIDTH);
++ RANGECHCK(frag, TX_KER_CONT_WIDTH);
++ RANGECHCK(bytes, TX_KER_BYTE_CNT_WIDTH);
++ RANGECHCK(region, TX_KER_BUF_REGION_WIDTH);
++
++ desc->dword[1] = ((port << __DW2(TX_KER_PORT_LBN)) |
++ (frag << __DW2(TX_KER_CONT_LBN)) |
++ (bytes << __DW2(TX_KER_BYTE_CNT_LBN)) |
++ (region << __DW2(TX_KER_BUF_REGION_LBN)) |
++ (HIGH(src,
++ TX_KER_BUF_ADR_LBN,
++ TX_KER_BUF_ADR_WIDTH)));
++
++ ef_assert_equal(TX_KER_BUF_ADR_LBN, 0);
++ desc->dword[0] = (uint32_t) src_dma_addr;
++}
++
++
++void falcon_vi_init(ef_vi* vi, void* vvis)
++{
++ struct vi_mappings *vm = (struct vi_mappings*)vvis;
++ uint16_t* ids;
++
++ ef_assert(vi);
++ ef_assert(vvis);
++ ef_assert_equal(vm->signature, VI_MAPPING_SIGNATURE);
++ ef_assert_equal(vm->nic_type.arch, EF_VI_ARCH_FALCON);
++
++ /* Initialise masks to zero, so that ef_vi_state_init() will
++ ** not do any harm when we don't have DMA queues. */
++ vi->vi_rxq.mask = vi->vi_txq.mask = 0;
++
++ /* Used for BUG5391_WORKAROUND. */
++ vi->vi_txq.misalign_mask = 0;
++
++ /* Initialise doorbell addresses to a distinctive small value
++ ** which will cause a segfault, to trap doorbell pushes to VIs
++ ** without DMA queues. */
++ vi->vi_rxq.doorbell = vi->vi_txq.doorbell = (ef_vi_ioaddr_t)0xdb;
++
++ ids = (uint16_t*) (vi->ep_state + 1);
++
++ if( vm->tx_queue_capacity ) {
++ vi->vi_txq.mask = vm->tx_queue_capacity - 1;
++ vi->vi_txq.doorbell = vm->tx_bell + 12;
++ vi->vi_txq.descriptors = vm->tx_dma_falcon;
++ vi->vi_txq.ids = ids;
++ ids += vi->vi_txq.mask + 1;
++ /* Check that the id fifo fits in the space allocated. */
++ ef_assert_le((char*) (vi->vi_txq.ids + vm->tx_queue_capacity),
++ (char*) vi->ep_state
++ + ef_vi_calc_state_bytes(vm->rx_queue_capacity,
++ vm->tx_queue_capacity));
++ }
++ if( vm->rx_queue_capacity ) {
++ vi->vi_rxq.mask = vm->rx_queue_capacity - 1;
++ vi->vi_rxq.doorbell = vm->rx_bell + 12;
++ vi->vi_rxq.descriptors = vm->rx_dma_falcon;
++ vi->vi_rxq.ids = ids;
++ /* Check that the id fifo fits in the space allocated. */
++ ef_assert_le((char*) (vi->vi_rxq.ids + vm->rx_queue_capacity),
++ (char*) vi->ep_state
++ + ef_vi_calc_state_bytes(vm->rx_queue_capacity,
++ vm->tx_queue_capacity));
++ }
++
++ if( vm->nic_type.variant == 'A' ) {
++ vi->vi_txq.misalign_mask = 15; /* BUG5391_WORKAROUND */
++ vi->vi_flags |= EF_VI_BUG5692_WORKAROUND;
++ }
++}
++
++
++int ef_vi_transmitv_init(ef_vi* vi, const ef_iovec* iov, int iov_len,
++ ef_request_id dma_id)
++{
++ ef_vi_txq* q = &vi->vi_txq;
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ ef_vi_falcon_dma_tx_buf_desc* dp;
++ unsigned len, dma_len, di;
++ unsigned added_save = qs->added;
++ ef_addr dma_addr;
++ unsigned last_len = 0;
++
++ ef_assert(iov_len > 0);
++ ef_assert(iov);
++ ef_assert_equal((dma_id & EF_REQUEST_ID_MASK), dma_id);
++ ef_assert_nequal(dma_id, 0xffff);
++
++ dma_addr = iov->iov_base;
++ len = iov->iov_len;
++
++ if( vi->vi_flags & EF_VI_ISCSI_TX_DDIG ) {
++ /* Last 4 bytes of placeholder for digest must be
++ * removed for h/w */
++ ef_assert(len > 4);
++ last_len = iov[iov_len - 1].iov_len;
++ if( last_len <= 4 ) {
++ ef_assert(iov_len > 1);
++ --iov_len;
++ last_len = iov[iov_len - 1].iov_len - (4 - last_len);
++ }
++ else {
++ last_len = iov[iov_len - 1].iov_len - 4;
++ }
++ if( iov_len == 1 )
++ len = last_len;
++ }
++
++ while( 1 ) {
++ if( qs->added - qs->removed >= q->mask ) {
++ qs->added = added_save;
++ return -EAGAIN;
++ }
++
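++		/* Bytes from dma_addr up to the next 4K boundary: the
++		 * DMA is split so that no single descriptor crosses a
++		 * 4K page. */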
++ dma_len = (~((unsigned) dma_addr) & 0xfff) + 1;
++ if( dma_len > len ) dma_len = len;
++ { /* BUG5391_WORKAROUND */
++ unsigned misalign =
++ (unsigned) dma_addr & q->misalign_mask;
++ if( misalign && dma_len + misalign > 512 )
++ dma_len = 512 - misalign;
++ }
++
++ di = qs->added++ & q->mask;
++ dp = (ef_vi_falcon_dma_tx_buf_desc*) q->descriptors + di;
++ if( vi->vi_flags & EF_VI_TX_PHYS_ADDR )
++ falcon_dma_tx_calc_ip_phys
++ (ef_physaddr(dma_addr), dma_len, /*port*/ 0,
++ (iov_len == 1 && dma_len == len) ? 0 :
++ EFVI_FALCON_DMA_TX_FRAG, dp);
++ else
++ falcon_dma_tx_calc_ip_buf
++ (ef_bufaddr(dma_addr), dma_len, /*port*/ 0,
++ (iov_len == 1 && dma_len == len) ? 0 :
++ EFVI_FALCON_DMA_TX_FRAG, dp);
++
++ dma_addr += dma_len;
++ len -= dma_len;
++
++ if( len == 0 ) {
++ if( --iov_len == 0 ) break;
++ ++iov;
++ dma_addr = iov->iov_base;
++ len = iov->iov_len;
++ if( (vi->vi_flags & EF_VI_ISCSI_TX_DDIG) &&
++ (iov_len == 1) )
++ len = last_len;
++ }
++ }
++
++ q->ids[di] = (uint16_t) dma_id;
++ return 0;
++}
++
++
++void ef_vi_transmit_push(ef_vi* vi)
++{
++ ef_vi_wiob();
++ writel((vi->ep_state->txq.added & vi->vi_txq.mask) <<
++ __DW4(TX_DESC_WPTR_LBN),
++ vi->vi_txq.doorbell);
++}
++
++
++/*! The value of initial_rx_bytes is used to set RX_KER_BUF_SIZE in an initial
++** receive descriptor here if physical addressing is being used. A value of
++** zero represents 16384 bytes. This is okay, because the caller must provide
++** a buffer that is larger than the MTU, and the MAC should filter anything
++** bigger than that.
++*/
++int ef_vi_receive_init(ef_vi* vi, ef_addr addr, ef_request_id dma_id,
++ int initial_rx_bytes)
++{
++ ef_vi_rxq* q = &vi->vi_rxq;
++ ef_vi_rxq_state* qs = &vi->ep_state->rxq;
++ unsigned di;
++
++ if( ef_vi_receive_space(vi) ) {
++ di = qs->added++ & q->mask;
++ ef_assert_equal(q->ids[di], 0xffff);
++ q->ids[di] = (uint16_t) dma_id;
++
++ if( ! (vi->vi_flags & EF_VI_RX_PHYS_ADDR) ) {
++ ef_vi_falcon_dma_rx_buf_desc* dp;
++ dp = (ef_vi_falcon_dma_rx_buf_desc*)
++ q->descriptors + di;
++ falcon_dma_rx_calc_ip_buf(ef_bufaddr(addr), dp);
++ }
++ else {
++ ef_vi_falcon_dma_rx_phys_desc* dp;
++ dp = (ef_vi_falcon_dma_rx_phys_desc*)
++ q->descriptors + di;
++ __falcon_dma_rx_calc_ip_phys(addr, dp,
++ initial_rx_bytes);
++ }
++
++ return 0;
++ }
++
++ return -EAGAIN;
++}
++
++
++void ef_vi_receive_push(ef_vi* vi)
++{
++ ef_vi_wiob();
++ writel ((vi->ep_state->rxq.added & vi->vi_rxq.mask) <<
++ __DW4(RX_DESC_WPTR_LBN),
++ vi->vi_rxq.doorbell);
++}
++
++
++ef_request_id ef_vi_receive_done(const ef_vi* vi, const ef_event* ef_ev)
++{
++ const ef_vi_qword* ev = EF_GET_HW_EV_PTR(*ef_ev);
++ unsigned di = ev->u32[0] & vi->vi_rxq.mask;
++ ef_request_id rq_id;
++
++ ef_assert(EF_EVENT_TYPE(*ef_ev) == EF_EVENT_TYPE_RX ||
++ EF_EVENT_TYPE(*ef_ev) == EF_EVENT_TYPE_RX_DISCARD);
++
++ /* Detect spurious / duplicate RX events. We may need to modify this
++ ** code so that we are robust if they happen. */
++ ef_assert_equal(di, vi->ep_state->rxq.removed & vi->vi_rxq.mask);
++
++	/* We only support 1 port, so events should be in order. */
++ ef_assert(vi->vi_rxq.ids[di] != 0xffff);
++
++ rq_id = vi->vi_rxq.ids[di];
++ vi->vi_rxq.ids[di] = 0xffff;
++ ++vi->ep_state->rxq.removed;
++ return rq_id;
++}
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/Makefile
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,11 @@
++EXTRA_CFLAGS += -Idrivers/xen/sfc_netfront -Idrivers/xen/sfc_netutil -Idrivers/xen/netfront
++EXTRA_CFLAGS += -D__ci_driver__
++EXTRA_CFLAGS += -Werror
++
++ifdef GCOV
++EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage -DEFX_GCOV
++endif
++
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) := sfc_netfront.o
++
++sfc_netfront-objs := accel_msg.o accel_bufs.o accel_netfront.o accel_vi.o accel_xenbus.o accel_tso.o accel_ssr.o accel_debugfs.o falcon_event.o falcon_vi.o pt_tx.o vi_init.o
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/pt_tx.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/pt_tx.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/pt_tx.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/pt_tx.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,91 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Packet-mode transmit interface.
++ * \date 2003/04/02
++ */
++
++/*! \cidoxg_lib_ef */
++#include "ef_vi_internal.h"
++
++
++int ef_vi_transmit_init(ef_vi* vi, ef_addr base, int len, ef_request_id dma_id)
++{
++ ef_iovec iov = { base, len };
++ return ef_vi_transmitv_init(vi, &iov, 1, dma_id);
++}
++
++
++int ef_vi_transmit(ef_vi* vi, ef_addr base, int len, ef_request_id dma_id)
++{
++ ef_iovec iov = { base, len };
++ int rc = ef_vi_transmitv_init(vi, &iov, 1, dma_id);
++ if( rc == 0 ) ef_vi_transmit_push(vi);
++ return rc;
++}
++
++
++int ef_vi_transmitv(ef_vi* vi, const ef_iovec* iov, int iov_len,
++ ef_request_id dma_id)
++{
++ int rc = ef_vi_transmitv_init(vi, iov, iov_len, dma_id);
++ if( rc == 0 ) ef_vi_transmit_push(vi);
++ return rc;
++}
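++
++/* Usage sketch (illustrative; hdr_addr/hdr_len/pay_addr/pay_len are
++ * hypothetical buffer addresses and lengths):
++ *
++ *   ef_iovec iov[2] = { { hdr_addr, hdr_len }, { pay_addr, pay_len } };
++ *   int rc = ef_vi_transmitv(vi, iov, 2, dma_id);
++ *   if( rc == -EAGAIN )
++ *           ;                    // descriptor ring full, retry later
++ */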
++
++
++int ef_vi_transmit_unbundle(ef_vi* vi, const ef_event* __ev,
++ ef_request_id* ids)
++{
++ ef_request_id* ids_in = ids;
++ ef_vi_txq* q = &vi->vi_txq;
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ const ef_vi_qword* ev = EF_GET_HW_EV_PTR(*__ev);
++ unsigned i, stop = (ev->u32[0] + 1) & q->mask;
++
++ ef_assert(EF_EVENT_TYPE(*__ev) == EF_EVENT_TYPE_TX ||
++ EF_EVENT_TYPE(*__ev) == EF_EVENT_TYPE_TX_ERROR);
++
++ /* Shouldn't be batching more than 64 descriptors, and should not go
++ ** backwards. */
++ ef_assert_le((((ev->u32[0] + 1) - qs->removed) & q->mask), 64);
++ /* Should not complete more than we've posted. */
++ ef_assert_le((((ev->u32[0] + 1) - qs->removed) & q->mask),
++ qs->added - qs->removed);
++
++ for( i = qs->removed & q->mask; i != stop; i = ++qs->removed & q->mask )
++ if( q->ids[i] != 0xffff ) {
++ *ids++ = q->ids[i];
++ q->ids[i] = 0xffff;
++ }
++
++ ef_assert_le(ids - ids_in, EF_VI_TRANSMIT_BATCH);
++
++ return (int) (ids - ids_in);
++}
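++
++/* Usage sketch (illustrative): on a TX or TX_ERROR event, reclaim the
++ * request ids of all descriptors that the event completes.
++ *
++ *   ef_request_id ids[EF_VI_TRANSMIT_BATCH];
++ *   int i, n = ef_vi_transmit_unbundle(vi, &ev, ids);
++ *   for( i = 0; i < n; ++i )
++ *           ;                    // free the buffer for ids[i]
++ */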
++
++/*! \cidoxg_end */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/sysdep.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/sysdep.h
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/sysdep.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/sysdep.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,184 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author stg
++ * \brief System dependent support for ef vi lib
++ * \date 2007/05/10
++ */
++
++/*! \cidoxg_include_ci_ul */
++#ifndef __CI_CIUL_SYSDEP_LINUX_H__
++#define __CI_CIUL_SYSDEP_LINUX_H__
++
++/**********************************************************************
++ * Kernel version compatibility
++ */
++
++#if defined(__GNUC__)
++
++/* Linux kernel doesn't have stdint.h or [u]intptr_t. */
++# if !defined(LINUX_VERSION_CODE)
++# include <linux/version.h>
++# endif
++# include <asm/io.h>
++
++/* In Linux 2.6.24, linux/types.h has uintptr_t */
++# if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++# if BITS_PER_LONG == 32
++ typedef __u32 uintptr_t;
++# else
++ typedef __u64 uintptr_t;
++# endif
++# endif
++
++/* But even 2.6.24 doesn't define intptr_t */
++# if BITS_PER_LONG == 32
++ typedef __s32 intptr_t;
++# else
++ typedef __s64 intptr_t;
++# endif
++
++# if defined(__ia64__)
++# define EF_VI_PRIx64 "lx"
++# else
++# define EF_VI_PRIx64 "llx"
++# endif
++
++# define EF_VI_HF __attribute__((visibility("hidden")))
++# define EF_VI_HV __attribute__((visibility("hidden")))
++
++# if defined(__i386__) || defined(__x86_64__) /* GCC x86/x64 */
++ typedef unsigned long long ef_vi_dma_addr_t;
++# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
++# define ef_vi_wiob() __asm__ __volatile__ ("sfence")
++# else
++# define ef_vi_wiob() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
++# endif
++
++# endif
++#endif
++
++#ifdef EFX_NOT_UPSTREAM
++
++/* Stuff for architectures/compilers not officially supported */
++
++#if !defined(__GNUC__)
++# if defined(__PPC__) /* PPC */
++ typedef unsigned long ef_vi_dma_addr_t;
++# define ef_vi_wiob() wmb()
++
++# ifdef __powerpc64__
++# ifdef CONFIG_SMP
++# define CI_SMP_SYNC "\n eieio \n" /* memory cache sync */
++# define CI_SMP_ISYNC "\n isync \n" /* instr cache sync */
++# else
++# define CI_SMP_SYNC
++# define CI_SMP_ISYNC
++# endif
++# else /* for ppc32 systems */
++# ifdef CONFIG_SMP
++# define CI_SMP_SYNC "\n eieio \n"
++# define CI_SMP_ISYNC "\n sync \n"
++# else
++# define CI_SMP_SYNC
++# define CI_SMP_ISYNC
++# endif
++# endif
++
++# elif defined(__ia64__) /* IA64 */
++ typedef unsigned long ef_vi_dma_addr_t;
++# define ef_vi_wiob() __asm__ __volatile__("mf.a": : :"memory")
++
++# else
++# error Unknown processor - GNU C
++# endif
++
++#elif defined(__PGI)
++# error PGI not supported
++
++#elif defined(__INTEL_COMPILER)
++
++/* Intel compilers v7 claim to be very gcc compatible. */
++# if __INTEL_COMPILER >= 700
++# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ > 91)
++# define EF_VI_LIKELY(t) __builtin_expect((t), 1)
++# define EF_VI_UNLIKELY(t) __builtin_expect((t), 0)
++# endif
++
++# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
++# define ef_vi_wiob() __asm__ __volatile__ ("sfence")
++# else
++# define ef_vi_wiob() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
++# endif
++
++# else
++# error Old Intel compiler not supported.
++# endif
++
++#else
++# error Unknown compiler.
++#endif
++
++#endif
++
++
++# include <linux/errno.h>
++
++
++/**********************************************************************
++ * Extracting bit fields.
++ */
++
++#define _QWORD_GET_LOW(f, v) \
++ (((v).u32[0] >> (f##_LBN)) & ((1u << f##_WIDTH) - 1u))
++#define _QWORD_GET_HIGH(f, v) \
++ (((v).u32[1] >> (f##_LBN - 32u)) & ((1u << f##_WIDTH) - 1u))
++#define _QWORD_GET_ANY(f, v) \
++ (((v).u64[0] >> f##_LBN) & (((uint64_t) 1u << f##_WIDTH) - 1u))
++
++#define QWORD_GET(f, v) \
++ ((f##_LBN + f##_WIDTH) <= 32u \
++ ? _QWORD_GET_LOW(f, (v)) \
++ : ((f##_LBN >= 32u) ? _QWORD_GET_HIGH(f, (v)) : _QWORD_GET_ANY(f, (v))))
++
++#define QWORD_GET_U(f, v) ((unsigned) QWORD_GET(f, (v)))
++
++#define _QWORD_TEST_BIT_LOW(f, v) ((v).u32[0] & (1u << (f##_LBN)))
++#define _QWORD_TEST_BIT_HIGH(f, v) ((v).u32[1] & (1u << (f##_LBN - 32u)))
++
++#define QWORD_TEST_BIT(f, v) \
++ (f##_LBN < 32 ? _QWORD_TEST_BIT_LOW(f, (v)) : _QWORD_TEST_BIT_HIGH(f, (v)))
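++
++/* Worked example (illustrative only; FOO_* is a made-up field name):
++ * for a field with FOO_LBN 30 and FOO_WIDTH 4, FOO_LBN + FOO_WIDTH
++ * exceeds 32 while FOO_LBN < 32, so QWORD_GET(FOO, v) expands to
++ * _QWORD_GET_ANY, i.e.
++ *
++ *   ((v).u64[0] >> 30) & (((uint64_t) 1u << 4) - 1u)
++ *
++ * which extracts bits 30..33 of the 64-bit event word. */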
++
++
++
++
++#ifndef DECLSPEC_NORETURN
++/* normally defined on Windows to expand to a declaration that the
++ function will not return */
++# define DECLSPEC_NORETURN
++#endif
++
++#endif /* __CI_CIUL_SYSDEP_LINUX_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netfront/vi_init.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/vi_init.c
+--- linux-2.6.18.8/drivers/xen/sfc_netfront/vi_init.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netfront/vi_init.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,183 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Initialisation of VIs.
++ * \date 2007/06/08
++ */
++
++#include "ef_vi_internal.h"
++
++#define EF_VI_STATE_BYTES(rxq_sz, txq_sz) \
++ (sizeof(ef_vi_state) + (rxq_sz) * sizeof(uint16_t) \
++ + (txq_sz) * sizeof(uint16_t))
++
++int ef_vi_calc_state_bytes(int rxq_sz, int txq_sz)
++{
++ ef_assert(rxq_sz == 0 || EF_VI_IS_POW2(rxq_sz));
++ ef_assert(txq_sz == 0 || EF_VI_IS_POW2(txq_sz));
++
++ return EF_VI_STATE_BYTES(rxq_sz, txq_sz);
++}
++
++
++int ef_vi_state_bytes(ef_vi* vi)
++{
++ int rxq_sz = 0, txq_sz = 0;
++ if( ef_vi_receive_capacity(vi) )
++ rxq_sz = ef_vi_receive_capacity(vi) + 1;
++ if( ef_vi_transmit_capacity(vi) )
++ txq_sz = ef_vi_transmit_capacity(vi) + 1;
++
++ ef_assert(rxq_sz == 0 || EF_VI_IS_POW2(rxq_sz));
++ ef_assert(txq_sz == 0 || EF_VI_IS_POW2(txq_sz));
++
++ return EF_VI_STATE_BYTES(rxq_sz, txq_sz);
++}
++
++
++void ef_eventq_state_init(ef_vi* evq)
++{
++ int j;
++
++ for (j = 0; j<EFAB_DMAQS_PER_EVQ_MAX; j++) {
++ ef_rx_dup_state_t *rx_dup_state =
++ &evq->evq_state->rx_dup_state[j];
++ rx_dup_state->bad_sop = 0;
++ rx_dup_state->rx_last_desc_ptr = -1;
++ rx_dup_state->frag_num = 0;
++ }
++
++ evq->evq_state->evq_ptr = 0;
++}
++
++
++void ef_vi_state_init(ef_vi* vi)
++{
++ ef_vi_state* state = vi->ep_state;
++ unsigned i;
++
++ state->txq.added = state->txq.removed = 0;
++ state->rxq.added = state->rxq.removed = 0;
++
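++	/* 0xffff, i.e. (uint16_t) -1, marks a descriptor slot with no
++	 * outstanding request id. */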
++ if( vi->vi_rxq.mask )
++ for( i = 0; i <= vi->vi_rxq.mask; ++i )
++ vi->vi_rxq.ids[i] = (uint16_t) -1;
++ if( vi->vi_txq.mask )
++ for( i = 0; i <= vi->vi_txq.mask; ++i )
++ vi->vi_txq.ids[i] = (uint16_t) -1;
++}
++
++
++void ef_vi_init_mapping_evq(void* data_area, struct ef_vi_nic_type nic_type,
++ int instance, unsigned evq_bytes, void* base,
++ void* timer_reg)
++{
++ struct vi_mappings* vm = (struct vi_mappings*) data_area;
++
++ vm->signature = VI_MAPPING_SIGNATURE;
++ vm->vi_instance = instance;
++ vm->nic_type = nic_type;
++ vm->evq_bytes = evq_bytes;
++ vm->evq_base = base;
++ vm->evq_timer_reg = timer_reg;
++}
++
++
++void ef_vi_init(ef_vi* vi, void* vvis, ef_vi_state* state,
++ ef_eventq_state* evq_state, enum ef_vi_flags vi_flags)
++{
++ struct vi_mappings* vm = (struct vi_mappings*) vvis;
++
++ vi->vi_i = vm->vi_instance;
++ vi->ep_state = state;
++ vi->vi_flags = vi_flags;
++
++ switch( vm->nic_type.arch ) {
++ case EF_VI_ARCH_FALCON:
++ falcon_vi_init(vi, vvis);
++ break;
++ default:
++ /* ?? TODO: We should return an error code. */
++ ef_assert(0);
++ break;
++ }
++
++ if( vm->evq_bytes ) {
++ vi->evq_state = evq_state;
++ vi->evq_mask = vm->evq_bytes - 1u;
++ vi->evq_base = vm->evq_base;
++ vi->evq_timer_reg = vm->evq_timer_reg;
++ }
++
++ EF_VI_MAGIC_SET(vi, EF_VI);
++}
++
++
++/* Initialise [data_area] with information required to initialise an ef_vi.
++ * In the following, an unused param should be set to NULL. Note the case
++ * marked (*) of [iobuf_mmap] for falcon/driver; for the normal driver this
++ * must be NULL.
++ *
++ * \param data_area [in,out] required, must ref at least VI_MAPPING_SIZE
++ * bytes
++ * \param io_mmap [in] ef1, required
++ * falcon, required
++ * \param iobuf_mmap [in] ef1, unused
++ * falcon, required
++ */
++void ef_vi_init_mapping_vi(void* data_area, struct ef_vi_nic_type nic_type,
++ unsigned rxq_capacity, unsigned txq_capacity,
++ int instance, void* io_mmap,
++ void* iobuf_mmap_rx, void* iobuf_mmap_tx,
++ enum ef_vi_flags vi_flags)
++{
++ struct vi_mappings* vm = (struct vi_mappings*) data_area;
++ int rx_desc_bytes, rxq_bytes;
++
++ ef_assert(rxq_capacity > 0 || txq_capacity > 0);
++ ef_assert(vm);
++ ef_assert(io_mmap);
++ ef_assert(iobuf_mmap_rx || iobuf_mmap_tx);
++
++ vm->signature = VI_MAPPING_SIGNATURE;
++ vm->vi_instance = instance;
++ vm->nic_type = nic_type;
++
++ rx_desc_bytes = (vi_flags & EF_VI_RX_PHYS_ADDR) ? 8 : 4;
++ rxq_bytes = rxq_capacity * rx_desc_bytes;
++ rxq_bytes = (rxq_bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
++
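++	/* If the RX and TX descriptor rings share a single mapping,
++	 * place the TX ring immediately after the page-aligned RX
++	 * ring. */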
++ if( iobuf_mmap_rx == iobuf_mmap_tx )
++ iobuf_mmap_tx = (char*) iobuf_mmap_rx + rxq_bytes;
++
++ vm->rx_queue_capacity = rxq_capacity;
++ vm->rx_dma_falcon = iobuf_mmap_rx;
++ vm->rx_bell = (char*) io_mmap + (RX_DESC_UPD_REG_KER_OFST & 4095);
++ vm->tx_queue_capacity = txq_capacity;
++ vm->tx_dma_falcon = iobuf_mmap_tx;
++ vm->tx_bell = (char*) io_mmap + (TX_DESC_UPD_REG_KER_OFST & 4095);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_cuckoo_hash.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_cuckoo_hash.c
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,651 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/types.h> /* needed for linux/random.h */
++#include <linux/random.h>
++
++#include "accel_cuckoo_hash.h"
++#include "accel_util.h"
++
++static inline int cuckoo_hash_key_compare(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key1,
++ cuckoo_hash_key *key2)
++{
++ return !memcmp(key1, key2, hashtab->key_length);
++}
++
++
++static inline void cuckoo_hash_key_set(cuckoo_hash_key *key1,
++ cuckoo_hash_key *key2)
++{
++ *key1 = *key2;
++}
++
++
++/*
++ * Sets the hash function parameters. Chooses "a" to be odd, 0 < a < 2^w,
++ * where w is the width of the key in bits.
++ */
++static void set_hash_parameters(cuckoo_hash_table *hashtab)
++{
++ again:
++ hashtab->a0 = hashtab->a1 = 0;
++
++ /* Make sure random */
++ get_random_bytes(&hashtab->a0, hashtab->key_length);
++ get_random_bytes(&hashtab->a1, hashtab->key_length);
++
++ /* Make sure odd */
++ hashtab->a0 |= 1;
++ hashtab->a1 |= 1;
++
++ /* Being different is good */
++ if (hashtab->a0 != hashtab->a1)
++ return;
++
++ goto again;
++}
++
++int cuckoo_hash_init(cuckoo_hash_table *hashtab, unsigned length_bits,
++ unsigned key_length)
++{
++ char *table_mem;
++ unsigned length = 1 << length_bits;
++
++ BUG_ON(length_bits >= sizeof(unsigned) * 8);
++ BUG_ON(key_length > sizeof(cuckoo_hash_key));
++
++ table_mem = kmalloc(sizeof(cuckoo_hash_entry) * 2 * length, GFP_KERNEL);
++
++ if (table_mem == NULL)
++ return -ENOMEM;
++
++ hashtab->length = length;
++ hashtab->length_bits = length_bits;
++ hashtab->key_length = key_length;
++ hashtab->entries = 0;
++
++ hashtab->table0 = (cuckoo_hash_entry *)table_mem;
++ hashtab->table1 = (cuckoo_hash_entry *)
++ (table_mem + length * sizeof(cuckoo_hash_entry));
++
++ set_hash_parameters(hashtab);
++
++ /* Zero the table */
++ memset(hashtab->table0, 0, length * 2 * sizeof(cuckoo_hash_entry));
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_init);
++
++void cuckoo_hash_destroy(cuckoo_hash_table *hashtab)
++{
++ if (hashtab->table0 != NULL)
++ kfree(hashtab->table0);
++}
++
++EXPORT_SYMBOL_GPL(cuckoo_hash_destroy);
++
++/*
++ * This computes a full cuckoo_hash (32 bits) of hash; not all of the
++ * bits will necessarily be used, but the hash function throws away
++ * any that aren't.
++ */
++static inline void cuckoo_compute_hash_helper(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *a,
++ cuckoo_hash_key *x,
++ cuckoo_hash *result)
++{
++ u64 multiply_result = 0, a_temp, x_temp;
++ u32 carry = 0;
++ u32 *a_words;
++ u32 *x_words;
++ int i;
++
++ /*
++ * As the mod and div operations in the function effectively
++ * reduce and shift the bits of the product down to just the
++ * third word, we need only compute that and return it as a
++ * result.
++ *
++ * Do enough long multiplication to get the word we need
++ */
++
++ /* This assumes things about the sizes of the key and hash */
++ BUG_ON(hashtab->key_length % sizeof(u32) != 0);
++ BUG_ON(sizeof(cuckoo_hash) != sizeof(u32));
++
++ a_words = (u32 *)a;
++ x_words = (u32 *)x;
++
++ for (i = 0; i < hashtab->key_length / sizeof(u32); i++) {
++ a_temp = a_words[i];
++ x_temp = x_words[i];
++
++ multiply_result = (a_temp * x_temp) + carry;
++ carry = (multiply_result >> 32) & 0xffffffff;
++ }
++
++ *result = multiply_result & 0xffffffff;
++}
++
++
++/*
++ * Want to implement (ax mod 2^w) div 2^(w-q) for odd a, 0 < a < 2^w;
++ * w is the width of the key in bits and q is the number of hash bits
++ * used to index each table.
++ * See http://www.it-c.dk/people/pagh/papers/cuckoo-jour.pdf
++ */
++static cuckoo_hash cuckoo_compute_hash(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_key *a)
++{
++ unsigned q = hashtab->length_bits;
++ unsigned shift = 32 - q;
++ unsigned mask = ((1 << q) - 1) << shift;
++ cuckoo_hash hash;
++
++ cuckoo_compute_hash_helper(hashtab, a, key, &hash);
++
++ /*
++ * Take the top few bits to get the right length for this
++ * hash table
++ */
++ hash = (hash & mask) >> shift;
++
++ BUG_ON(hash >= hashtab->length);
++
++ return hash;
++}
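++
++/* Worked example (illustrative): with length_bits q = 4 we get
++ * shift = 28 and mask = 0xf0000000, so a helper result of 0x9abcdef0
++ * gives hash = (0x9abcdef0 & 0xf0000000) >> 28 = 0x9; the top q bits
++ * of the product select one of the 2^q = 16 slots in each table. */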
++
++
++static int cuckoo_hash_lookup0(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value *value)
++{
++ cuckoo_hash hash = cuckoo_compute_hash(hashtab, key, &hashtab->a0);
++
++ if ((hashtab->table0[hash].state == CUCKOO_HASH_STATE_OCCUPIED)
++ && cuckoo_hash_key_compare(hashtab, &(hashtab->table0[hash].key),
++ key)) {
++ *value = hashtab->table0[hash].value;
++ return 1;
++ }
++
++ return 0;
++}
++
++static int cuckoo_hash_lookup1(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value *value)
++{
++ cuckoo_hash hash = cuckoo_compute_hash(hashtab, key, &hashtab->a1);
++
++ if ((hashtab->table1[hash].state == CUCKOO_HASH_STATE_OCCUPIED)
++ && cuckoo_hash_key_compare(hashtab, &(hashtab->table1[hash].key),
++ key)) {
++ *value = hashtab->table1[hash].value;
++ return 1;
++ }
++
++ return 0;
++}
++
++
++int cuckoo_hash_lookup(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value *value)
++{
++ return cuckoo_hash_lookup0(hashtab, key, value)
++ || cuckoo_hash_lookup1(hashtab, key, value);
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_lookup);
++
++
++/* Transfer any active entries from "old_table" into hashtab */
++static int cuckoo_hash_transfer_entries(cuckoo_hash_table *hashtab,
++ cuckoo_hash_entry *old_table,
++ unsigned capacity)
++{
++ int i, rc;
++ cuckoo_hash_entry *entry;
++
++ hashtab->entries = 0;
++
++ for (i = 0; i < capacity; i++) {
++ entry = &old_table[i];
++ if (entry->state == CUCKOO_HASH_STATE_OCCUPIED) {
++ rc = cuckoo_hash_add(hashtab, &(entry->key),
++ entry->value, 0);
++ if (rc != 0) {
++ return rc;
++ }
++ }
++ }
++
++ return 0;
++}
++
++
++int cuckoo_hash_rehash(cuckoo_hash_table *hashtab)
++{
++ cuckoo_hash_entry *new_table;
++ cuckoo_hash_table old_hashtab;
++ int resize = 0, rc, rehash_count;
++
++ /*
++ * Store old tables so we can access the existing values and
++ * copy across
++ */
++ memcpy(&old_hashtab, hashtab, sizeof(cuckoo_hash_table));
++
++ /* resize if hashtable is more than half full */
++ if (old_hashtab.entries > old_hashtab.length &&
++ old_hashtab.length_bits < 32)
++ resize = 1;
++
++ resize:
++ if (resize) {
++ new_table = kmalloc(sizeof(cuckoo_hash_entry) * 4 * hashtab->length,
++ GFP_ATOMIC);
++ if (new_table == NULL) {
++ rc = -ENOMEM;
++ goto err;
++ }
++
++ hashtab->length = 2 * hashtab->length;
++ hashtab->length_bits++;
++ } else {
++ new_table = kmalloc(sizeof(cuckoo_hash_entry) * 2 * hashtab->length,
++ GFP_ATOMIC);
++ if (new_table == NULL) {
++ rc = -ENOMEM;
++ goto err;
++ }
++ }
++
++ /*
++ * Point hashtab to new memory region so we can try to
++ * construct new table
++ */
++ hashtab->table0 = new_table;
++ hashtab->table1 = (cuckoo_hash_entry *)
++ ((char *)new_table + hashtab->length * sizeof(cuckoo_hash_entry));
++
++ rehash_count = 0;
++
++ again:
++ /* Zero the new tables */
++ memset(new_table, 0, hashtab->length * 2 * sizeof(cuckoo_hash_entry));
++
++ /* Choose new parameters for the hash functions */
++ set_hash_parameters(hashtab);
++
++ /*
++ * Multiply old_table_length by 2 as the length refers to each
++ * table, and there are two of them. This assumes that they
++ * are arranged sequentially in memory, so assert it
++ */
++ BUG_ON(((char *)old_hashtab.table1) !=
++ ((char *)old_hashtab.table0 + old_hashtab.length
++ * sizeof(cuckoo_hash_entry)));
++ rc = cuckoo_hash_transfer_entries(hashtab, old_hashtab.table0,
++ old_hashtab.length * 2);
++ if (rc < 0) {
++ /* Problem */
++ if (rc == -ENOSPC) {
++ ++rehash_count;
++ if (rehash_count < CUCKOO_HASH_MAX_LOOP) {
++ /*
++ * Wanted to rehash, but rather than
++ * recurse we can just do it here
++ */
++ goto again;
++ } else {
++ /*
++ * Didn't manage to rehash, so let's
++ * go up a size (if we haven't already
++ * and there's space)
++ */
++ if (!resize && hashtab->length_bits < 32) {
++ resize = 1;
++ kfree(new_table);
++ goto resize;
++ }
++ else
++ goto err;
++ }
++ }
++ else
++ goto err;
++ }
++
++ /* Success, I think. Free up the old table */
++ kfree(old_hashtab.table0);
++
++ /* We should have put all the entries from old table in the new one */
++ BUG_ON(hashtab->entries != old_hashtab.entries);
++
++ return 0;
++ err:
++ EPRINTK("%s: Rehash failed, giving up\n", __FUNCTION__);
++ /* Some other error, give up, at least restore table to how it was */
++ memcpy(hashtab, &old_hashtab, sizeof(cuckoo_hash_table));
++ if (new_table)
++ kfree(new_table);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_rehash);
++
++
++static int
++cuckoo_hash_insert_or_displace(cuckoo_hash_entry *table, unsigned hash,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value value,
++ cuckoo_hash_key *displaced_key,
++ cuckoo_hash_value *displaced_value)
++{
++ if (table[hash].state == CUCKOO_HASH_STATE_VACANT) {
++ cuckoo_hash_key_set(&(table[hash].key), key);
++ table[hash].value = value;
++ table[hash].state = CUCKOO_HASH_STATE_OCCUPIED;
++
++ return 1;
++ } else {
++ cuckoo_hash_key_set(displaced_key, &(table[hash].key));
++ *displaced_value = table[hash].value;
++ cuckoo_hash_key_set(&(table[hash].key), key);
++ table[hash].value = value;
++
++ return 0;
++ }
++}
++
++
++int cuckoo_hash_add(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value value, int can_rehash)
++{
++ cuckoo_hash hash0, hash1;
++ int i, rc;
++ cuckoo_hash_key key1, key2;
++
++ cuckoo_hash_key_set(&key1, key);
++
++ again:
++ i = 0;
++ do {
++ hash0 = cuckoo_compute_hash(hashtab, &key1, &hashtab->a0);
++ if (cuckoo_hash_insert_or_displace(hashtab->table0, hash0,
++ &key1, value, &key2,
++ &value)) {
++ /* Success */
++ hashtab->entries++;
++ return 0;
++ }
++
++ hash1 = cuckoo_compute_hash(hashtab, &key2, &hashtab->a1);
++ if (cuckoo_hash_insert_or_displace(hashtab->table1, hash1,
++ &key2, value, &key1,
++ &value)) {
++ /* Success */
++ hashtab->entries++;
++ return 0;
++ }
++ } while (++i < CUCKOO_HASH_MAX_LOOP);
++
++ if (can_rehash) {
++ if ((rc = cuckoo_hash_rehash(hashtab)) < 0) {
++ /*
++ * Give up - this will drop whichever
++ * key/value pair we have currently displaced
++ * on the floor
++ */
++ return rc;
++ }
++ goto again;
++ }
++
++ EPRINTK("%s: failed hash add\n", __FUNCTION__);
++ /*
++ * Couldn't do it - bad as we've now removed some random thing
++ * from the table, and will just drop it on the floor. Better
++ * would be to somehow revert the table to the state it was in
++ * at the start
++ */
++ return -ENOSPC;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_add);
++
++
++int cuckoo_hash_add_check(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key, cuckoo_hash_value value,
++ int can_rehash)
++{
++	cuckoo_hash_value stored_value;
++
++ if (cuckoo_hash_lookup(hashtab, key, &stored_value))
++ return -EBUSY;
++
++ return cuckoo_hash_add(hashtab, key, value, can_rehash);
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_add_check);
++
++
++int cuckoo_hash_remove(cuckoo_hash_table *hashtab, cuckoo_hash_key *key)
++{
++ cuckoo_hash hash;
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a0);
++ if ((hashtab->table0[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table0[hash].key),
++ key)) {
++ hashtab->table0[hash].state = CUCKOO_HASH_STATE_VACANT;
++ hashtab->entries--;
++ return 0;
++ }
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a1);
++ if ((hashtab->table1[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table1[hash].key),
++ key)) {
++ hashtab->table1[hash].state = CUCKOO_HASH_STATE_VACANT;
++ hashtab->entries--;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_remove);
++
++
++int cuckoo_hash_update(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value value)
++{
++ cuckoo_hash hash;
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a0);
++ if ((hashtab->table0[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table0[hash].key),
++ key)) {
++ hashtab->table0[hash].value = value;
++ return 0;
++ }
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a1);
++ if ((hashtab->table1[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table1[hash].key),
++ key)) {
++ hashtab->table1[hash].value = value;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_update);
++
++
++void cuckoo_hash_iterate_reset(cuckoo_hash_table *hashtab)
++{
++ hashtab->iterate_index = 0;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_iterate_reset);
++
++
++int cuckoo_hash_iterate(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key, cuckoo_hash_value *value)
++{
++ unsigned index;
++
++ while (hashtab->iterate_index < hashtab->length) {
++ index = hashtab->iterate_index;
++ ++hashtab->iterate_index;
++ if (hashtab->table0[index].state == CUCKOO_HASH_STATE_OCCUPIED) {
++ *key = hashtab->table0[index].key;
++ *value = hashtab->table0[index].value;
++ return 0;
++ }
++ }
++
++ while (hashtab->iterate_index >= hashtab->length &&
++ hashtab->iterate_index < hashtab->length * 2) {
++ index = hashtab->iterate_index - hashtab->length;
++ ++hashtab->iterate_index;
++ if (hashtab->table1[index].state == CUCKOO_HASH_STATE_OCCUPIED) {
++ *key = hashtab->table1[index].key;
++ *value = hashtab->table1[index].value;
++ return 0;
++ }
++ }
++
++ return -ENOSPC;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_iterate);
++
++
++#if 0
++void cuckoo_hash_valid(cuckoo_hash_table *hashtab)
++{
++ int i, entry_count = 0;
++
++ for (i=0; i < hashtab->length; i++) {
++ EPRINTK_ON(hashtab->table0[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table0[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ EPRINTK_ON(hashtab->table1[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table1[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ }
++
++ if (entry_count != hashtab->entries) {
++ EPRINTK("%s: bad count\n", __FUNCTION__);
++ cuckoo_hash_dump(hashtab);
++ return;
++ }
++
++ for (i=0; i< hashtab->length; i++) {
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ if (i != cuckoo_compute_hash(hashtab,
++ &hashtab->table0[i].key,
++ &hashtab->a0)) {
++ EPRINTK("%s: Bad key table 0 index %d\n",
++ __FUNCTION__, i);
++ cuckoo_hash_dump(hashtab);
++ return;
++ }
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ if (i != cuckoo_compute_hash(hashtab,
++ &hashtab->table1[i].key,
++ &hashtab->a1)) {
++ EPRINTK("%s: Bad key table 1 index %d\n",
++ __FUNCTION__, i);
++ cuckoo_hash_dump(hashtab);
++ return;
++ }
++ }
++
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_valid);
++
++
++void cuckoo_hash_dump(cuckoo_hash_table *hashtab)
++{
++ int i, entry_count;
++
++ entry_count = 0;
++ for (i=0; i < hashtab->length; i++) {
++ EPRINTK_ON(hashtab->table0[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table0[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ EPRINTK_ON(hashtab->table1[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table1[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ }
++
++ EPRINTK("======================\n");
++ EPRINTK("Cuckoo hash table dump\n");
++ EPRINTK("======================\n");
++ EPRINTK("length: %d; length_bits: %d; key_length: %d\n", hashtab->length,
++ hashtab->length_bits, hashtab->key_length);
++ EPRINTK("Recorded entries: %d\n", hashtab->entries);
++ EPRINTK("Counted entries: %d\n", entry_count);
++ EPRINTK("a0: %llx; a1: %llx\n", hashtab->a0, hashtab->a1);
++ EPRINTK("-----------------------------------------\n");
++ EPRINTK("Index Occupied Key Value Index0 Index1\n");
++ EPRINTK("-----------------------------------------\n");
++ for (i=0; i< hashtab->length; i++) {
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ EPRINTK("%d %d %llx %d %d %d\n", i,
++ hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED,
++ hashtab->table0[i].key, hashtab->table0[i].value,
++ cuckoo_compute_hash(hashtab, &hashtab->table0[i].key,
++ &hashtab->a0),
++ cuckoo_compute_hash(hashtab, &hashtab->table0[i].key,
++ &hashtab->a1));
++ else
++ EPRINTK("%d %d - - - -\n", i,
++ hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED);
++
++ }
++ EPRINTK("-----------------------------------------\n");
++ EPRINTK("Index Occupied Key Value Index0 Index1\n");
++ EPRINTK("-----------------------------------------\n");
++ for (i=0; i< hashtab->length; i++) {
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ EPRINTK("%d %d %llx %d %d %d\n", i,
++ hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED,
++ hashtab->table1[i].key, hashtab->table1[i].value,
++ cuckoo_compute_hash(hashtab, &hashtab->table1[i].key,
++ &hashtab->a0),
++ cuckoo_compute_hash(hashtab, &hashtab->table1[i].key,
++ &hashtab->a1));
++ else
++ EPRINTK("%d %d - - - -\n", i,
++ hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED);
++ }
++ EPRINTK("======================\n");
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_dump);
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_cuckoo_hash.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_cuckoo_hash.h
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_cuckoo_hash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_cuckoo_hash.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,227 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * A cuckoo hash table consists of two sub-tables. Each entry can
++ * hash to a position in each table. If, on insertion, its position is
++ * found to be occupied, the existing element is moved to its other
++ * location. This recurses until success or a loop is found. If a
++ * loop is found the table is rehashed.
++ *
++ * See http://www.it-c.dk/people/pagh/papers/cuckoo-jour.pdf
++ */
++
++#ifndef NET_ACCEL_CUCKOO_HASH_H
++#define NET_ACCEL_CUCKOO_HASH_H
++
++/*! Type used for hash table keys of ip pairs */
++typedef struct {
++ u32 local_ip;
++ //u32 remote_ip;
++ u16 local_port;
++ //u16 remote_port;
++	/* Technically only 1 bit, but 16 bits are used to keep the
++	   key size a round number */
++ u16 proto;
++} cuckoo_hash_ip_key;
++
++/*! Type used for hash table keys of mac addresses */
++typedef u64 cuckoo_hash_mac_key;
++
++/*! This type is designed to be large enough to hold all supported key
++ * sizes to avoid having to malloc storage for them.
++ */
++typedef u64 cuckoo_hash_key;
++
++/*! Type used for the values stored in the hash table */
++typedef int cuckoo_hash_value;
++
++/*! Type used for the hash used to index the table */
++typedef u32 cuckoo_hash;
++
++/*! How long to spend displacing values when adding before giving up
++ * and rehashing */
++#define CUCKOO_HASH_MAX_LOOP (hashtab->length)
++
++/*! State of hash table entry */
++typedef enum {
++ CUCKOO_HASH_STATE_VACANT = 0,
++ CUCKOO_HASH_STATE_OCCUPIED
++} cuckoo_hash_state;
++
++/*! An entry in the hash table */
++typedef struct {
++ cuckoo_hash_state state;
++ cuckoo_hash_key key;
++ cuckoo_hash_value value;
++} cuckoo_hash_entry;
++
++/*! A cuckoo hash table */
++typedef struct {
++ /*! The length of each table (NB. there are two tables of this
++ * length) */
++ unsigned length;
++ /*! The length of each table in bits */
++ unsigned length_bits;
++ /*! The length of the key in bytes */
++ unsigned key_length;
++ /*! The number of entries currently stored in the table */
++ unsigned entries;
++ /*! Index into table used by cuckoo_hash_iterate */
++ unsigned iterate_index;
++
++ /* parameter of hash functions */
++ /*! The "a" parameter of the first hash function */
++ cuckoo_hash_key a0;
++ /*! The "a" parameter of the second hash function */
++ cuckoo_hash_key a1;
++
++ /*! The first table */
++ cuckoo_hash_entry *table0;
++ /*! The second table */
++ cuckoo_hash_entry *table1;
++} cuckoo_hash_table;
++
++/*! Initialise the cuckoo hash table
++ *
++ * \param hashtab A pointer to an uninitialised hash table structure
++ * \param length_bits The number of elements in each table equals
++ * 2**length_bits
++ * \param key_length The length of the key in bytes
++ *
++ * \return 0 on success, -ENOMEM if it couldn't allocate the tables
++ */
++extern
++int cuckoo_hash_init(cuckoo_hash_table *hashtab, unsigned length_bits,
++ unsigned key_length);
++
++
++/*! Destroy a hash table
++ *
++ * \param hashtab A hash table that has previously been passed to a
++ * successful call of cuckoo_hash_init()
++ */
++extern
++void cuckoo_hash_destroy(cuckoo_hash_table *hashtab);
++
++
++/*! Lookup an entry in the hash table
++ *
++ * \param hashtab The hash table in which to look.
++ * \param key Pointer to a mac address to use as the key
++ * \param value On exit set to the value stored if key was present
++ *
++ * \return 0 if not present in the table, non-zero if it is (and value
++ * is set accordingly)
++ */
++extern
++int cuckoo_hash_lookup(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value *value);
++
++/*! Add an entry to the hash table. Key must not be a duplicate of
++ * anything already in the table. If this is a risk, see
++ * cuckoo_hash_add_check
++ *
++ * \param hashtab The hash table to add the entry to
++ * \param key Pointer to a mac address to use as a key
++ * \param value The value to store
++ * \param can_rehash Flag to allow the add function to rehash the
++ * table if necessary
++ *
++ * \return 0 on success, non-zero on failure. -ENOSPC means it just
++ * couldn't find anywhere to put it - this is bad and probably means
++ * an entry has been dropped on the floor (but the entry you just
++ * tried to add may now be included)
++ */
++extern
++int cuckoo_hash_add(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value value,
++ int can_rehash);
++
++/*! Same as cuckoo_hash_add but first checks to ensure entry is not
++ * already there
++ * \return -EBUSY if already there
++ */
++
++extern
++int cuckoo_hash_add_check(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value value,
++ int can_rehash);
++/*! Remove an entry from the table
++ *
++ * \param hashtab The hash table to remove the entry from
++ * \param key The key that was used to previously add the entry
++ *
++ * \return 0 on success, -EINVAL if the entry couldn't be found
++ */
++extern
++int cuckoo_hash_remove(cuckoo_hash_table *hashtab, cuckoo_hash_key *key);
++
++
++/*! Helper for those using mac addresses to convert to a key for the
++ * hash table
++ */
++static inline cuckoo_hash_mac_key cuckoo_mac_to_key(const u8 *mac)
++{
++ return (cuckoo_hash_mac_key)(mac[0])
++ | (cuckoo_hash_mac_key)(mac[1]) << 8
++ | (cuckoo_hash_mac_key)(mac[2]) << 16
++ | (cuckoo_hash_mac_key)(mac[3]) << 24
++ | (cuckoo_hash_mac_key)(mac[4]) << 32
++ | (cuckoo_hash_mac_key)(mac[5]) << 40;
++}
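++
++/* Illustrative usage sketch (not part of the driver): converting a
++ * MAC address to a key and looking it up. The hashtab variable and
++ * the cast via cuckoo_hash_key are assumptions based on the
++ * declarations above; use_value() is a placeholder for the caller's
++ * handling of a hit.
++ *
++ *	cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++ *	cuckoo_hash_value value;
++ *	if (cuckoo_hash_lookup(&hashtab, (cuckoo_hash_key *)&key, &value))
++ *		use_value(value);
++ */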
++
++
++/*! Update an entry already in the hash table to take a new value
++ *
++ * \param hashtab The hash table to add the entry to
++ * \param key Pointer to a mac address to use as a key
++ * \param value The value to store
++ *
++ * \return 0 on success, non-zero on failure.
++ */
++int cuckoo_hash_update(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value value);
++
++
++/*! Go through the hash table and return all used entries (one per call)
++ *
++ * \param hashtab The hash table to iterate over
++ * \param key Pointer to a key to take the returned key
++ * \param value Pointer to a value to take the returned value
++ *
++ * \return 0 on success (key, value set), non-zero on failure.
++ */
++int cuckoo_hash_iterate(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key, cuckoo_hash_value *value);
++void cuckoo_hash_iterate_reset(cuckoo_hash_table *hashtab);
++
++/* debug, not compiled by default */
++void cuckoo_hash_valid(cuckoo_hash_table *hashtab);
++void cuckoo_hash_dump(cuckoo_hash_table *hashtab);
++
++#endif /* NET_ACCEL_CUCKOO_HASH_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_msg_iface.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_msg_iface.c
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_msg_iface.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_msg_iface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,301 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/evtchn.h>
++
++#include "accel_util.h"
++#include "accel_msg_iface.h"
++
++#define NET_ACCEL_MSG_Q_SIZE (1024)
++#define NET_ACCEL_MSG_Q_MASK (NET_ACCEL_MSG_Q_SIZE - 1)
++
++#ifdef NDEBUG
++#define NET_ACCEL_CHECK_MAGIC(_p, _errval)
++#define NET_ACCEL_SHOW_QUEUE(_t, _q, _id)
++#else
++#define NET_ACCEL_CHECK_MAGIC(_p, _errval) \
++ if (_p->magic != NET_ACCEL_MSG_MAGIC) { \
++ printk(KERN_ERR "%s: passed invalid shared page %p!\n", \
++ __FUNCTION__, _p); \
++ return _errval; \
++ }
++#define NET_ACCEL_SHOW_QUEUE(_t, _q, _id) \
++ printk(_t ": queue %d write %x read %x base %x limit %x\n", \
++ _id, _q->write, _q->read, _q->base, _q->limit);
++#endif
++
++/*
++ * We've been passed at least 2 pages. 1 control page and 1 or more
++ * data pages.
++ */
++int net_accel_msg_init_page(void *mem, int len, int up)
++{
++ struct net_accel_shared_page *shared_page =
++ (struct net_accel_shared_page*)mem;
++
++ if ((unsigned long)shared_page & NET_ACCEL_MSG_Q_MASK)
++ return -EINVAL;
++
++ shared_page->magic = NET_ACCEL_MSG_MAGIC;
++
++ shared_page->aflags = 0;
++
++ shared_page->net_dev_up = up;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_init_page);
++
++
++void net_accel_msg_init_queue(sh_msg_fifo2 *queue,
++ struct net_accel_msg_queue *indices,
++ struct net_accel_msg *base, int size)
++{
++ queue->fifo = base;
++ spin_lock_init(&queue->lock);
++ sh_fifo2_init(queue, size-1, &indices->read, &indices->write);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_init_queue);
++
++
++static inline int _net_accel_msg_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg,
++ int is_reply)
++{
++ int rc = 0;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ rmb();
++ if (is_reply) {
++ EPRINTK_ON(sh_fifo2_is_full(queue));
++ sh_fifo2_put(queue, *msg);
++ } else {
++ if (sh_fifo2_not_half_full(queue)) {
++ sh_fifo2_put(queue, *msg);
++ } else {
++ rc = -ENOSPC;
++ }
++ }
++ wmb();
++ return rc;
++}
++
++/* Notify after a batch of messages have been sent */
++void net_accel_msg_notify(int irq)
++{
++ notify_remote_via_irq(irq);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_notify);
++
++/*
++ * Send a message on the specified FIFO. Returns 0 on success, -errno
++ * on failure. The message in msg is copied to the current slot of the
++ * FIFO.
++ */
++int net_accel_msg_send(struct net_accel_shared_page *sp, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 0);
++ net_accel_msg_unlock_queue(q, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_send);
++
++
++/* As net_accel_msg_send but also posts a notification to the far end. */
++int net_accel_msg_send_notify(struct net_accel_shared_page *sp, int irq,
++ sh_msg_fifo2 *q, struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 0);
++ net_accel_msg_unlock_queue(q, &flags);
++ if (rc >= 0)
++ notify_remote_via_irq(irq);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_send_notify);
++
++
++int net_accel_msg_reply(struct net_accel_shared_page *sp, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 1);
++ net_accel_msg_unlock_queue(q, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_reply);
++
++
++/* As net_accel_msg_send but also posts a notification to the far end. */
++int net_accel_msg_reply_notify(struct net_accel_shared_page *sp, int irq,
++ sh_msg_fifo2 *q, struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 1);
++ net_accel_msg_unlock_queue(q, &flags);
++ if (rc >= 0)
++ notify_remote_via_irq(irq);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_reply_notify);
++
++
++/*
++ * Look at a received message, if any, so a decision can be made about
++ * whether to read it now or not. Cookie is a bit of debug which is
++ * set here and checked when passed to net_accel_msg_recv_next()
++ */
++int net_accel_msg_peek(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg, int *cookie)
++{
++ unsigned long flags;
++ int rc = 0;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ net_accel_msg_lock_queue(queue, &flags);
++ rmb();
++ if (sh_fifo2_is_empty(queue)) {
++ rc = -ENOENT;
++ } else {
++ *msg = sh_fifo2_peek(queue);
++ *cookie = *(queue->fifo_rd_i);
++ }
++ net_accel_msg_unlock_queue(queue, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_peek);
++
++
++/*
++ * Move the queue onto the next element, used after finishing with a
++ * peeked msg
++ */
++int net_accel_msg_recv_next(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, int cookie)
++{
++ unsigned long flags;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ net_accel_msg_lock_queue(queue, &flags);
++ rmb();
++ /* Mustn't be empty */
++ BUG_ON(sh_fifo2_is_empty(queue));
++ /*
++ * Check cookie matches, i.e. we're advancing over the same message
++ * as was got using peek
++ */
++ BUG_ON(cookie != *(queue->fifo_rd_i));
++ sh_fifo2_rd_next(queue);
++ wmb();
++ net_accel_msg_unlock_queue(queue, &flags);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_recv_next);
++
++
++/*
++ * Receive a message on the specified FIFO. Returns 0 on success,
++ * -errno on failure.
++ */
++int net_accel_msg_recv(struct net_accel_shared_page *sp, sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc = 0;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ net_accel_msg_lock_queue(queue, &flags);
++ rmb();
++ if (sh_fifo2_is_empty(queue)) {
++ rc = -ENOENT;
++ } else {
++ sh_fifo2_get(queue, msg);
++ }
++ wmb();
++ net_accel_msg_unlock_queue(queue, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_recv);
++
++
++/*
++ * Start sending a message without copying. Returns a pointer to a message
++ * that will be filled out in place. The queue is locked until the message
++ * is sent.
++ */
++struct net_accel_msg *net_accel_msg_start_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ struct net_accel_msg *msg;
++ NET_ACCEL_CHECK_MAGIC(sp, NULL);
++ net_accel_msg_lock_queue(queue, flags);
++ rmb();
++ if (sh_fifo2_not_half_full(queue)) {
++ msg = sh_fifo2_pokep(queue);
++ } else {
++ net_accel_msg_unlock_queue(queue, flags);
++ msg = NULL;
++ }
++ return msg;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_start_send);
++
++
++static inline void _msg_complete(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags)
++{
++ sh_fifo2_wr_next(queue);
++ net_accel_msg_unlock_queue(queue, flags);
++}
++
++/*
++ * Complete the sending of a message started with net_accel_msg_start_send. The
++ * message is implicit since the queue was locked by _start
++ */
++void net_accel_msg_complete_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags)
++{
++ _msg_complete(sp, queue, flags);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_complete_send);
++
++/* As net_accel_msg_complete_send but does the notify. */
++void net_accel_msg_complete_send_notify(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags, int irq)
++{
++ _msg_complete(sp, queue, flags);
++ notify_remote_via_irq(irq);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_complete_send_notify);
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_msg_iface.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_msg_iface.h
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_msg_iface.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_msg_iface.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,414 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NET_ACCEL_MSG_IFACE_H
++#define NET_ACCEL_MSG_IFACE_H
++
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++
++#include "accel_shared_fifo.h"
++
++#define NET_ACCEL_MSG_MAGIC (0x85465479)
++
++/*! We talk version 0.010 of the interdomain protocol */
++#define NET_ACCEL_MSG_VERSION (0x00001000)
++
++/*! Shared memory portion of inter-domain FIFO */
++struct net_accel_msg_queue {
++ u32 read;
++ u32 write;
++};
++
++
++/*
++ * The aflags in the following structure is used as follows:
++ *
++ * - each bit is set when one of the corresponding variables is
++ * changed by either end.
++ *
++ * - the end that has made the change then forwards an IRQ to the
++ * other
++ *
++ * - the IRQ handler deals with these bits either on the fast path, or
++ * for less common changes, by jumping onto the slow path.
++ *
++ * - once it has seen a change, it clears the relevant bit.
++ *
++ * aflags is accessed atomically using clear_bit, test_bit,
++ * test_and_set_bit etc
++ */
++
++/*
++ * The following used to signify to the other domain when the queue
++ * they want to use is full, and when it is no longer full. Could be
++ * compressed to use fewer bits but done this way for simplicity and
++ * clarity
++ */
++
++/* "dom0->domU queue" is full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0FULL 0x1
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B 0
++/* "dom0->domU queue" is not full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL 0x2
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B 1
++/* "domU->dom0 queue" is full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUFULL 0x4
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B 2
++/* "domU->dom0 queue" is not full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL 0x8
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B 3
++/* dom0 -> domU net_dev up/down events */
++#define NET_ACCEL_MSG_AFLAGS_NETUPDOWN 0x10
++#define NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B 4
++
++/*
++ * Masks used to test if there are any messages for domU and dom0
++ * respectively
++ */
++#define NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK \
++ (NET_ACCEL_MSG_AFLAGS_QUEUE0FULL | \
++ NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL | \
++ NET_ACCEL_MSG_AFLAGS_NETUPDOWN)
++#define NET_ACCEL_MSG_AFLAGS_TO_DOM0_MASK \
++ (NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL | \
++ NET_ACCEL_MSG_AFLAGS_QUEUEUFULL)
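++
++/* Illustrative sketch (not part of the driver) of the aflags protocol
++ * described above, for the net_dev up/down event; sp stands in for a
++ * pointer to the shared page and the handler name is hypothetical:
++ *
++ *	if (test_and_clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
++ *			       (unsigned long *)&sp->aflags))
++ *		handle_netdev_state_change(sp->net_dev_up);
++ */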
++
++/*! The shared data structure used for inter-VM communication. */
++struct net_accel_shared_page {
++ /*! Sanity check */
++ u32 magic;
++ /*! Used by host/Dom0 */
++ struct net_accel_msg_queue queue0;
++ /*! Used by guest/DomU */
++ struct net_accel_msg_queue queue1;
++ /*! Atomic flags, used to communicate simple state changes */
++ u32 aflags;
++ /*! State of net_dev used for acceleration */
++ u32 net_dev_up;
++};
++
++
++enum net_accel_hw_type {
++ /*! Not a virtualisable NIC: use slow path. */
++ NET_ACCEL_MSG_HWTYPE_NONE = 0,
++ /*! NIC is Falcon-based */
++ NET_ACCEL_MSG_HWTYPE_FALCON_A = 1,
++ NET_ACCEL_MSG_HWTYPE_FALCON_B = 2,
++};
++
++/*! The maximum number of pages used by an event queue. */
++#define EF_HW_FALCON_EVQ_PAGES 8
++
++struct net_accel_hw_falcon_b {
++ /* VI */
++ /*! Grant for Tx DMA Q */
++ u32 txdmaq_gnt;
++ /*! Grant for Rx DMA Q */
++ u32 rxdmaq_gnt;
++ /*! Machine frame number for Tx/Rx doorbell page */
++ u32 doorbell_mfn;
++ /*! Grant for Tx/Rx doorbell page */
++ u32 doorbell_gnt;
++
++ /* Event Q */
++ /*! Grants for the pages of the EVQ */
++ u32 evq_mem_gnts[EF_HW_FALCON_EVQ_PAGES];
++ u32 evq_offs;
++ /*! log2(pages in event Q) */
++ u32 evq_order;
++ /*! Capacity in events */
++ u32 evq_capacity;
++ /*! Eventq pointer register physical address */
++ u32 evq_rptr;
++ /*! Interface instance */
++ u32 instance;
++ /*! Capacity of RX queue */
++ u32 rx_capacity;
++ /*! Capacity of TX queue */
++ u32 tx_capacity;
++
++ /* NIC */
++ s32 nic_arch;
++ s32 nic_revision;
++ u8 nic_variant;
++};
++
++struct net_accel_hw_falcon_a {
++ struct net_accel_hw_falcon_b common;
++ u32 evq_rptr_gnt;
++};
++
++
++/*! Description of the hardware that the DomU is being given. */
++struct net_accel_msg_hw {
++ u32 type; /*!< Hardware type */
++ union {
++ struct net_accel_hw_falcon_a falcon_a;
++ struct net_accel_hw_falcon_b falcon_b;
++ } resources;
++};
++
++/*! Start-of-day handshake message. Dom0 fills in its version and
++ * sends, DomU checks, inserts its version and replies
++ */
++struct net_accel_msg_hello {
++ /*! Sender's version (set by each side in turn) */
++ u32 version;
++ /*! max pages allocated/allowed for buffers */
++ u32 max_pages;
++};
++
++/*! Maximum number of page requests that can fit in a message. */
++#define NET_ACCEL_MSG_MAX_PAGE_REQ (8)
++
++/*! Request for NIC buffers. DomU fills out pages and grants (and
++ * optionally reqid), dom0 fills out buf and sends reply
++ */
++struct net_accel_msg_map_buffers {
++ u32 reqid; /*!< Optional request ID */
++ u32 pages; /*!< Number of pages to map */
++ u32 grants[NET_ACCEL_MSG_MAX_PAGE_REQ]; /*!< Grant ids to map */
++ u32 buf; /*!< NIC buffer address of pages obtained */
++};
++
++/*! Notification of a change to local mac address, used to filter
++ locally destined packets off the fast path */
++struct net_accel_msg_localmac {
++ u32 flags; /*!< Should this be added or removed? */
++ u8 mac[ETH_ALEN]; /*!< The mac address to filter onto slow path */
++};
++
++struct net_accel_msg_fastpath {
++ u32 flags; /*!< Should this be added or removed? */
++ u8 mac[ETH_ALEN];/*!< The mac address to filter onto fast path */
++ u16 port; /*!< The port of the connection */
++ u32 ip; /*!< The IP address of the connection */
++ u8 proto; /*!< The protocol of connection (TCP/UDP) */
++};
++
++/*! Values for struct ef_msg_localmac/fastpath.flags */
++#define NET_ACCEL_MSG_ADD 0x1
++#define NET_ACCEL_MSG_REMOVE 0x2
++
++/*! Overall message structure */
++struct net_accel_msg {
++	/*! ID specifying type of message */
++ u32 id;
++ union {
++ /*! handshake */
++ struct net_accel_msg_hello hello;
++ /*! hardware description */
++ struct net_accel_msg_hw hw;
++ /*! buffer map request */
++ struct net_accel_msg_map_buffers mapbufs;
++ /*! mac address of a local interface */
++ struct net_accel_msg_localmac localmac;
++ /*! address of a new fastpath connection */
++ struct net_accel_msg_fastpath fastpath;
++ /*! make the message a fixed size */
++ u8 pad[128 - sizeof(u32)];
++ } u;
++};
++
++
++#define NET_ACCEL_MSG_HW_TO_MSG(_u) container_of(_u, struct net_accel_msg, u.hw)
++
++/*! Inter-domain message FIFO */
++typedef struct {
++ struct net_accel_msg *fifo;
++ u32 fifo_mask;
++ u32 *fifo_rd_i;
++ u32 *fifo_wr_i;
++ spinlock_t lock;
++ u32 is_locked; /* Debug flag */
++} sh_msg_fifo2;
++
++
++#define NET_ACCEL_MSG_OFFSET_MASK PAGE_MASK
++
++/* Modifiers */
++#define NET_ACCEL_MSG_REPLY (0x80000000)
++#define NET_ACCEL_MSG_ERROR (0x40000000)
++
++/* Dom0 -> DomU and reply. Handshake/version check. */
++#define NET_ACCEL_MSG_HELLO (0x00000001)
++/* Dom0 -> DomU : hardware setup (VI info.) */
++#define NET_ACCEL_MSG_SETHW (0x00000002)
++/*
++ * Dom0 -> DomU. Notification of a local mac to add/remove from slow
++ * path filter
++ */
++#define NET_ACCEL_MSG_LOCALMAC (0x00000003)
++/*
++ * DomU -> Dom0 and reply. Request for buffer table entries for
++ * preallocated pages.
++ */
++#define NET_ACCEL_MSG_MAPBUF (0x00000004)
++/*
++ * Dom0 -> DomU. Notification of a local mac to add/remove from fast
++ * path filter
++ */
++#define NET_ACCEL_MSG_FASTPATH (0x00000005)
++
++/*! Initialise a message and set the type
++ * \param message : the message
++ * \param code : the message type
++ */
++static inline void net_accel_msg_init(struct net_accel_msg *msg, int code) {
++ msg->id = (u32)code;
++}
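++
++/* Illustrative sketch (not part of the driver): composing and sending
++ * the start-of-day handshake with the helpers in this header. The
++ * shared page sp, queue q, irq and max_pages values are placeholders
++ * and error handling is abbreviated:
++ *
++ *	struct net_accel_msg msg;
++ *	net_accel_msg_init(&msg, NET_ACCEL_MSG_HELLO);
++ *	msg.u.hello.version = NET_ACCEL_MSG_VERSION;
++ *	msg.u.hello.max_pages = max_pages;
++ *	net_accel_msg_send_notify(sp, irq, q, &msg);
++ */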
++
++/*! Initialise a shared page structure
++ * \param shared_page : mapped memory in which the structure resides
++ * \param len : size of the message FIFO area that follows
++ * \param up : initial up/down state of netdev
++ * \return 0 or an error code
++ */
++extern int net_accel_msg_init_page(void *shared_page, int len, int up);
++
++/*! Initialise a message queue
++ * \param queue : the message FIFO to initialise
++ * \param indices : the read and write indices in shared memory
++ * \param base : the start of the memory area for the FIFO
++ * \param size : the size of the FIFO in bytes
++ */
++extern void net_accel_msg_init_queue(sh_msg_fifo2 *queue,
++ struct net_accel_msg_queue *indices,
++ struct net_accel_msg *base, int size);
++
++/* Notify after a batch of messages have been sent */
++extern void net_accel_msg_notify(int irq);
++
++/*! Send a message on the specified FIFO. The message is copied to the
++ * current slot of the FIFO.
++ * \param sp : pointer to shared page
++ * \param q : pointer to message FIFO to use
++ * \param msg : pointer to message
++ * \return 0 on success, -errno on failure
++ */
++extern int net_accel_msg_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++extern int net_accel_msg_reply(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++
++/*! As net_accel_msg_send but also posts a notification to the far end. */
++extern int net_accel_msg_send_notify(struct net_accel_shared_page *sp,
++ int irq, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++/*! As net_accel_msg_send but also posts a notification to the far end. */
++extern int net_accel_msg_reply_notify(struct net_accel_shared_page *sp,
++ int irq, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++
++/*! Receive a message on the specified FIFO. Returns 0 on success,
++ * -errno on failure.
++ */
++extern int net_accel_msg_recv(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++
++/*! Look at a received message, if any, so a decision can be made
++ * about whether to read it now or not. Cookie is a bit of debug
++ * which is set here and checked when passed to
++ * net_accel_msg_recv_next()
++ */
++extern int net_accel_msg_peek(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg, int *cookie);
++/*! Move the queue onto the next element, used after finishing with a
++ * peeked msg
++ */
++extern int net_accel_msg_recv_next(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, int cookie);
++
++/*! Start sending a message without copying. Returns a pointer to a
++ * message that will be filled out in place. The queue is locked
++ * until the message is sent.
++ */
++extern
++struct net_accel_msg *net_accel_msg_start_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags);
++
++
++/*! Complete the sending of a message started with
++ * net_accel_msg_start_send. The message is implicit since the queue
++ * was locked by _start
++ */
++extern void net_accel_msg_complete_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags);
++
++/*! As net_accel_msg_complete_send but does the notify. */
++extern void net_accel_msg_complete_send_notify(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags, int irq);
++
++/*! Lock the queue so that multiple "_locked" functions can be called
++ * without the queue being modified by others
++ */
++static inline
++void net_accel_msg_lock_queue(sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ spin_lock_irqsave(&queue->lock, (*flags));
++ rmb();
++ BUG_ON(queue->is_locked);
++ queue->is_locked = 1;
++}
++
++/*! Unlock the queue */
++static inline
++void net_accel_msg_unlock_queue(sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ BUG_ON(!queue->is_locked);
++ queue->is_locked = 0;
++ wmb();
++ spin_unlock_irqrestore(&queue->lock, (*flags));
++}
++
++/*! Give up without sending a message that was started with
++ * net_accel_msg_start_send()
++ */
++static inline
++void net_accel_msg_abort_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ net_accel_msg_unlock_queue(queue, flags);
++}
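++
++/* Illustrative sketch (not part of the driver) of the zero-copy send
++ * path: the queue remains locked between _start and _complete/_abort,
++ * so the calls must be paired on the same code path. fill_out_msg()
++ * is a placeholder for writing the message in place:
++ *
++ *	unsigned long flags;
++ *	struct net_accel_msg *msg =
++ *		net_accel_msg_start_send(sp, queue, &flags);
++ *	if (msg != NULL) {
++ *		fill_out_msg(msg);
++ *		net_accel_msg_complete_send_notify(sp, queue, &flags, irq);
++ *	}
++ */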
++
++/*! Test the queue to ensure there is sufficient space */
++static inline
++int net_accel_msg_check_space(sh_msg_fifo2 *queue, unsigned space)
++{
++ return sh_fifo2_space(queue) >= space;
++}
++
++#endif /* NET_ACCEL_MSG_IFACE_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_shared_fifo.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_shared_fifo.h
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_shared_fifo.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_shared_fifo.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,127 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NET_ACCEL_SHARED_FIFO_H
++#define NET_ACCEL_SHARED_FIFO_H
++
++/*
++ * This is based on fifo.h, but handles sharing between address spaces
++ * that don't trust each other, by splitting out the read and write
++ * indices. This costs at least one pointer indirection more than the
++ * vanilla version per access.
++ */
++
++typedef struct {
++ char* fifo;
++ unsigned fifo_mask;
++ unsigned *fifo_rd_i;
++ unsigned *fifo_wr_i;
++} sh_byte_fifo2;
++
++#define SH_FIFO2_M(f, x) ((x) & ((f)->fifo_mask))
++
++static inline unsigned log2_ge(unsigned long n, unsigned min_order) {
++ unsigned order = min_order;
++ while((1ul << order) < n) ++order;
++ return order;
++}
++
++static inline unsigned long pow2(unsigned order) {
++ return (1ul << order);
++}
++
++#define is_pow2(x) (pow2(log2_ge((x), 0)) == (x))
++
++#define sh_fifo2_valid(f) ((f) && (f)->fifo && (f)->fifo_mask > 0 && \
++ is_pow2((f)->fifo_mask+1u))
++
++#define sh_fifo2_init(f, cap, _rptr, _wptr) \
++ do { \
++ BUG_ON(!is_pow2((cap) + 1)); \
++ (f)->fifo_rd_i = _rptr; \
++ (f)->fifo_wr_i = _wptr; \
++ *(f)->fifo_rd_i = *(f)->fifo_wr_i = 0u; \
++ (f)->fifo_mask = (cap); \
++ } while(0)
++
++#define sh_fifo2_num(f) SH_FIFO2_M((f),*(f)->fifo_wr_i - *(f)->fifo_rd_i)
++#define sh_fifo2_space(f) SH_FIFO2_M((f),*(f)->fifo_rd_i - *(f)->fifo_wr_i-1u)
++#define sh_fifo2_is_empty(f) (sh_fifo2_num(f)==0)
++#define sh_fifo2_not_empty(f) (sh_fifo2_num(f)!=0)
++#define sh_fifo2_is_full(f) (sh_fifo2_space(f)==0u)
++#define sh_fifo2_not_full(f) (sh_fifo2_space(f)!=0u)
++#define sh_fifo2_buf_size(f) ((f)->fifo_mask + 1u)
++#define sh_fifo2_capacity(f) ((f)->fifo_mask)
++#define sh_fifo2_end(f) ((f)->fifo + sh_fifo2_buf_size(f))
++#define sh_fifo2_not_half_full(f) (sh_fifo2_space(f) > (sh_fifo2_capacity(f) >> 1))
++
++#define sh_fifo2_peek(f) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_rd_i)])
++#define sh_fifo2_peekp(f) ((f)->fifo + SH_FIFO2_M((f), *(f)->fifo_rd_i))
++#define sh_fifo2_poke(f) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_wr_i)])
++#define sh_fifo2_pokep(f) ((f)->fifo + SH_FIFO2_M((f), *(f)->fifo_wr_i))
++#define sh_fifo2_peek_i(f,i) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_rd_i+(i))])
++#define sh_fifo2_poke_i(f,i) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_wr_i+(i))])
++
++#define sh_fifo2_rd_next(f) \
++ do {*(f)->fifo_rd_i = *(f)->fifo_rd_i + 1u;} while(0)
++#define sh_fifo2_wr_next(f) \
++ do {*(f)->fifo_wr_i = *(f)->fifo_wr_i + 1u;} while(0)
++#define sh_fifo2_rd_adv(f, n) \
++ do {*(f)->fifo_rd_i = *(f)->fifo_rd_i + (n);} while(0)
++#define sh_fifo2_wr_adv(f, n) \
++ do {*(f)->fifo_wr_i = *(f)->fifo_wr_i + (n);} while(0)
++
++#define sh_fifo2_put(f, v) \
++ do {sh_fifo2_poke(f) = (v); wmb(); sh_fifo2_wr_next(f);} while(0)
++
++#define sh_fifo2_get(f, pv) \
++ do {*(pv) = sh_fifo2_peek(f); mb(); sh_fifo2_rd_next(f);} while(0)
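++
++/* Worked example of the index arithmetic above (for reference): with
++ * a buffer of 1024 slots, sh_fifo2_init() is called with cap = 1023,
++ * so fifo_mask = 1023. One slot is always left empty so that a full
++ * FIFO (space == 0) is distinguishable from an empty one (num == 0);
++ * the usable capacity is therefore 1023 entries, not 1024.
++ */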
++
++static inline unsigned sh_fifo2_contig_num(sh_byte_fifo2 *f)
++{
++ unsigned fifo_wr_i = SH_FIFO2_M(f, *f->fifo_wr_i);
++ unsigned fifo_rd_i = SH_FIFO2_M(f, *f->fifo_rd_i);
++
++ return (fifo_wr_i >= fifo_rd_i)
++ ? fifo_wr_i - fifo_rd_i
++		: f->fifo_mask + 1u - fifo_rd_i;
++}
++
++static inline unsigned sh_fifo2_contig_space(sh_byte_fifo2 *f)
++{
++ unsigned fifo_wr_i = SH_FIFO2_M(f, *f->fifo_wr_i);
++ unsigned fifo_rd_i = SH_FIFO2_M(f, *f->fifo_rd_i);
++
++ return (fifo_rd_i > fifo_wr_i)
++ ? fifo_rd_i - fifo_wr_i - 1
++ : (f->fifo_mask + 1u - fifo_wr_i
++ /*
++ * The last byte can't be used if the read pointer
++ * is at zero.
++ */
++ - (fifo_rd_i==0));
++}
++
++
++#endif /* NET_ACCEL_SHARED_FIFO_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_util.c linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_util.c
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_util.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_util.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,333 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/if_ether.h>
++#include <asm/io.h>
++#include <asm/pgtable.h>
++#include <asm/hypercall.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++#include <xen/gnttab.h>
++
++#include "accel_util.h"
++
++#ifdef EFX_GCOV
++#include "gcov.h"
++
++static int __init net_accel_init(void)
++{
++ gcov_provider_init(THIS_MODULE);
++ return 0;
++}
++module_init(net_accel_init);
++
++static void __exit net_accel_exit(void)
++{
++ gcov_provider_fini(THIS_MODULE);
++}
++module_exit(net_accel_exit);
++#endif
++
++/* Shutdown remote domain that is misbehaving */
++int net_accel_shutdown_remote(int domain)
++{
++ struct sched_remote_shutdown sched_shutdown = {
++ .domain_id = domain,
++ .reason = SHUTDOWN_crash
++ };
++
++ EPRINTK("Crashing domain %d\n", domain);
++
++ return HYPERVISOR_sched_op(SCHEDOP_remote_shutdown, &sched_shutdown);
++}
++EXPORT_SYMBOL(net_accel_shutdown_remote);
++
++
++/* Based on xenbus_backend_client.c:xenbus_map_ring() */
++static int net_accel_map_grant(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr,
++ u64 *dev_bus_addr, unsigned flags)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)vaddr, flags,
++ gnt_ref, dev->otherend_id);
++
++ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
++
++ if (op.status != GNTST_okay) {
++ xenbus_dev_error
++ (dev, op.status,
++ "failed mapping in shared page %d from domain %d\n",
++ gnt_ref, dev->otherend_id);
++ } else {
++ *handle = op.handle;
++ if (dev_bus_addr)
++ *dev_bus_addr = op.dev_bus_addr;
++ }
++
++ return op.status;
++}
++
++
++/* Based on xenbus_backend_client.c:xenbus_unmap_ring() */
++static int net_accel_unmap_grant(struct xenbus_device *dev,
++ grant_handle_t handle,
++ void *vaddr, u64 dev_bus_addr,
++ unsigned flags)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)vaddr, flags, handle);
++
++ if (dev_bus_addr)
++ op.dev_bus_addr = dev_bus_addr;
++
++ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
++
++ if (op.status != GNTST_okay)
++ xenbus_dev_error(dev, op.status,
++ "failed unmapping page at handle %d error %d\n",
++ handle, op.status);
++
++ return op.status;
++}
++
++
++int net_accel_map_device_page(struct xenbus_device *dev,
++ int gnt_ref, grant_handle_t *handle,
++ u64 *dev_bus_addr)
++{
++ return net_accel_map_grant(dev, gnt_ref, handle, 0, dev_bus_addr,
++ GNTMAP_device_map);
++}
++EXPORT_SYMBOL_GPL(net_accel_map_device_page);
++
++
++int net_accel_unmap_device_page(struct xenbus_device *dev,
++ grant_handle_t handle, u64 dev_bus_addr)
++{
++ return net_accel_unmap_grant(dev, handle, 0, dev_bus_addr,
++ GNTMAP_device_map);
++}
++EXPORT_SYMBOL_GPL(net_accel_unmap_device_page);
++
++
++struct net_accel_valloc_grant_mapping {
++ struct vm_struct *vm;
++ int pages;
++ grant_handle_t grant_handles[0];
++};
++
++/* Map a series of grants into a contiguous virtual area */
++static void *net_accel_map_grants_valloc(struct xenbus_device *dev,
++ unsigned *grants, int npages,
++ unsigned flags, void **priv)
++{
++ struct net_accel_valloc_grant_mapping *map;
++ struct vm_struct *vm;
++ void *addr;
++ int i, j, rc;
++
++ vm = alloc_vm_area(PAGE_SIZE * npages);
++ if (vm == NULL) {
++ EPRINTK("No memory from alloc_vm_area.\n");
++ return NULL;
++ }
++ /*
++ * Get a structure in which we will record all the info needed
++ * to undo the mapping.
++ */
++ map = kzalloc(sizeof(struct net_accel_valloc_grant_mapping) +
++ npages * sizeof(grant_handle_t), GFP_KERNEL);
++ if (map == NULL) {
++ EPRINTK("No memory for net_accel_valloc_grant_mapping\n");
++ free_vm_area(vm);
++ return NULL;
++ }
++ map->vm = vm;
++ map->pages = npages;
++
++ /* Do the actual mapping */
++ addr = vm->addr;
++ for (i = 0; i < npages; i++) {
++ rc = net_accel_map_grant(dev, grants[i], map->grant_handles + i,
++ addr, NULL, flags);
++ if (rc != 0)
++ goto undo;
++ addr = (void*)((unsigned long)addr + PAGE_SIZE);
++ }
++
++ if (priv)
++ *priv = (void *)map;
++ else
++ kfree(map);
++
++ return vm->addr;
++
++ undo:
++ EPRINTK("Aborting contig map due to single map failure %d (%d of %d)\n",
++ rc, i+1, npages);
++ for (j = 0; j < i; j++) {
++ addr = (void*)((unsigned long)vm->addr + (j * PAGE_SIZE));
++ net_accel_unmap_grant(dev, map->grant_handles[j], addr, 0,
++ flags);
++ }
++ free_vm_area(vm);
++ kfree(map);
++ return NULL;
++}
++
++/* Undo the result of the mapping */
++static void net_accel_unmap_grants_vfree(struct xenbus_device *dev,
++ unsigned flags, void *priv)
++{
++ struct net_accel_valloc_grant_mapping *map =
++ (struct net_accel_valloc_grant_mapping *)priv;
++
++ void *addr = map->vm->addr;
++ int npages = map->pages;
++ int i;
++
++ for (i = 0; i < npages; i++) {
++ net_accel_unmap_grant(dev, map->grant_handles[i], addr, 0,
++ flags);
++ addr = (void*)((unsigned long)addr + PAGE_SIZE);
++ }
++ free_vm_area(map->vm);
++ kfree(map);
++}
++
++
++void *net_accel_map_grants_contig(struct xenbus_device *dev,
++ unsigned *grants, int npages,
++ void **priv)
++{
++ return net_accel_map_grants_valloc(dev, grants, npages,
++ GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_map_grants_contig);
++
++
++void net_accel_unmap_grants_contig(struct xenbus_device *dev,
++ void *priv)
++{
++ net_accel_unmap_grants_vfree(dev, GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_unmap_grants_contig);
++
++
++void *net_accel_map_iomem_page(struct xenbus_device *dev, int gnt_ref,
++ void **priv)
++{
++ return net_accel_map_grants_valloc(dev, &gnt_ref, 1,
++ GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_map_iomem_page);
++
++
++void net_accel_unmap_iomem_page(struct xenbus_device *dev, void *priv)
++{
++ net_accel_unmap_grants_vfree(dev, GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_unmap_iomem_page);
++
++
++int net_accel_grant_page(struct xenbus_device *dev, unsigned long mfn,
++ int is_iomem)
++{
++ int err = gnttab_grant_foreign_access(dev->otherend_id, mfn,
++ is_iomem ? GTF_PCD : 0);
++ if (err < 0)
++ xenbus_dev_error(dev, err, "failed granting access to page\n");
++ return err;
++}
++EXPORT_SYMBOL_GPL(net_accel_grant_page);
++
++
++int net_accel_ungrant_page(grant_ref_t gntref)
++{
++ if (unlikely(gnttab_query_foreign_access(gntref) != 0)) {
++ EPRINTK("%s: remote domain still using grant %d\n", __FUNCTION__,
++ gntref);
++ return -EBUSY;
++ }
++
++ gnttab_end_foreign_access(gntref, 0);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_ungrant_page);
++
++
++int net_accel_xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_xen_net_read_mac);
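++
++/* For reference: net_accel_xen_net_read_mac() expects the xenstore
++ * "mac" node to hold a colon-separated string such as
++ * "00:16:3e:0a:0b:0c"; it returns -ENOENT if any of the six byte
++ * fields or separators is malformed.
++ */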
++
++
++void net_accel_update_state(struct xenbus_device *dev, int state)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ DPRINTK("%s: setting accelstate to %s\n", __FUNCTION__,
++ xenbus_strstate(state));
++
++ if (xenbus_exists(XBT_NIL, dev->nodename, "")) {
++ VPRINTK("%s: nodename %s\n", __FUNCTION__, dev->nodename);
++ again:
++ err = xenbus_transaction_start(&tr);
++ if (err == 0)
++ err = xenbus_printf(tr, dev->nodename, "accelstate",
++ "%d", state);
++ if (err != 0) {
++ xenbus_transaction_end(tr, 1);
++ } else {
++ err = xenbus_transaction_end(tr, 0);
++ if (err == -EAGAIN)
++ goto again;
++ }
++ }
++}
++EXPORT_SYMBOL_GPL(net_accel_update_state);
++
++MODULE_LICENSE("GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/accel_util.h linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_util.h
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/accel_util.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/accel_util.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,127 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETBACK_ACCEL_UTIL_H
++#define NETBACK_ACCEL_UTIL_H
++
++#ifdef DPRINTK
++#undef DPRINTK
++#endif
++
++#define FILE_LEAF (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
++
++#if 1
++#define VPRINTK(_f, _a...)
++#else
++#define VPRINTK(_f, _a...) \
++ printk("(file=%s, line=%d) " _f, \
++ FILE_LEAF , __LINE__ , ## _a )
++#endif
++
++#if 1
++#define DPRINTK(_f, _a...)
++#else
++#define DPRINTK(_f, _a...) \
++ printk("(file=%s, line=%d) " _f, \
++ FILE_LEAF , __LINE__ , ## _a )
++#endif
++
++#define EPRINTK(_f, _a...) \
++ printk("(file=%s, line=%d) " _f, \
++ FILE_LEAF , __LINE__ , ## _a )
++
++#define EPRINTK_ON(exp) \
++ do { \
++ if (exp) \
++ EPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \
++ } while(0)
++
++#define DPRINTK_ON(exp) \
++ do { \
++ if (exp) \
++ DPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \
++ } while(0)
++
++#define MAC_FMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"
++#define MAC_ARG(_mac) (_mac)[0], (_mac)[1], (_mac)[2], (_mac)[3], (_mac)[4], (_mac)[5]
++
++#include <xen/xenbus.h>
++
++/*! Map a set of pages from another domain
++ * \param dev The xenbus device context
++ * \param grants The grant references of the pages to map
++ * \param npages The number of pages to map
++ * \param priv On exit, private data needed to undo the mapping
++ */
++extern
++void *net_accel_map_grants_contig(struct xenbus_device *dev,
++ unsigned *grants, int npages,
++ void **priv);
++
++/*! Unmap a set of pages mapped using net_accel_map_grants_contig.
++ * \param dev The xenbus device context
++ * \param priv The private data returned by the mapping function
++ */
++extern
++void net_accel_unmap_grants_contig(struct xenbus_device *dev, void *priv);
++
++/*! Read the MAC address of a device from xenstore */
++extern
++int net_accel_xen_net_read_mac(struct xenbus_device *dev, u8 mac[]);
++
++/*! Update the accelstate field for a device in xenstore */
++extern
++void net_accel_update_state(struct xenbus_device *dev, int state);
++
++/* These four map/unmap functions are based on
++ * xenbus_backend_client.c:xenbus_map_ring(). However, they are not
++ * used for ring buffers, instead just to map pages between domains,
++ * or to map a page so that it is accessible by a device
++ */
++extern
++int net_accel_map_device_page(struct xenbus_device *dev,
++ int gnt_ref, grant_handle_t *handle,
++ u64 *dev_bus_addr);
++extern
++int net_accel_unmap_device_page(struct xenbus_device *dev,
++ grant_handle_t handle, u64 dev_bus_addr);
++extern
++void *net_accel_map_iomem_page(struct xenbus_device *dev, int gnt_ref,
++ void **priv);
++extern
++void net_accel_unmap_iomem_page(struct xenbus_device *dev, void *priv);
++
++/*! Grant a page to a remote domain */
++extern
++int net_accel_grant_page(struct xenbus_device *dev, unsigned long mfn,
++ int is_iomem);
++/*! Undo a net_accel_grant_page */
++extern
++int net_accel_ungrant_page(grant_ref_t gntref);
++
++
++/*! Shutdown remote domain that is misbehaving */
++extern
++int net_accel_shutdown_remote(int domain);
++
++
++#endif
+diff -rpuN linux-2.6.18.8/drivers/xen/sfc_netutil/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/Makefile
+--- linux-2.6.18.8/drivers/xen/sfc_netutil/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/sfc_netutil/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,11 @@
++EXTRA_CFLAGS += -Idrivers/xen/sfc_netutil
++EXTRA_CFLAGS += -Werror
++
++ifdef GGOV
++EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage -DEFX_GCOV
++endif
++
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) := sfc_netutil.o
++
++sfc_netutil-objs := accel_cuckoo_hash.o accel_msg_iface.o accel_util.o
++
+diff -rpuN linux-2.6.18.8/drivers/xen/tpmback/common.h linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/common.h
+--- linux-2.6.18.8/drivers/xen/tpmback/common.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/common.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,85 @@
++/******************************************************************************
++ * drivers/xen/tpmback/common.h
++ */
++
++#ifndef __TPM__BACKEND__COMMON_H__
++#define __TPM__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct backend_info;
++
++typedef struct tpmif_st {
++ struct list_head tpmif_list;
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ tpmif_tx_interface_t *tx;
++ struct vm_struct *tx_area;
++
++ /* Miscellaneous private stuff. */
++ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
++ int active;
++
++ struct tpmif_st *hash_next;
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++
++ struct backend_info *bi;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++ struct page **mmap_pages;
++
++ char devname[20];
++} tpmif_t;
++
++void tpmif_disconnect_complete(tpmif_t * tpmif);
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi);
++void tpmif_interface_init(void);
++void tpmif_interface_exit(void);
++void tpmif_schedule_work(tpmif_t * tpmif);
++void tpmif_deschedule_work(tpmif_t * tpmif);
++void tpmif_xenbus_init(void);
++void tpmif_xenbus_exit(void);
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++long int tpmback_get_instance(struct backend_info *bi);
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
++
++
++#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define tpmif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ tpmif_disconnect_complete(_b); \
++ } while (0)
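++
++/* Note on the refcount helpers above: every tpmif_find() (which takes
++ * a reference on the interface it returns) must be balanced by a
++ * tpmif_put(); the final put tears the interface down via
++ * tpmif_disconnect_complete().
++ */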
++
++extern int num_frontends;
++
++static inline unsigned long idx_to_kaddr(tpmif_t *t, unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(page_to_pfn(t->mmap_pages[idx]));
++}
++
++#endif /* __TPMIF__BACKEND__COMMON_H__ */
+diff -rpuN linux-2.6.18.8/drivers/xen/tpmback/interface.c linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/interface.c
+--- linux-2.6.18.8/drivers/xen/tpmback/interface.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/interface.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,168 @@
++ /*****************************************************************************
++ * drivers/xen/tpmback/interface.c
++ *
++ * Virtual TPM interface management.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ *
++ * This code has been derived from drivers/xen/netback/interface.c
++ * Copyright (c) 2004, Keir Fraser
++ */
++
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
++
++static kmem_cache_t *tpmif_cachep;
++int num_frontends = 0;
++
++LIST_HEAD(tpmif_list);
++
++static tpmif_t *alloc_tpmif(domid_t domid, struct backend_info *bi)
++{
++ tpmif_t *tpmif;
++
++ tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
++ if (tpmif == NULL)
++ goto out_of_memory;
++
++ memset(tpmif, 0, sizeof (*tpmif));
++ tpmif->domid = domid;
++ tpmif->status = DISCONNECTED;
++ tpmif->bi = bi;
++ snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
++ atomic_set(&tpmif->refcnt, 1);
++
++ tpmif->mmap_pages = alloc_empty_pages_and_pagevec(TPMIF_TX_RING_SIZE);
++ if (tpmif->mmap_pages == NULL)
++ goto out_of_memory;
++
++ list_add(&tpmif->tpmif_list, &tpmif_list);
++ num_frontends++;
++
++ return tpmif;
++
++ out_of_memory:
++ if (tpmif != NULL)
++ kmem_cache_free(tpmif_cachep, tpmif);
++ printk("%s: out of memory\n", __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++}
++
++static void free_tpmif(tpmif_t * tpmif)
++{
++ num_frontends--;
++ list_del(&tpmif->tpmif_list);
++ free_empty_pages_and_pagevec(tpmif->mmap_pages, TPMIF_TX_RING_SIZE);
++ kmem_cache_free(tpmif_cachep, tpmif);
++}
++
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi)
++{
++ tpmif_t *tpmif;
++
++ list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
++ if (tpmif->bi == bi) {
++ if (tpmif->domid == domid) {
++ tpmif_get(tpmif);
++ return tpmif;
++ } else {
++ return ERR_PTR(-EEXIST);
++ }
++ }
++ }
++
++ return alloc_tpmif(domid, bi);
++}
++
++static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
++ GNTMAP_host_map, shared_page, tpmif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++		DPRINTK("Grant table operation failure!\n");
++ return op.status;
++ }
++
++ tpmif->shmem_ref = shared_page;
++ tpmif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(tpmif_t *tpmif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
++ GNTMAP_host_map, tpmif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
++{
++ int err;
++
++ if (tpmif->irq)
++ return 0;
++
++ if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++ return -ENOMEM;
++
++ err = map_frontend_page(tpmif, shared_page);
++ if (err) {
++ free_vm_area(tpmif->tx_area);
++ return err;
++ }
++
++ tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
++ memset(tpmif->tx, 0, PAGE_SIZE);
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
++ if (err < 0) {
++ unmap_frontend_page(tpmif);
++ free_vm_area(tpmif->tx_area);
++ return err;
++ }
++ tpmif->irq = err;
++
++ tpmif->shmem_ref = shared_page;
++ tpmif->active = 1;
++
++ return 0;
++}
++
++void tpmif_disconnect_complete(tpmif_t *tpmif)
++{
++ if (tpmif->irq)
++ unbind_from_irqhandler(tpmif->irq, tpmif);
++
++ if (tpmif->tx) {
++ unmap_frontend_page(tpmif);
++ free_vm_area(tpmif->tx_area);
++ }
++
++ free_tpmif(tpmif);
++}
++
++void __init tpmif_interface_init(void)
++{
++ tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
++ 0, 0, NULL, NULL);
++}
++
++void __exit tpmif_interface_exit(void)
++{
++ kmem_cache_destroy(tpmif_cachep);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/tpmback/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/Makefile
+--- linux-2.6.18.8/drivers/xen/tpmback/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o
++
++tpmbk-y += tpmback.o interface.o xenbus.o
+diff -rpuN linux-2.6.18.8/drivers/xen/tpmback/tpmback.c linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/tpmback.c
+--- linux-2.6.18.8/drivers/xen/tpmback/tpmback.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/tpmback.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,944 @@
++/******************************************************************************
++ * drivers/xen/tpmback/tpmback.c
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netback/netback.c
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++
++/* local data structures */
++struct data_exchange {
++ struct list_head pending_pak;
++ struct list_head current_pak;
++ unsigned int copied_so_far;
++ u8 has_opener:1;
++ u8 aborted:1;
++	rwlock_t pak_lock;	/* protects all of the previous fields */
++ wait_queue_head_t wait_queue;
++};
++
++struct vtpm_resp_hdr {
++ uint32_t instance_no;
++ uint16_t tag_no;
++ uint32_t len_no;
++ uint32_t ordinal_no;
++} __attribute__ ((packed));
++
++struct packet {
++ struct list_head next;
++ unsigned int data_len;
++ u8 *data_buffer;
++ tpmif_t *tpmif;
++ u32 tpm_instance;
++ u8 req_tag;
++ u32 last_read;
++ u8 flags;
++ struct timer_list processing_timer;
++};
++
++enum {
++ PACKET_FLAG_DISCARD_RESPONSE = 1,
++};
++
++/* local variables */
++static struct data_exchange dataex;
++
++/* local function prototypes */
++static int _packet_write(struct packet *pak,
++ const char *data, size_t size, int userbuffer);
++static void processing_timeout(unsigned long ptr);
++static int packet_read_shmem(struct packet *pak,
++ tpmif_t * tpmif,
++ u32 offset,
++ char *buffer, int isuserbuffer, u32 left);
++static int vtpm_queue_packet(struct packet *pak);
++
++/***************************************************************
++ Buffer copying for user and kernel space buffers.
++***************************************************************/
++static inline int copy_from_buffer(void *to,
++ const void *from, unsigned long size,
++ int isuserbuffer)
++{
++ if (isuserbuffer) {
++ if (copy_from_user(to, (void __user *)from, size))
++ return -EFAULT;
++ } else {
++ memcpy(to, from, size);
++ }
++ return 0;
++}
++
++static inline int copy_to_buffer(void *to,
++ const void *from, unsigned long size,
++ int isuserbuffer)
++{
++ if (isuserbuffer) {
++ if (copy_to_user((void __user *)to, from, size))
++ return -EFAULT;
++ } else {
++ memcpy(to, from, size);
++ }
++ return 0;
++}
++
++
++static void dataex_init(struct data_exchange *dataex)
++{
++ INIT_LIST_HEAD(&dataex->pending_pak);
++ INIT_LIST_HEAD(&dataex->current_pak);
++ dataex->has_opener = 0;
++ rwlock_init(&dataex->pak_lock);
++ init_waitqueue_head(&dataex->wait_queue);
++}
++
++/***************************************************************
++ Packet-related functions
++***************************************************************/
++
++static struct packet *packet_find_instance(struct list_head *head,
++ u32 tpm_instance)
++{
++ struct packet *pak;
++ struct list_head *p;
++
++ /*
++ * traverse the list of packets and return the first
++ * one with the given instance number
++ */
++ list_for_each(p, head) {
++ pak = list_entry(p, struct packet, next);
++
++ if (pak->tpm_instance == tpm_instance) {
++ return pak;
++ }
++ }
++ return NULL;
++}
++
++static struct packet *packet_find_packet(struct list_head *head, void *packet)
++{
++ struct packet *pak;
++ struct list_head *p;
++
++ /*
++	 * traverse the list of packets and return the one
++	 * that matches the given packet pointer
++ */
++ list_for_each(p, head) {
++ pak = list_entry(p, struct packet, next);
++
++ if (pak == packet) {
++ return pak;
++ }
++ }
++ return NULL;
++}
++
++static struct packet *packet_alloc(tpmif_t * tpmif,
++ u32 size, u8 req_tag, u8 flags)
++{
++ struct packet *pak = NULL;
++ pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
++ if (NULL != pak) {
++ if (tpmif) {
++ pak->tpmif = tpmif;
++ pak->tpm_instance = tpmback_get_instance(tpmif->bi);
++ tpmif_get(tpmif);
++ }
++ pak->data_len = size;
++ pak->req_tag = req_tag;
++ pak->last_read = 0;
++ pak->flags = flags;
++
++ /*
++ * cannot do tpmif_get(tpmif); bad things happen
++ * on the last tpmif_put()
++ */
++ init_timer(&pak->processing_timer);
++ pak->processing_timer.function = processing_timeout;
++ pak->processing_timer.data = (unsigned long)pak;
++ }
++ return pak;
++}
++
++static inline void packet_reset(struct packet *pak)
++{
++ pak->last_read = 0;
++}
++
++static void packet_free(struct packet *pak)
++{
++ if (timer_pending(&pak->processing_timer)) {
++ BUG();
++ }
++
++ if (pak->tpmif)
++ tpmif_put(pak->tpmif);
++ kfree(pak->data_buffer);
++ /*
++ * cannot do tpmif_put(pak->tpmif); bad things happen
++ * on the last tpmif_put()
++ */
++ kfree(pak);
++}
++
++
++/*
++ * Write data to the shared memory and send it to the FE.
++ */
++static int packet_write(struct packet *pak,
++ const char *data, size_t size, int isuserbuffer)
++{
++ int rc = 0;
++
++ if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
++		/* Don't send a response to this packet. Just acknowledge it. */
++ rc = size;
++ } else {
++ rc = _packet_write(pak, data, size, isuserbuffer);
++ }
++
++ return rc;
++}
++
++int _packet_write(struct packet *pak,
++ const char *data, size_t size, int isuserbuffer)
++{
++ /*
++ * Write into the shared memory pages directly
++ * and send it to the front end.
++ */
++ tpmif_t *tpmif = pak->tpmif;
++ grant_handle_t handle;
++ int rc = 0;
++ unsigned int i = 0;
++ unsigned int offset = 0;
++
++ if (tpmif == NULL) {
++ return -EFAULT;
++ }
++
++ if (tpmif->status == DISCONNECTED) {
++ return size;
++ }
++
++ while (offset < size && i < TPMIF_TX_RING_SIZE) {
++ unsigned int tocopy;
++ struct gnttab_map_grant_ref map_op;
++ struct gnttab_unmap_grant_ref unmap_op;
++ tpmif_tx_request_t *tx;
++
++ tx = &tpmif->tx->ring[i].req;
++
++ if (0 == tx->addr) {
++ DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
++ return 0;
++ }
++
++ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, tx->ref, tpmif->domid);
++
++ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &map_op, 1))) {
++ BUG();
++ }
++
++ handle = map_op.handle;
++
++ if (map_op.status) {
++			DPRINTK("Grant table operation failure!\n");
++ return 0;
++ }
++
++ tocopy = min_t(size_t, size - offset, PAGE_SIZE);
++
++ if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
++ (tx->addr & ~PAGE_MASK)),
++ &data[offset], tocopy, isuserbuffer)) {
++ tpmif_put(tpmif);
++ return -EFAULT;
++ }
++ tx->size = tocopy;
++
++ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, handle);
++
++ if (unlikely
++ (HYPERVISOR_grant_table_op
++ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++ BUG();
++ }
++
++ offset += tocopy;
++ i++;
++ }
++
++ rc = offset;
++ DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
++ notify_remote_via_irq(tpmif->irq);
++
++ return rc;
++}
++
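++/*
++ * Sizing note: each ring slot maps exactly one granted page, so a
++ * response of 'size' bytes consumes ceil(size / PAGE_SIZE) slots and
++ * anything beyond TPMIF_TX_RING_SIZE * PAGE_SIZE bytes is quietly
++ * dropped by the loop bound in _packet_write().
++ */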
++/*
++ * Read data from the shared memory and copy it directly into the
++ * provided buffer. Advance the last_read indicator which tells
++ * how many bytes have already been read.
++ */
++static int packet_read(struct packet *pak, size_t numbytes,
++ char *buffer, size_t buffersize, int isuserbuffer)
++{
++ tpmif_t *tpmif = pak->tpmif;
++
++ /*
++ * Read 'numbytes' of data from the buffer. The first 4
++ * bytes are the instance number in network byte order,
++ * after that come the data from the shared memory buffer.
++ */
++ u32 to_copy;
++ u32 offset = 0;
++ u32 room_left = buffersize;
++
++ if (pak->last_read < 4) {
++ /*
++ * copy the instance number into the buffer
++ */
++ u32 instance_no = htonl(pak->tpm_instance);
++ u32 last_read = pak->last_read;
++
++ to_copy = min_t(size_t, 4 - last_read, numbytes);
++
++ if (copy_to_buffer(&buffer[0],
++ &(((u8 *) & instance_no)[last_read]),
++ to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++
++ pak->last_read += to_copy;
++ offset += to_copy;
++ room_left -= to_copy;
++ }
++
++ /*
++ * If the packet has a data buffer appended, read from it...
++ */
++
++ if (room_left > 0) {
++ if (pak->data_buffer) {
++ u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
++ u32 last_read = pak->last_read - 4;
++
++ if (copy_to_buffer(&buffer[offset],
++ &pak->data_buffer[last_read],
++ to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++ pak->last_read += to_copy;
++ offset += to_copy;
++ } else {
++ offset = packet_read_shmem(pak,
++ tpmif,
++ offset,
++ buffer,
++ isuserbuffer, room_left);
++ }
++ }
++ return offset;
++}
++
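++/*
++ * Wire format as seen by the reader (layout sketch):
++ *
++ *	| instance number (4 bytes, network order) | TPM blob ... |
++ *
++ * pak->last_read counts consumed bytes across both parts, which is
++ * why the shared-memory path below subtracts 4 from it.
++ */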
++static int packet_read_shmem(struct packet *pak,
++ tpmif_t * tpmif,
++ u32 offset, char *buffer, int isuserbuffer,
++ u32 room_left)
++{
++ u32 last_read = pak->last_read - 4;
++ u32 i = (last_read / PAGE_SIZE);
++ u32 pg_offset = last_read & (PAGE_SIZE - 1);
++ u32 to_copy;
++ grant_handle_t handle;
++
++ tpmif_tx_request_t *tx;
++
++ tx = &tpmif->tx->ring[0].req;
++ /*
++	 * Start copying data at the page with index 'i'
++	 * and within that page at offset 'pg_offset'.
++ * Copy a maximum of 'room_left' bytes.
++ */
++ to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
++ while (to_copy > 0) {
++ void *src;
++ struct gnttab_map_grant_ref map_op;
++ struct gnttab_unmap_grant_ref unmap_op;
++
++ tx = &tpmif->tx->ring[i].req;
++
++ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, tx->ref, tpmif->domid);
++
++ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &map_op, 1))) {
++ BUG();
++ }
++
++ if (map_op.status) {
++			DPRINTK("Grant table operation failure!\n");
++ return -EFAULT;
++ }
++
++ handle = map_op.handle;
++
++ if (to_copy > tx->size) {
++ /*
++ * User requests more than what's available
++ */
++ to_copy = min_t(u32, tx->size, to_copy);
++ }
++
++ DPRINTK("Copying from mapped memory at %08lx\n",
++ (unsigned long)(idx_to_kaddr(tpmif, i) |
++ (tx->addr & ~PAGE_MASK)));
++
++ src = (void *)(idx_to_kaddr(tpmif, i) |
++ ((tx->addr & ~PAGE_MASK) + pg_offset));
++ if (copy_to_buffer(&buffer[offset],
++ src, to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++
++ DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
++ tpmif->domid, buffer[offset], buffer[offset + 1],
++ buffer[offset + 2], buffer[offset + 3]);
++
++ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, handle);
++
++ if (unlikely
++ (HYPERVISOR_grant_table_op
++ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++ BUG();
++ }
++
++ offset += to_copy;
++ pg_offset = 0;
++ last_read += to_copy;
++ room_left -= to_copy;
++
++ to_copy = min_t(u32, PAGE_SIZE, room_left);
++ i++;
++ } /* while (to_copy > 0) */
++ /*
++ * Adjust the last_read pointer
++ */
++ pak->last_read = last_read + 4;
++ return offset;
++}
++
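++/*
++ * Worked example for the index math above (assuming 4 KiB pages): a
++ * read resuming at last_read = 5004 strips the 4-byte prefix to get
++ * 5000, so copying continues in ring slot i = 5000 / 4096 = 1 at page
++ * offset 5000 & 4095 = 904.
++ */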
++/* ============================================================
++ * The file layer for reading data from this device
++ * ============================================================
++ */
++static int vtpm_op_open(struct inode *inode, struct file *f)
++{
++ int rc = 0;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ if (dataex.has_opener == 0) {
++ dataex.has_opener = 1;
++ } else {
++ rc = -EPERM;
++ }
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return rc;
++}
++
++static ssize_t vtpm_op_read(struct file *file,
++ char __user * data, size_t size, loff_t * offset)
++{
++ int ret_size = -ENODATA;
++ struct packet *pak = NULL;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ if (dataex.aborted) {
++ dataex.aborted = 0;
++ dataex.copied_so_far = 0;
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return -EIO;
++ }
++
++ if (list_empty(&dataex.pending_pak)) {
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ wait_event_interruptible(dataex.wait_queue,
++ !list_empty(&dataex.pending_pak));
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ dataex.copied_so_far = 0;
++ }
++
++ if (!list_empty(&dataex.pending_pak)) {
++ unsigned int left;
++
++ pak = list_entry(dataex.pending_pak.next, struct packet, next);
++ left = pak->data_len - dataex.copied_so_far;
++ list_del(&pak->next);
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ DPRINTK("size given by app: %d, available: %d\n", size, left);
++
++ ret_size = min_t(size_t, size, left);
++
++ ret_size = packet_read(pak, ret_size, data, size, 1);
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++
++ if (ret_size < 0) {
++ del_singleshot_timer_sync(&pak->processing_timer);
++ packet_free(pak);
++ dataex.copied_so_far = 0;
++ } else {
++ DPRINTK("Copied %d bytes to user buffer\n", ret_size);
++
++ dataex.copied_so_far += ret_size;
++ if (dataex.copied_so_far >= pak->data_len + 4) {
++ DPRINTK("All data from this packet given to app.\n");
++ /* All data given to app */
++
++ del_singleshot_timer_sync(&pak->
++ processing_timer);
++ list_add_tail(&pak->next, &dataex.current_pak);
++ /*
++				 * The more frontends that are handled at the same time,
++ * the more time we give the TPM to process the request.
++ */
++ mod_timer(&pak->processing_timer,
++ jiffies + (num_frontends * 60 * HZ));
++ dataex.copied_so_far = 0;
++ } else {
++ list_add(&pak->next, &dataex.pending_pak);
++ }
++ }
++ }
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ DPRINTK("Returning result from read to app: %d\n", ret_size);
++
++ return ret_size;
++}
++
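++/*
++ * Reader contract: a packet stays on pending_pak until all of its
++ * data_len + 4 bytes (payload plus instance prefix) have been copied
++ * out; only then is it moved to current_pak to await the matching
++ * write(). Short reads are therefore resumable.
++ */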
++/*
++ * Write operation - only works after a previous read operation!
++ */
++static ssize_t vtpm_op_write(struct file *file,
++ const char __user * data, size_t size,
++ loff_t * offset)
++{
++ struct packet *pak;
++ int rc = 0;
++ unsigned int off = 4;
++ unsigned long flags;
++ struct vtpm_resp_hdr vrh;
++
++ /*
++ * Minimum required packet size is:
++ * 4 bytes for instance number
++ * 2 bytes for tag
++ * 4 bytes for paramSize
++ * 4 bytes for the ordinal
++ * sum: 14 bytes
++ */
++ if (size < sizeof (vrh))
++ return -EFAULT;
++
++ if (copy_from_user(&vrh, data, sizeof (vrh)))
++ return -EFAULT;
++
++ /* malformed packet? */
++ if ((off + ntohl(vrh.len_no)) != size)
++ return -EFAULT;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ pak = packet_find_instance(&dataex.current_pak,
++ ntohl(vrh.instance_no));
++
++ if (pak == NULL) {
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
++ ntohl(vrh.instance_no));
++ return -EFAULT;
++ }
++
++ del_singleshot_timer_sync(&pak->processing_timer);
++ list_del(&pak->next);
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ /*
++	 * The first 'off' bytes hold the instance number - skip them.
++ */
++ size -= off;
++
++ rc = packet_write(pak, &data[off], size, 1);
++
++ if (rc > 0) {
++		/* account for the first 4 bytes (instance number) skipped above */
++ rc += off;
++ }
++ packet_free(pak);
++ return rc;
++}
++
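++/*
++ * Illustrative user-space exchange with this device (hypothetical
++ * fd/buffer names; the 4-byte instance prefix must be echoed back):
++ *
++ *	n = read(fd, buf, sizeof(buf));    request; buf[0..3] = instance
++ *	(hand buf[4..n-1] to the TPM emulator)
++ *	write(fd, buf, 4 + resp_len);      response for that instance
++ */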
++static int vtpm_op_release(struct inode *inode, struct file *file)
++{
++ unsigned long flags;
++
++ vtpm_release_packets(NULL, 1);
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ dataex.has_opener = 0;
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return 0;
++}
++
++static unsigned int vtpm_op_poll(struct file *file,
++ struct poll_table_struct *pts)
++{
++ unsigned int flags = POLLOUT | POLLWRNORM;
++
++ poll_wait(file, &dataex.wait_queue, pts);
++ if (!list_empty(&dataex.pending_pak)) {
++ flags |= POLLIN | POLLRDNORM;
++ }
++ return flags;
++}
++
++static const struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = vtpm_op_open,
++ .read = vtpm_op_read,
++ .write = vtpm_op_write,
++ .release = vtpm_op_release,
++ .poll = vtpm_op_poll,
++};
++
++static struct miscdevice vtpms_miscdevice = {
++ .minor = 225,
++ .name = "vtpm",
++ .fops = &vtpm_ops,
++};
++
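++/*
++ * The fixed misc minor 225 makes the device show up as /dev/vtpm on a
++ * typical system; vtpm_op_open() enforces a single opener, so exactly
++ * one TPM manager process drives this interface at a time.
++ */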
++/***************************************************************
++ Utility functions
++***************************************************************/
++
++static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
++{
++ int rc;
++ static const unsigned char tpm_error_message_fail[] = {
++ 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x0a,
++ 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
++ };
++ unsigned char buffer[sizeof (tpm_error_message_fail)];
++
++ memcpy(buffer, tpm_error_message_fail,
++ sizeof (tpm_error_message_fail));
++ /*
++	 * Insert the right response tag depending on the given tag.
++	 * All response tags are '+3' relative to the request tag.
++ */
++ buffer[1] = req_tag + 3;
++
++ /*
++ * Write the data to shared memory and notify the front-end
++ */
++ rc = packet_write(pak, buffer, sizeof (buffer), 0);
++
++ return rc;
++}
++
++static int _vtpm_release_packets(struct list_head *head,
++ tpmif_t * tpmif, int send_msgs)
++{
++ int aborted = 0;
++ int c = 0;
++ struct packet *pak;
++ struct list_head *pos, *tmp;
++
++ list_for_each_safe(pos, tmp, head) {
++ pak = list_entry(pos, struct packet, next);
++ c += 1;
++
++ if (tpmif == NULL || pak->tpmif == tpmif) {
++ int can_send = 0;
++
++ del_singleshot_timer_sync(&pak->processing_timer);
++ list_del(&pak->next);
++
++ if (pak->tpmif && pak->tpmif->status == CONNECTED) {
++ can_send = 1;
++ }
++
++ if (send_msgs && can_send) {
++ tpm_send_fail_message(pak, pak->req_tag);
++ }
++ packet_free(pak);
++ if (c == 1)
++ aborted = 1;
++ }
++ }
++ return aborted;
++}
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
++{
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++
++ dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
++ tpmif,
++ send_msgs);
++ _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return 0;
++}
++
++static int vtpm_queue_packet(struct packet *pak)
++{
++ int rc = 0;
++
++ if (dataex.has_opener) {
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ list_add_tail(&pak->next, &dataex.pending_pak);
++ /* give the TPM some time to pick up the request */
++ mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ wake_up_interruptible(&dataex.wait_queue);
++ } else {
++ rc = -EFAULT;
++ }
++ return rc;
++}
++
++static int vtpm_receive(tpmif_t * tpmif, u32 size)
++{
++ int rc = 0;
++ unsigned char buffer[10];
++ __be32 *native_size;
++ struct packet *pak = packet_alloc(tpmif, size, 0, 0);
++
++ if (!pak)
++ return -ENOMEM;
++ /*
++ * Read 10 bytes from the received buffer to test its
++ * content for validity.
++ */
++ if (sizeof (buffer) != packet_read(pak,
++ sizeof (buffer), buffer,
++ sizeof (buffer), 0)) {
++ goto failexit;
++ }
++ /*
++ * Reset the packet read pointer so we can read all its
++ * contents again.
++ */
++ packet_reset(pak);
++
++ native_size = (__force __be32 *) (&buffer[4 + 2]);
++ /*
++ * Verify that the size of the packet is correct
++ * as indicated and that there's actually someone reading packets.
++ * The minimum size of the packet is '10' for tag, size indicator
++ * and ordinal.
++ */
++ if (size < 10 ||
++ be32_to_cpu(*native_size) != size ||
++ 0 == dataex.has_opener || tpmif->status != CONNECTED) {
++ rc = -EINVAL;
++ goto failexit;
++ } else {
++ rc = vtpm_queue_packet(pak);
++ if (rc < 0)
++ goto failexit;
++ }
++ return 0;
++
++ failexit:
++ if (pak) {
++ tpm_send_fail_message(pak, buffer[4 + 1]);
++ packet_free(pak);
++ }
++ return rc;
++}
++
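++/*
++ * Header sanity check, spelled out: after the 4-byte instance prefix,
++ * buffer[4..5] hold the TPM tag and buffer[6..9] the big-endian
++ * paramSize, which must match the ring request size; buffer[4 + 1] is
++ * the tag's low byte, from which the failure-response tag is derived.
++ */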
++/*
++ * Timeout function that gets invoked when a packet has not been processed
++ * during the timeout period.
++ * The packet must be on a list when this function is invoked. This
++ * also means that once it's taken off a list, the timer must be
++ * destroyed as well.
++ */
++static void processing_timeout(unsigned long ptr)
++{
++ struct packet *pak = (struct packet *)ptr;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++	/*
++	 * Check whether the packet is still on one of the
++	 * lists; if so, it was not processed in time.
++	 */
++ if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
++ pak == packet_find_packet(&dataex.current_pak, pak)) {
++ if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
++ tpm_send_fail_message(pak, pak->req_tag);
++ }
++ /* discard future responses */
++ pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
++ }
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++}
++
++static void tpm_tx_action(unsigned long unused);
++static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
++
++static struct list_head tpm_schedule_list;
++static spinlock_t tpm_schedule_list_lock;
++
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ tasklet_schedule(&tpm_tx_tasklet);
++}
++
++static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
++{
++ return tpmif->list.next != NULL;
++}
++
++static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
++{
++ spin_lock_irq(&tpm_schedule_list_lock);
++ if (likely(__on_tpm_schedule_list(tpmif))) {
++ list_del(&tpmif->list);
++ tpmif->list.next = NULL;
++ tpmif_put(tpmif);
++ }
++ spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
++{
++ if (__on_tpm_schedule_list(tpmif))
++ return;
++
++ spin_lock_irq(&tpm_schedule_list_lock);
++ if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
++ list_add_tail(&tpmif->list, &tpm_schedule_list);
++ tpmif_get(tpmif);
++ }
++ spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++void tpmif_schedule_work(tpmif_t * tpmif)
++{
++ add_to_tpm_schedule_list_tail(tpmif);
++ maybe_schedule_tx_action();
++}
++
++void tpmif_deschedule_work(tpmif_t * tpmif)
++{
++ remove_from_tpm_schedule_list(tpmif);
++}
++
++static void tpm_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ tpmif_t *tpmif;
++ tpmif_tx_request_t *tx;
++
++ DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
++
++ while (!list_empty(&tpm_schedule_list)) {
++ /* Get a tpmif from the list with work to do. */
++ ent = tpm_schedule_list.next;
++ tpmif = list_entry(ent, tpmif_t, list);
++ tpmif_get(tpmif);
++ remove_from_tpm_schedule_list(tpmif);
++
++ tx = &tpmif->tx->ring[0].req;
++
++ /* pass it up */
++ vtpm_receive(tpmif, tx->size);
++
++ tpmif_put(tpmif);
++ }
++}
++
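++/*
++ * Reference counting around the schedule list: an interface holds one
++ * reference while queued (taken in add_to_tpm_schedule_list_tail(),
++ * dropped in remove_from_tpm_schedule_list()) plus a transient one for
++ * the duration of each tpm_tx_action() iteration.
++ */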
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ tpmif_t *tpmif = (tpmif_t *) dev_id;
++
++ add_to_tpm_schedule_list_tail(tpmif);
++ maybe_schedule_tx_action();
++ return IRQ_HANDLED;
++}
++
++static int __init tpmback_init(void)
++{
++ int rc;
++
++ if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
++ printk(KERN_ALERT
++ "Could not register misc device for TPM BE.\n");
++ return rc;
++ }
++
++ dataex_init(&dataex);
++
++ spin_lock_init(&tpm_schedule_list_lock);
++ INIT_LIST_HEAD(&tpm_schedule_list);
++
++ tpmif_interface_init();
++ tpmif_xenbus_init();
++
++ printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
++
++ return 0;
++}
++
++module_init(tpmback_init);
++
++void __exit tpmback_exit(void)
++{
++ vtpm_release_packets(NULL, 0);
++ tpmif_xenbus_exit();
++ tpmif_interface_exit();
++ misc_deregister(&vtpms_miscdevice);
++}
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/tpmback/xenbus.c linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/xenbus.c
+--- linux-2.6.18.8/drivers/xen/tpmback/xenbus.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/tpmback/xenbus.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,289 @@
++/* Xenbus code for tpmif backend
++ Copyright (C) 2005 IBM Corporation
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++
++ /* our communications channel */
++ tpmif_t *tpmif;
++
++ long int frontend_id;
++	long int instance;	/* instance of TPM */
++	u8 is_instance_set;	/* whether instance number has been set */
++
++ /* watch front end for changes */
++ struct xenbus_watch backend_watch;
++};
++
++static void maybe_connect(struct backend_info *be);
++static void connect(struct backend_info *be);
++static int connect_ring(struct backend_info *be);
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len);
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state);
++
++long int tpmback_get_instance(struct backend_info *bi)
++{
++ long int res = -1;
++ if (bi && bi->is_instance_set)
++ res = bi->instance;
++ return res;
++}
++
++static int tpmback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (!be) return 0;
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++ if (be->tpmif) {
++ be->tpmif->bi = NULL;
++ vtpm_release_packets(be->tpmif, 0);
++ tpmif_put(be->tpmif);
++ be->tpmif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++static int tpmback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->is_instance_set = 0;
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ err = xenbus_watch_path2(dev, dev->nodename,
++ "instance", &be->backend_watch,
++ backend_changed);
++ if (err) {
++ goto fail;
++ }
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err) {
++ goto fail;
++ }
++ return 0;
++fail:
++ tpmback_remove(dev);
++ return err;
++}
++
++
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ long instance;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "instance","%li", &instance);
++ if (XENBUS_EXIST_ERR(err)) {
++ return;
++ }
++
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading instance");
++ return;
++ }
++
++ if (be->is_instance_set == 0) {
++ be->instance = instance;
++ be->is_instance_set = 1;
++ }
++}
++
++
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ err = connect_ring(be);
++ if (err) {
++ return;
++ }
++ maybe_connect(be);
++ break;
++
++ case XenbusStateClosing:
++ be->instance = -1;
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateUnknown: /* keep it here */
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ device_unregister(&be->dev->dev);
++ tpmback_remove(dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL,
++ "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
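++/*
++ * State-machine sketch: the backend mirrors the frontend - on
++ * Connected it maps the ring and advertises "ready", on Closing it
++ * follows to Closing, and on Closed/Unknown it switches to Closed and
++ * tears the device down.
++ */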
++
++
++static void maybe_connect(struct backend_info *be)
++{
++ if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
++ return;
++
++ connect(be);
++}
++
++
++static void connect(struct backend_info *be)
++{
++ struct xenbus_transaction xbt;
++ int err;
++ struct xenbus_device *dev = be->dev;
++ unsigned long ready = 1;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(be->dev, err, "starting transaction");
++ return;
++ }
++
++ err = xenbus_printf(xbt, be->dev->nodename,
++ "ready", "%lu", ready);
++ if (err) {
++ xenbus_dev_fatal(be->dev, err, "writing 'ready'");
++ goto abort;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(be->dev, err, "end of transaction");
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (!err)
++ be->tpmif->status = CONNECTED;
++ return;
++abort:
++ xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ int err;
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_error(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ if (!be->tpmif) {
++ be->tpmif = tpmif_find(dev->otherend_id, be);
++ if (IS_ERR(be->tpmif)) {
++ err = PTR_ERR(be->tpmif);
++ be->tpmif = NULL;
++ xenbus_dev_fatal(dev,err,"creating vtpm interface");
++ return err;
++ }
++ }
++
++ if (be->tpmif != NULL) {
++ err = tpmif_map(be->tpmif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_error(dev, err,
++ "mapping shared-frame %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++ }
++ return 0;
++}
++
++
++static const struct xenbus_device_id tpmback_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++
++static struct xenbus_driver tpmback = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmback_ids,
++ .probe = tpmback_probe,
++ .remove = tpmback_remove,
++ .otherend_changed = frontend_changed,
++};
++
++
++void tpmif_xenbus_init(void)
++{
++ xenbus_register_backend(&tpmback);
++}
++
++void tpmif_xenbus_exit(void)
++{
++ xenbus_unregister_driver(&tpmback);
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/util.c linux-2.6.18-xen-3.3.0/drivers/xen/util.c
+--- linux-2.6.18.8/drivers/xen/util.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/util.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,65 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++
++struct class *get_xen_class(void)
++{
++ static struct class *xen_class;
++
++ if (xen_class)
++ return xen_class;
++
++ xen_class = class_create(THIS_MODULE, "xen");
++ if (IS_ERR(xen_class)) {
++		printk(KERN_WARNING "Failed to create xen sysfs class.\n");
++ xen_class = NULL;
++ }
++
++ return xen_class;
++}
++EXPORT_SYMBOL_GPL(get_xen_class);
++
++#ifdef CONFIG_X86
++static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ /* apply_to_page_range() does all the hard work. */
++ return 0;
++}
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++ struct vm_struct *area;
++
++ area = get_vm_area(size, VM_IOREMAP);
++ if (area == NULL)
++ return NULL;
++
++ /*
++ * This ensures that page tables are constructed for this region
++ * of kernel virtual address space and mapped into init_mm.
++ */
++ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
++ area->size, f, NULL)) {
++ free_vm_area(area);
++ return NULL;
++ }
++
++ /* Map page directories into every address space. */
++ vmalloc_sync_all();
++
++ return area;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++ struct vm_struct *ret;
++ ret = remove_vm_area(area->addr);
++ BUG_ON(ret != area);
++ kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++#endif /* CONFIG_X86 */
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/Makefile linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/Makefile
+--- linux-2.6.18.8/drivers/xen/xenbus/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,9 @@
++obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
++obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
++
++xenbus_be-objs =
++xenbus_be-objs += xenbus_backend_client.o
++
++xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
++obj-y += $(xenbus-y) $(xenbus-m)
++obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_backend_client.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_backend_client.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_backend_client.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_backend_client.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,147 @@
++/******************************************************************************
++ * Backend-client-facing interface for the Xenbus driver. In other words, the
++ * interface between the Xenbus and the device-specific code in the backend
++ * driver.
++ *
++ * Copyright (C) 2005-2006 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/err.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++
++/* Based on Rusty Russell's skeleton driver's map_page */
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
++{
++ struct gnttab_map_grant_ref op;
++ struct vm_struct *area;
++
++ area = alloc_vm_area(PAGE_SIZE);
++ if (!area)
++ return ERR_PTR(-ENOMEM);
++
++ gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++ gnt_ref, dev->otherend_id);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay) {
++ free_vm_area(area);
++ xenbus_dev_fatal(dev, op.status,
++ "mapping in shared page %d from domain %d",
++ gnt_ref, dev->otherend_id);
++ BUG_ON(!IS_ERR(ERR_PTR(op.status)));
++ return ERR_PTR(op.status);
++ }
++
++ /* Stuff the handle in an unused field */
++ area->phys_addr = (unsigned long)op.handle;
++
++ return area;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
++
++
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++ gnt_ref, dev->otherend_id);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay) {
++ xenbus_dev_fatal(dev, op.status,
++ "mapping in shared page %d from domain %d",
++ gnt_ref, dev->otherend_id);
++ } else
++ *handle = op.handle;
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring);
++
++
++/* Based on Rusty Russell's skeleton driver's unmap_page */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++ (grant_handle_t)area->phys_addr);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status == GNTST_okay)
++ free_vm_area(area);
++ else
++ xenbus_dev_error(dev, op.status,
++ "unmapping page at handle %d error %d",
++ (int16_t)area->phys_addr, op.status);
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
++
++
++int xenbus_unmap_ring(struct xenbus_device *dev,
++ grant_handle_t handle, void *vaddr)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++ handle);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay)
++ xenbus_dev_error(dev, op.status,
++ "unmapping page at handle %d error %d",
++ handle, op.status);
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
++
++int xenbus_dev_is_online(struct xenbus_device *dev)
++{
++ int rc, val;
++
++ rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
++ if (rc != 1)
++ val = 0; /* no online node present */
++
++ return val;
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_client.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_client.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_client.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_client.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,284 @@
++/******************************************************************************
++ * Client-facing interface for the Xenbus driver. In other words, the
++ * interface between the Xenbus and the device-specific code, be it the
++ * frontend or the backend of that driver.
++ *
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/slab.h>
++#include <xen/evtchn.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++
++const char *xenbus_strstate(enum xenbus_state state)
++{
++ static const char *const name[] = {
++ [ XenbusStateUnknown ] = "Unknown",
++ [ XenbusStateInitialising ] = "Initialising",
++ [ XenbusStateInitWait ] = "InitWait",
++ [ XenbusStateInitialised ] = "Initialised",
++ [ XenbusStateConnected ] = "Connected",
++ [ XenbusStateClosing ] = "Closing",
++ [ XenbusStateClosed ] = "Closed",
++ };
++ return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
++}
++EXPORT_SYMBOL_GPL(xenbus_strstate);
++
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++ struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int))
++{
++ int err;
++
++ watch->node = path;
++ watch->callback = callback;
++
++ err = register_xenbus_watch(watch);
++
++ if (err) {
++ watch->node = NULL;
++ watch->callback = NULL;
++ xenbus_dev_fatal(dev, err, "adding watch on %s", path);
++ }
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_watch_path);
++
++
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++ const char *path2, struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int))
++{
++ int err;
++ char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2);
++ if (!state) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
++ return -ENOMEM;
++ }
++ err = xenbus_watch_path(dev, state, watch, callback);
++
++ if (err)
++ kfree(state);
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_watch_path2);
++
++
++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
++{
++ /* We check whether the state is currently set to the given value, and
++ if not, then the state is set. We don't want to unconditionally
++ write the given state, because we don't want to fire watches
++ unnecessarily. Furthermore, if the node has gone, we don't write
++ to it, as the device will be tearing down, and we don't want to
++ resurrect that directory.
++
++ Note that, because of this cached value of our state, this function
++ will not work inside a Xenstore transaction (something it was
++ trying to in the past) because dev->state would not get reset if
++ the transaction was aborted.
++
++ */
++
++ int current_state;
++ int err;
++
++ if (state == dev->state)
++ return 0;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
++ &current_state);
++ if (err != 1)
++ return 0;
++
++ err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
++ if (err) {
++ if (state != XenbusStateClosing) /* Avoid looping */
++ xenbus_dev_fatal(dev, err, "writing new state");
++ return err;
++ }
++
++ dev->state = state;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_switch_state);
++
++int xenbus_frontend_closed(struct xenbus_device *dev)
++{
++ xenbus_switch_state(dev, XenbusStateClosed);
++ complete(&dev->down);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
++
++/**
++ * Return the path to the error node for the given device, or NULL on failure.
++ * If the value returned is non-NULL, the caller is responsible for kfree()ing it.
++ */
++static char *error_path(struct xenbus_device *dev)
++{
++ return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
++}
++
++
++void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
++ va_list ap)
++{
++ int ret;
++ unsigned int len;
++ char *printf_buffer = NULL, *path_buffer = NULL;
++
++#define PRINTF_BUFFER_SIZE 4096
++ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
++ if (printf_buffer == NULL)
++ goto fail;
++
++ len = sprintf(printf_buffer, "%i ", -err);
++ ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
++
++ BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
++
++ dev_err(&dev->dev, "%s\n", printf_buffer);
++
++ path_buffer = error_path(dev);
++
++ if (path_buffer == NULL) {
++ printk("xenbus: failed to write error node for %s (%s)\n",
++ dev->nodename, printf_buffer);
++ goto fail;
++ }
++
++ if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
++ printk("xenbus: failed to write error node for %s (%s)\n",
++ dev->nodename, printf_buffer);
++ goto fail;
++ }
++
++fail:
++ if (printf_buffer)
++ kfree(printf_buffer);
++ if (path_buffer)
++ kfree(path_buffer);
++}
++
++
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++ ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ _dev_error(dev, err, fmt, ap);
++ va_end(ap);
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_error);
++
++
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++ ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ _dev_error(dev, err, fmt, ap);
++ va_end(ap);
++
++ xenbus_switch_state(dev, XenbusStateClosing);
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
++
++
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
++{
++ int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
++ if (err < 0)
++ xenbus_dev_fatal(dev, err, "granting access to ring page");
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_grant_ring);
++
++
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
++{
++ struct evtchn_alloc_unbound alloc_unbound;
++ int err;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = dev->otherend_id;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (err)
++ xenbus_dev_fatal(dev, err, "allocating event channel");
++ else
++ *port = alloc_unbound.port;
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
++
++
++int xenbus_free_evtchn(struct xenbus_device *dev, int port)
++{
++ struct evtchn_close close;
++ int err;
++
++ close.port = port;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ if (err)
++ xenbus_dev_error(dev, err, "freeing event channel %d", port);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
++
++
++enum xenbus_state xenbus_read_driver_state(const char *path)
++{
++ enum xenbus_state result;
++ int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
++ if (err)
++ result = XenbusStateUnknown;
++
++ return result;
++}
++EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_comms.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_comms.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_comms.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_comms.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * xenbus_comms.c
++ *
++ * Low level code to talk to the Xen Store: ring buffer and event channel.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <linux/ptrace.h>
++#include <linux/workqueue.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++
++#include <asm/hypervisor.h>
++
++#include "xenbus_comms.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xenbus_irq;
++
++extern void xenbus_probe(void *);
++extern int xenstored_ready;
++static DECLARE_WORK(probe_work, xenbus_probe, NULL);
++
++static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
++
++static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
++{
++ if (unlikely(xenstored_ready == 0)) {
++ xenstored_ready = 1;
++ schedule_work(&probe_work);
++ }
++
++ wake_up(&xb_waitq);
++ return IRQ_HANDLED;
++}
++
++static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
++{
++ return ((prod - cons) <= XENSTORE_RING_SIZE);
++}
++
++static void *get_output_chunk(XENSTORE_RING_IDX cons,
++ XENSTORE_RING_IDX prod,
++ char *buf, uint32_t *len)
++{
++ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
++ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
++ *len = XENSTORE_RING_SIZE - (prod - cons);
++ return buf + MASK_XENSTORE_IDX(prod);
++}
++
++static const void *get_input_chunk(XENSTORE_RING_IDX cons,
++ XENSTORE_RING_IDX prod,
++ const char *buf, uint32_t *len)
++{
++ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
++ if ((prod - cons) < *len)
++ *len = prod - cons;
++ return buf + MASK_XENSTORE_IDX(cons);
++}
++
++int xb_write(const void *data, unsigned len)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ XENSTORE_RING_IDX cons, prod;
++ int rc;
++
++ while (len != 0) {
++ void *dst;
++ unsigned int avail;
++
++ rc = wait_event_interruptible(
++ xb_waitq,
++ (intf->req_prod - intf->req_cons) !=
++ XENSTORE_RING_SIZE);
++ if (rc < 0)
++ return rc;
++
++ /* Read indexes, then verify. */
++ cons = intf->req_cons;
++ prod = intf->req_prod;
++ if (!check_indexes(cons, prod)) {
++ intf->req_cons = intf->req_prod = 0;
++ return -EIO;
++ }
++
++ dst = get_output_chunk(cons, prod, intf->req, &avail);
++ if (avail == 0)
++ continue;
++ if (avail > len)
++ avail = len;
++
++ /* Must write data /after/ reading the consumer index. */
++ mb();
++
++ memcpy(dst, data, avail);
++ data += avail;
++ len -= avail;
++
++ /* Other side must not see new producer until data is there. */
++ wmb();
++ intf->req_prod += avail;
++
++ /* Implies mb(): other side will see the updated producer. */
++ notify_remote_via_evtchn(xen_store_evtchn);
++ }
++
++ return 0;
++}
++
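++/*
++ * Ring discipline used above: req_prod/req_cons are free-running
++ * indices into a power-of-two buffer, so 'prod - cons' is the number
++ * of queued bytes; check_indexes() rejects impossible states, and the
++ * mb()/wmb() pair orders the payload copy against the index update
++ * that the other end observes.
++ */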
++int xb_data_to_read(void)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ return (intf->rsp_cons != intf->rsp_prod);
++}
++
++int xb_wait_for_data_to_read(void)
++{
++ return wait_event_interruptible(xb_waitq, xb_data_to_read());
++}
++
++int xb_read(void *data, unsigned len)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ XENSTORE_RING_IDX cons, prod;
++ int rc;
++
++ while (len != 0) {
++ unsigned int avail;
++ const char *src;
++
++ rc = xb_wait_for_data_to_read();
++ if (rc < 0)
++ return rc;
++
++ /* Read indexes, then verify. */
++ cons = intf->rsp_cons;
++ prod = intf->rsp_prod;
++ if (!check_indexes(cons, prod)) {
++ intf->rsp_cons = intf->rsp_prod = 0;
++ return -EIO;
++ }
++
++ src = get_input_chunk(cons, prod, intf->rsp, &avail);
++ if (avail == 0)
++ continue;
++ if (avail > len)
++ avail = len;
++
++ /* Must read data /after/ reading the producer index. */
++ rmb();
++
++ memcpy(data, src, avail);
++ data += avail;
++ len -= avail;
++
++ /* Other side must not see free space until we've copied out */
++ mb();
++ intf->rsp_cons += avail;
++
++ pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
++
++ /* Implies mb(): other side will see the updated consumer. */
++ notify_remote_via_evtchn(xen_store_evtchn);
++ }
++
++ return 0;
++}
++
++/* Set up interrupt handler off store event channel. */
++int xb_init_comms(void)
++{
++ struct xenstore_domain_interface *intf = xen_store_interface;
++ int err;
++
++ if (intf->req_prod != intf->req_cons)
++ printk(KERN_ERR "XENBUS request ring is not quiescent "
++ "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
++
++ if (intf->rsp_prod != intf->rsp_cons) {
++ printk(KERN_WARNING "XENBUS response ring is not quiescent "
++ "(%08x:%08x): fixing up\n",
++ intf->rsp_cons, intf->rsp_prod);
++ intf->rsp_cons = intf->rsp_prod;
++ }
++
++ if (xenbus_irq)
++ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
++
++ err = bind_caller_port_to_irqhandler(
++ xen_store_evtchn, wake_waiting,
++ 0, "xenbus", &xb_waitq);
++ if (err <= 0) {
++ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
++ return err;
++ }
++
++ xenbus_irq = err;
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_comms.h linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_comms.h
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_comms.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_comms.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,46 @@
++/*
++ * Private include for xenbus communications.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XENBUS_COMMS_H
++#define _XENBUS_COMMS_H
++
++int xs_init(void);
++int xb_init_comms(void);
++
++/* Low level routines. */
++int xb_write(const void *data, unsigned len);
++int xb_read(void *data, unsigned len);
++int xb_data_to_read(void);
++int xb_wait_for_data_to_read(void);
++int xs_input_avail(void);
++extern struct xenstore_domain_interface *xen_store_interface;
++extern int xen_store_evtchn;
++
++#endif /* _XENBUS_COMMS_H */
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_dev.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_dev.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_dev.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_dev.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,408 @@
++/*
++ * xenbus_dev.c
++ *
++ * Driver giving user-space access to the kernel's xenbus connection
++ * to xenstore.
++ *
++ * Copyright (c) 2005, Christian Limpach
++ * Copyright (c) 2005, Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/uio.h>
++#include <linux/notifier.h>
++#include <linux/wait.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/mutex.h>
++
++#include "xenbus_comms.h"
++
++#include <asm/uaccess.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++struct xenbus_dev_transaction {
++ struct list_head list;
++ struct xenbus_transaction handle;
++};
++
++struct read_buffer {
++ struct list_head list;
++ unsigned int cons;
++ unsigned int len;
++ char msg[];
++};
++
++struct xenbus_dev_data {
++ /* In-progress transaction. */
++ struct list_head transactions;
++
++ /* Active watches. */
++ struct list_head watches;
++
++ /* Partial request. */
++ unsigned int len;
++ union {
++ struct xsd_sockmsg msg;
++ char buffer[PAGE_SIZE];
++ } u;
++
++ /* Response queue. */
++ struct list_head read_buffers;
++ wait_queue_head_t read_waitq;
++
++ struct mutex reply_mutex;
++};
++
++static struct proc_dir_entry *xenbus_dev_intf;
++
++static ssize_t xenbus_dev_read(struct file *filp,
++ char __user *ubuf,
++ size_t len, loff_t *ppos)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct read_buffer *rb;
++ int i, ret;
++
++ mutex_lock(&u->reply_mutex);
++ while (list_empty(&u->read_buffers)) {
++ mutex_unlock(&u->reply_mutex);
++ ret = wait_event_interruptible(u->read_waitq,
++ !list_empty(&u->read_buffers));
++ if (ret)
++ return ret;
++ mutex_lock(&u->reply_mutex);
++ }
++
++ rb = list_entry(u->read_buffers.next, struct read_buffer, list);
++ for (i = 0; i < len;) {
++ put_user(rb->msg[rb->cons], ubuf + i);
++ i++;
++ rb->cons++;
++ if (rb->cons == rb->len) {
++ list_del(&rb->list);
++ kfree(rb);
++ if (list_empty(&u->read_buffers))
++ break;
++ rb = list_entry(u->read_buffers.next,
++ struct read_buffer, list);
++ }
++ }
++ mutex_unlock(&u->reply_mutex);
++
++ return i;
++}
++
++static void queue_reply(struct xenbus_dev_data *u,
++ char *data, unsigned int len)
++{
++ struct read_buffer *rb;
++
++ if (len == 0)
++ return;
++
++ rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
++ BUG_ON(rb == NULL);
++
++ rb->cons = 0;
++ rb->len = len;
++
++ memcpy(rb->msg, data, len);
++
++ list_add_tail(&rb->list, &u->read_buffers);
++
++ wake_up(&u->read_waitq);
++}
++
++struct watch_adapter
++{
++ struct list_head list;
++ struct xenbus_watch watch;
++ struct xenbus_dev_data *dev_data;
++ char *token;
++};
++
++static void free_watch_adapter (struct watch_adapter *watch)
++{
++ kfree(watch->watch.node);
++ kfree(watch->token);
++ kfree(watch);
++}
++
++static void watch_fired(struct xenbus_watch *watch,
++ const char **vec,
++ unsigned int len)
++{
++ struct watch_adapter *adap =
++ container_of(watch, struct watch_adapter, watch);
++ struct xsd_sockmsg hdr;
++ const char *path, *token;
++ int path_len, tok_len, body_len, data_len = 0;
++
++ path = vec[XS_WATCH_PATH];
++ token = adap->token;
++
++ path_len = strlen(path) + 1;
++ tok_len = strlen(token) + 1;
++ if (len > 2)
++ data_len = vec[len] - vec[2] + 1;
++ body_len = path_len + tok_len + data_len;
++
++ hdr.type = XS_WATCH_EVENT;
++ hdr.len = body_len;
++
++ mutex_lock(&adap->dev_data->reply_mutex);
++ queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr));
++ queue_reply(adap->dev_data, (char *)path, path_len);
++ queue_reply(adap->dev_data, (char *)token, tok_len);
++ if (len > 2)
++ queue_reply(adap->dev_data, (char *)vec[2], data_len);
++ mutex_unlock(&adap->dev_data->reply_mutex);
++}
++
++static LIST_HEAD(watch_list);
++
++static ssize_t xenbus_dev_write(struct file *filp,
++ const char __user *ubuf,
++ size_t len, loff_t *ppos)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct xenbus_dev_transaction *trans = NULL;
++ uint32_t msg_type;
++ void *reply;
++ char *path, *token;
++ struct watch_adapter *watch, *tmp_watch;
++ int err, rc = len;
++
++ if ((len + u->len) > sizeof(u->u.buffer)) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
++ rc = -EFAULT;
++ goto out;
++ }
++
++ u->len += len;
++ if ((u->len < sizeof(u->u.msg)) ||
++ (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
++ return rc;
++
++ msg_type = u->u.msg.type;
++
++ switch (msg_type) {
++ case XS_TRANSACTION_START:
++ case XS_TRANSACTION_END:
++ case XS_DIRECTORY:
++ case XS_READ:
++ case XS_GET_PERMS:
++ case XS_RELEASE:
++ case XS_GET_DOMAIN_PATH:
++ case XS_WRITE:
++ case XS_MKDIR:
++ case XS_RM:
++ case XS_SET_PERMS:
++ if (msg_type == XS_TRANSACTION_START) {
++ trans = kmalloc(sizeof(*trans), GFP_KERNEL);
++ if (!trans) {
++ rc = -ENOMEM;
++ goto out;
++ }
++ }
++
++ reply = xenbus_dev_request_and_reply(&u->u.msg);
++ if (IS_ERR(reply)) {
++ kfree(trans);
++ rc = PTR_ERR(reply);
++ goto out;
++ }
++
++ if (msg_type == XS_TRANSACTION_START) {
++ trans->handle.id = simple_strtoul(reply, NULL, 0);
++ list_add(&trans->list, &u->transactions);
++ } else if (msg_type == XS_TRANSACTION_END) {
++ list_for_each_entry(trans, &u->transactions, list)
++ if (trans->handle.id == u->u.msg.tx_id)
++ break;
++			/* Userspace may pass an arbitrary tx_id; fail
++			   gracefully if it matches no pending transaction. */
++			if (&trans->list == &u->transactions) {
++				rc = -ESRCH;
++				kfree(reply);
++				goto out;
++			}
++			list_del(&trans->list);
++			kfree(trans);
++ }
++ mutex_lock(&u->reply_mutex);
++ queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
++ queue_reply(u, (char *)reply, u->u.msg.len);
++ mutex_unlock(&u->reply_mutex);
++ kfree(reply);
++ break;
++
++ case XS_WATCH:
++ case XS_UNWATCH: {
++ static const char *XS_RESP = "OK";
++ struct xsd_sockmsg hdr;
++
++ path = u->u.buffer + sizeof(u->u.msg);
++ token = memchr(path, 0, u->u.msg.len);
++ if (token == NULL) {
++ rc = -EILSEQ;
++ goto out;
++ }
++ token++;
++
++ if (msg_type == XS_WATCH) {
++			watch = kzalloc(sizeof(*watch), GFP_KERNEL);
++			if (!watch) {
++				rc = -ENOMEM;
++				goto out;
++			}
++			watch->watch.node = kstrdup(path, GFP_KERNEL);
++			watch->token = kstrdup(token, GFP_KERNEL);
++			if (!watch->watch.node || !watch->token) {
++				free_watch_adapter(watch);
++				rc = -ENOMEM;
++				goto out;
++			}
++			watch->watch.callback = watch_fired;
++			watch->dev_data = u;
++
++ err = register_xenbus_watch(&watch->watch);
++ if (err) {
++ free_watch_adapter(watch);
++ rc = err;
++ goto out;
++ }
++
++ list_add(&watch->list, &u->watches);
++ } else {
++ list_for_each_entry_safe(watch, tmp_watch,
++ &u->watches, list) {
++ if (!strcmp(watch->token, token) &&
++ !strcmp(watch->watch.node, path))
++ {
++ unregister_xenbus_watch(&watch->watch);
++ list_del(&watch->list);
++ free_watch_adapter(watch);
++ break;
++ }
++ }
++ }
++
++ hdr.type = msg_type;
++ hdr.len = strlen(XS_RESP) + 1;
++ mutex_lock(&u->reply_mutex);
++ queue_reply(u, (char *)&hdr, sizeof(hdr));
++ queue_reply(u, (char *)XS_RESP, hdr.len);
++ mutex_unlock(&u->reply_mutex);
++ break;
++ }
++
++ default:
++ rc = -EINVAL;
++ break;
++ }
++
++ out:
++ u->len = 0;
++ return rc;
++}
++
++static int xenbus_dev_open(struct inode *inode, struct file *filp)
++{
++ struct xenbus_dev_data *u;
++
++ if (xen_store_evtchn == 0)
++ return -ENOENT;
++
++ nonseekable_open(inode, filp);
++
++ u = kzalloc(sizeof(*u), GFP_KERNEL);
++ if (u == NULL)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&u->transactions);
++ INIT_LIST_HEAD(&u->watches);
++ INIT_LIST_HEAD(&u->read_buffers);
++ init_waitqueue_head(&u->read_waitq);
++
++ mutex_init(&u->reply_mutex);
++
++ filp->private_data = u;
++
++ return 0;
++}
++
++static int xenbus_dev_release(struct inode *inode, struct file *filp)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct xenbus_dev_transaction *trans, *tmp;
++ struct watch_adapter *watch, *tmp_watch;
++
++ list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
++ xenbus_transaction_end(trans->handle, 1);
++ list_del(&trans->list);
++ kfree(trans);
++ }
++
++ list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
++ unregister_xenbus_watch(&watch->watch);
++ list_del(&watch->list);
++ free_watch_adapter(watch);
++ }
++
++ kfree(u);
++
++ return 0;
++}
++
++static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait)
++{
++ struct xenbus_dev_data *u = file->private_data;
++
++ poll_wait(file, &u->read_waitq, wait);
++ if (!list_empty(&u->read_buffers))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static const struct file_operations xenbus_dev_file_ops = {
++ .read = xenbus_dev_read,
++ .write = xenbus_dev_write,
++ .open = xenbus_dev_open,
++ .release = xenbus_dev_release,
++ .poll = xenbus_dev_poll,
++};
++
++int xenbus_dev_init(void)
++{
++ xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
++ if (xenbus_dev_intf)
++ xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
++
++ return 0;
++}
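++
++/*
++ * The /proc/xen/xenbus node registered above is the usual userspace
++ * entry point to xenstore: tools such as libxenstore speak the
++ * xsd_sockmsg protocol over it, with xenbus_dev_read() and
++ * xenbus_dev_write() doing the bridging.
++ */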
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_probe_backend.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_probe_backend.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_probe_backend.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_probe_backend.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,292 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have (backend half).
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size);
++static int xenbus_probe_backend(const char *type, const char *domid);
++
++extern int read_otherend_details(struct xenbus_device *xendev,
++ char *id_node, char *path_node);
++
++static int read_frontend_details(struct xenbus_device *xendev)
++{
++ return read_otherend_details(xendev, "frontend-id", "frontend");
++}
++
++/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
++static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++ int domid, err;
++ const char *devid, *type, *frontend;
++ unsigned int typelen;
++
++ type = strchr(nodename, '/');
++ if (!type)
++ return -EINVAL;
++ type++;
++ typelen = strcspn(type, "/");
++ if (!typelen || type[typelen] != '/')
++ return -EINVAL;
++
++ devid = strrchr(nodename, '/') + 1;
++
++ err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
++ "frontend", NULL, &frontend,
++ NULL);
++ if (err)
++ return err;
++ if (strlen(frontend) == 0)
++ err = -ERANGE;
++ if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
++ err = -ENOENT;
++ kfree(frontend);
++
++ if (err)
++ return err;
++
++ if (snprintf(bus_id, BUS_ID_SIZE,
++ "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
++ return -ENOSPC;
++ return 0;
++}
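++
++/*
++ * For example (illustrative values): "backend/vbd/3/51712", whose
++ * frontend-id node reads 3, yields the bus_id "vbd-3-51712".
++ */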
++
++static struct xen_bus_type xenbus_backend = {
++ .root = "backend",
++ .levels = 3, /* backend/type/<frontend>/<id> */
++ .get_bus_id = backend_bus_id,
++ .probe = xenbus_probe_backend,
++ .error = -ENODEV,
++ .bus = {
++ .name = "xen-backend",
++ .match = xenbus_match,
++ .probe = xenbus_dev_probe,
++ .remove = xenbus_dev_remove,
++// .shutdown = xenbus_dev_shutdown,
++ .uevent = xenbus_uevent_backend,
++ },
++ .dev = {
++ .bus_id = "xen-backend",
++ },
++};
++
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct xenbus_device *xdev;
++ struct xenbus_driver *drv;
++ int i = 0;
++ int length = 0;
++
++ DPRINTK("");
++
++ if (dev == NULL)
++ return -ENODEV;
++
++ xdev = to_xenbus_device(dev);
++ if (xdev == NULL)
++ return -ENODEV;
++
++ /* stuff we want to pass to /sbin/hotplug */
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_TYPE=%s", xdev->devicetype);
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_PATH=%s", xdev->nodename);
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_BASE_PATH=%s", xenbus_backend.root);
++
++ /* terminate, set to next free slot, shrink available space */
++ envp[i] = NULL;
++ envp = &envp[i];
++ num_envp -= i;
++ buffer = &buffer[length];
++ buffer_size -= length;
++
++ if (dev->driver) {
++ drv = to_xenbus_driver(dev->driver);
++ if (drv && drv->uevent)
++ return drv->uevent(xdev, envp, num_envp, buffer,
++ buffer_size);
++ }
++
++ return 0;
++}
++
++int xenbus_register_backend(struct xenbus_driver *drv)
++{
++ drv->read_otherend_details = read_frontend_details;
++
++ return xenbus_register_driver_common(drv, &xenbus_backend);
++}
++EXPORT_SYMBOL_GPL(xenbus_register_backend);
++
++/* backend/<typename>/<frontend-uuid>/<name> */
++static int xenbus_probe_backend_unit(const char *dir,
++ const char *type,
++ const char *name)
++{
++ char *nodename;
++ int err;
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
++ if (!nodename)
++ return -ENOMEM;
++
++ DPRINTK("%s\n", nodename);
++
++ err = xenbus_probe_node(&xenbus_backend, type, nodename);
++ kfree(nodename);
++ return err;
++}
++
++/* backend/<typename>/<frontend-domid> */
++static int xenbus_probe_backend(const char *type, const char *domid)
++{
++ char *nodename;
++ int err = 0;
++ char **dir;
++ unsigned int i, dir_n = 0;
++
++ DPRINTK("");
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid);
++ if (!nodename)
++ return -ENOMEM;
++
++ dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
++ if (IS_ERR(dir)) {
++ kfree(nodename);
++ return PTR_ERR(dir);
++ }
++
++ for (i = 0; i < dir_n; i++) {
++ err = xenbus_probe_backend_unit(nodename, type, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ kfree(nodename);
++ return err;
++}
++
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ DPRINTK("");
++
++ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
++}
++
++static struct xenbus_watch be_watch = {
++ .node = "backend",
++ .callback = backend_changed,
++};
++
++void xenbus_backend_suspend(int (*fn)(struct device *, void *))
++{
++ DPRINTK("");
++ if (!xenbus_backend.error)
++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
++
++void xenbus_backend_resume(int (*fn)(struct device *, void *))
++{
++ DPRINTK("");
++ if (!xenbus_backend.error)
++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
++
++void xenbus_backend_probe_and_watch(void)
++{
++ xenbus_probe_devices(&xenbus_backend);
++ register_xenbus_watch(&be_watch);
++}
++
++void xenbus_backend_bus_register(void)
++{
++ xenbus_backend.error = bus_register(&xenbus_backend.bus);
++ if (xenbus_backend.error)
++ printk(KERN_WARNING
++ "XENBUS: Error registering backend bus: %i\n",
++ xenbus_backend.error);
++}
++
++void xenbus_backend_device_register(void)
++{
++ if (xenbus_backend.error)
++ return;
++
++ xenbus_backend.error = device_register(&xenbus_backend.dev);
++ if (xenbus_backend.error) {
++ bus_unregister(&xenbus_backend.bus);
++ printk(KERN_WARNING
++ "XENBUS: Error registering backend device: %i\n",
++ xenbus_backend.error);
++ }
++}
++
++int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *))
++{
++ return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn);
++}
++EXPORT_SYMBOL_GPL(xenbus_for_each_backend);
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_probe.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_probe.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_probe.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_probe.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,1153 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++#include <linux/mutex.h>
++#include <linux/module.h>
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++#ifdef MODULE
++#include <xen/hvm.h>
++#endif
++
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++int xen_store_evtchn;
++struct xenstore_domain_interface *xen_store_interface;
++static unsigned long xen_store_mfn;
++
++extern struct mutex xenwatch_mutex;
++
++static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
++
++static void wait_for_devices(struct xenbus_driver *xendrv);
++
++static int xenbus_probe_frontend(const char *type, const char *name);
++
++static void xenbus_dev_shutdown(struct device *_dev);
++
++/* If something in array of ids matches this device, return it. */
++static const struct xenbus_device_id *
++match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
++{
++ for (; *arr->devicetype != '\0'; arr++) {
++ if (!strcmp(arr->devicetype, dev->devicetype))
++ return arr;
++ }
++ return NULL;
++}
++
++int xenbus_match(struct device *_dev, struct device_driver *_drv)
++{
++ struct xenbus_driver *drv = to_xenbus_driver(_drv);
++
++ if (!drv->ids)
++ return 0;
++
++ return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
++}
++
++/* device/<type>/<id> => <type>-<id> */
++static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++ nodename = strchr(nodename, '/');
++ if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
++ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
++ return -EINVAL;
++ }
++
++ strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
++ if (!strchr(bus_id, '/')) {
++ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
++ return -EINVAL;
++ }
++ *strchr(bus_id, '/') = '-';
++ return 0;
++}
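++
++/* For example, "device/vif/0" yields the bus_id "vif-0". */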
++
++
++static void free_otherend_details(struct xenbus_device *dev)
++{
++ kfree(dev->otherend);
++ dev->otherend = NULL;
++}
++
++
++static void free_otherend_watch(struct xenbus_device *dev)
++{
++ if (dev->otherend_watch.node) {
++ unregister_xenbus_watch(&dev->otherend_watch);
++ kfree(dev->otherend_watch.node);
++ dev->otherend_watch.node = NULL;
++ }
++}
++
++
++int read_otherend_details(struct xenbus_device *xendev,
++ char *id_node, char *path_node)
++{
++ int err = xenbus_gather(XBT_NIL, xendev->nodename,
++ id_node, "%i", &xendev->otherend_id,
++ path_node, NULL, &xendev->otherend,
++ NULL);
++ if (err) {
++ xenbus_dev_fatal(xendev, err,
++ "reading other end details from %s",
++ xendev->nodename);
++ return err;
++ }
++ if (strlen(xendev->otherend) == 0 ||
++ !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
++ xenbus_dev_fatal(xendev, -ENOENT,
++				 "unable to read other end from %s "
++				 "(missing or inaccessible)",
++ xendev->nodename);
++ free_otherend_details(xendev);
++ return -ENOENT;
++ }
++
++ return 0;
++}
++
++
++static int read_backend_details(struct xenbus_device *xendev)
++{
++ return read_otherend_details(xendev, "backend-id", "backend");
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++static int xenbus_uevent_frontend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct xenbus_device *xdev;
++ int length = 0, i = 0;
++
++ if (dev == NULL)
++ return -ENODEV;
++ xdev = to_xenbus_device(dev);
++ if (xdev == NULL)
++ return -ENODEV;
++
++ /* stuff we want to pass to /sbin/hotplug */
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_TYPE=%s", xdev->devicetype);
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_PATH=%s", xdev->nodename);
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "MODALIAS=xen:%s", xdev->devicetype);
++
++ return 0;
++}
++#endif
++
++/* Bus type for frontend drivers. */
++static struct xen_bus_type xenbus_frontend = {
++ .root = "device",
++ .levels = 2, /* device/type/<id> */
++ .get_bus_id = frontend_bus_id,
++ .probe = xenbus_probe_frontend,
++ .error = -ENODEV,
++ .bus = {
++ .name = "xen",
++ .match = xenbus_match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ .probe = xenbus_dev_probe,
++ .remove = xenbus_dev_remove,
++ .shutdown = xenbus_dev_shutdown,
++ .uevent = xenbus_uevent_frontend,
++#endif
++ },
++ .dev = {
++ .bus_id = "xen",
++ },
++};
++
++static void otherend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct xenbus_device *dev =
++ container_of(watch, struct xenbus_device, otherend_watch);
++ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++ enum xenbus_state state;
++
++ /* Protect us against watches firing on old details when the otherend
++ details change, say immediately after a resume. */
++ if (!dev->otherend ||
++ strncmp(dev->otherend, vec[XS_WATCH_PATH],
++ strlen(dev->otherend))) {
++ DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
++ return;
++ }
++
++ state = xenbus_read_driver_state(dev->otherend);
++
++ DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
++ dev->otherend_watch.node, vec[XS_WATCH_PATH]);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ /*
++ * Ignore xenbus transitions during shutdown. This prevents us doing
++ * work that can fail e.g., when the rootfs is gone.
++ */
++ if (system_state > SYSTEM_RUNNING) {
++		struct xen_bus_type *bus =
++			container_of(dev->dev.bus, struct xen_bus_type, bus);
++ /* If we're frontend, drive the state machine to Closed. */
++ /* This should cause the backend to release our resources. */
++ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
++ xenbus_frontend_closed(dev);
++ return;
++ }
++#endif
++
++ if (drv->otherend_changed)
++ drv->otherend_changed(dev, state);
++}
++
++
++static int talk_to_otherend(struct xenbus_device *dev)
++{
++ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++
++ free_otherend_watch(dev);
++ free_otherend_details(dev);
++
++ return drv->read_otherend_details(dev);
++}
++
++
++static int watch_otherend(struct xenbus_device *dev)
++{
++ return xenbus_watch_path2(dev, dev->otherend, "state",
++ &dev->otherend_watch, otherend_changed);
++}
++
++
++int xenbus_dev_probe(struct device *_dev)
++{
++ struct xenbus_device *dev = to_xenbus_device(_dev);
++ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++ const struct xenbus_device_id *id;
++ int err;
++
++ DPRINTK("%s", dev->nodename);
++
++ if (!drv->probe) {
++ err = -ENODEV;
++ goto fail;
++ }
++
++ id = match_device(drv->ids, dev);
++ if (!id) {
++ err = -ENODEV;
++ goto fail;
++ }
++
++ err = talk_to_otherend(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus_probe: talk_to_otherend on %s failed.\n",
++ dev->nodename);
++ return err;
++ }
++
++ err = drv->probe(dev, id);
++ if (err)
++ goto fail;
++
++ err = watch_otherend(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus_probe: watch_otherend on %s failed.\n",
++ dev->nodename);
++ return err;
++ }
++
++ return 0;
++fail:
++ xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
++ xenbus_switch_state(dev, XenbusStateClosed);
++ return -ENODEV;
++}
++
++int xenbus_dev_remove(struct device *_dev)
++{
++ struct xenbus_device *dev = to_xenbus_device(_dev);
++ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++
++ DPRINTK("%s", dev->nodename);
++
++ free_otherend_watch(dev);
++ free_otherend_details(dev);
++
++ if (drv->remove)
++ drv->remove(dev);
++
++ xenbus_switch_state(dev, XenbusStateClosed);
++ return 0;
++}
++
++static void xenbus_dev_shutdown(struct device *_dev)
++{
++ struct xenbus_device *dev = to_xenbus_device(_dev);
++ unsigned long timeout = 5*HZ;
++
++ DPRINTK("%s", dev->nodename);
++
++ if (is_initial_xendomain())
++ return;
++
++ get_device(&dev->dev);
++ if (dev->state != XenbusStateConnected) {
++		printk(KERN_INFO "%s: %s: %s != Connected, skipping\n",
++		       __FUNCTION__, dev->nodename,
++		       xenbus_strstate(dev->state));
++ goto out;
++ }
++ xenbus_switch_state(dev, XenbusStateClosing);
++ timeout = wait_for_completion_timeout(&dev->down, timeout);
++ if (!timeout)
++		printk(KERN_WARNING "%s: %s timeout closing device\n",
++		       __FUNCTION__, dev->nodename);
++ out:
++ put_device(&dev->dev);
++}
++
++int xenbus_register_driver_common(struct xenbus_driver *drv,
++ struct xen_bus_type *bus)
++{
++ int ret;
++
++ if (bus->error)
++ return bus->error;
++
++ drv->driver.name = drv->name;
++ drv->driver.bus = &bus->bus;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++ drv->driver.owner = drv->owner;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++ drv->driver.probe = xenbus_dev_probe;
++ drv->driver.remove = xenbus_dev_remove;
++ drv->driver.shutdown = xenbus_dev_shutdown;
++#endif
++
++ mutex_lock(&xenwatch_mutex);
++ ret = driver_register(&drv->driver);
++ mutex_unlock(&xenwatch_mutex);
++ return ret;
++}
++
++int xenbus_register_frontend(struct xenbus_driver *drv)
++{
++ int ret;
++
++ drv->read_otherend_details = read_backend_details;
++
++ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
++ if (ret)
++ return ret;
++
++ /* If this driver is loaded as a module wait for devices to attach. */
++ wait_for_devices(drv);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_register_frontend);
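++
++/*
++ * Illustrative registration sketch; the "demofront" driver and its
++ * callbacks are hypothetical:
++ *
++ *	static const struct xenbus_device_id demofront_ids[] = {
++ *		{ "vdemo" },
++ *		{ "" }
++ *	};
++ *
++ *	static struct xenbus_driver demofront_driver = {
++ *		.name = "demofront",
++ *		.owner = THIS_MODULE,
++ *		.ids = demofront_ids,
++ *		.probe = demofront_probe,
++ *		.otherend_changed = demofront_backend_changed,
++ *	};
++ *
++ *	return xenbus_register_frontend(&demofront_driver);
++ */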
++
++void xenbus_unregister_driver(struct xenbus_driver *drv)
++{
++ driver_unregister(&drv->driver);
++}
++EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
++
++struct xb_find_info
++{
++ struct xenbus_device *dev;
++ const char *nodename;
++};
++
++static int cmp_dev(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct xb_find_info *info = data;
++
++ if (!strcmp(xendev->nodename, info->nodename)) {
++ info->dev = xendev;
++ get_device(dev);
++ return 1;
++ }
++ return 0;
++}
++
++struct xenbus_device *xenbus_device_find(const char *nodename,
++ struct bus_type *bus)
++{
++ struct xb_find_info info = { .dev = NULL, .nodename = nodename };
++
++ bus_for_each_dev(bus, NULL, &info, cmp_dev);
++ return info.dev;
++}
++
++static int cleanup_dev(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct xb_find_info *info = data;
++ int len = strlen(info->nodename);
++
++ DPRINTK("%s", info->nodename);
++
++ /* Match the info->nodename path, or any subdirectory of that path. */
++ if (strncmp(xendev->nodename, info->nodename, len))
++ return 0;
++
++ /* If the node name is longer, ensure it really is a subdirectory. */
++ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
++ return 0;
++
++ info->dev = xendev;
++ get_device(dev);
++ return 1;
++}
++
++static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
++{
++ struct xb_find_info info = { .nodename = path };
++
++ do {
++ info.dev = NULL;
++ bus_for_each_dev(bus, NULL, &info, cleanup_dev);
++ if (info.dev) {
++ device_unregister(&info.dev->dev);
++ put_device(&info.dev->dev);
++ }
++ } while (info.dev);
++}
++
++static void xenbus_dev_release(struct device *dev)
++{
++ if (dev)
++ kfree(to_xenbus_device(dev));
++}
++
++static ssize_t xendev_show_nodename(struct device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ struct device_attribute *attr,
++#endif
++ char *buf)
++{
++ return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
++}
++DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
++
++static ssize_t xendev_show_devtype(struct device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ struct device_attribute *attr,
++#endif
++ char *buf)
++{
++ return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
++}
++DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
++
++
++int xenbus_probe_node(struct xen_bus_type *bus,
++ const char *type,
++ const char *nodename)
++{
++ int err;
++ struct xenbus_device *xendev;
++ size_t stringlen;
++ char *tmpstring;
++
++ enum xenbus_state state = xenbus_read_driver_state(nodename);
++
++ if (bus->error)
++ return bus->error;
++
++ if (state != XenbusStateInitialising) {
++ /* Device is not new, so ignore it. This can happen if a
++ device is going away after switching to Closed. */
++ return 0;
++ }
++
++ stringlen = strlen(nodename) + 1 + strlen(type) + 1;
++ xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
++ if (!xendev)
++ return -ENOMEM;
++
++ xendev->state = XenbusStateInitialising;
++
++ /* Copy the strings into the extra space. */
++
++ tmpstring = (char *)(xendev + 1);
++ strcpy(tmpstring, nodename);
++ xendev->nodename = tmpstring;
++
++ tmpstring += strlen(tmpstring) + 1;
++ strcpy(tmpstring, type);
++ xendev->devicetype = tmpstring;
++ init_completion(&xendev->down);
++
++ xendev->dev.parent = &bus->dev;
++ xendev->dev.bus = &bus->bus;
++ xendev->dev.release = xenbus_dev_release;
++
++ err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
++ if (err)
++ goto fail;
++
++ /* Register with generic device framework. */
++ err = device_register(&xendev->dev);
++ if (err)
++ goto fail;
++
++ err = device_create_file(&xendev->dev, &dev_attr_nodename);
++ if (err)
++ goto unregister;
++ err = device_create_file(&xendev->dev, &dev_attr_devtype);
++ if (err)
++ goto unregister;
++
++ return 0;
++unregister:
++ device_remove_file(&xendev->dev, &dev_attr_nodename);
++ device_remove_file(&xendev->dev, &dev_attr_devtype);
++ device_unregister(&xendev->dev);
++fail:
++ kfree(xendev);
++ return err;
++}
++
++/* device/<typename>/<name> */
++static int xenbus_probe_frontend(const char *type, const char *name)
++{
++ char *nodename;
++ int err;
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
++ if (!nodename)
++ return -ENOMEM;
++
++ DPRINTK("%s", nodename);
++
++ err = xenbus_probe_node(&xenbus_frontend, type, nodename);
++ kfree(nodename);
++ return err;
++}
++
++static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
++{
++ int err = 0;
++ char **dir;
++ unsigned int dir_n = 0;
++ int i;
++
++ dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
++ if (IS_ERR(dir))
++ return PTR_ERR(dir);
++
++ for (i = 0; i < dir_n; i++) {
++ err = bus->probe(type, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ return err;
++}
++
++int xenbus_probe_devices(struct xen_bus_type *bus)
++{
++ int err = 0;
++ char **dir;
++ unsigned int i, dir_n;
++
++ if (bus->error)
++ return bus->error;
++
++ dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
++ if (IS_ERR(dir))
++ return PTR_ERR(dir);
++
++ for (i = 0; i < dir_n; i++) {
++ err = xenbus_probe_device_type(bus, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ return err;
++}
++
++static unsigned int char_count(const char *str, char c)
++{
++ unsigned int i, ret = 0;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c)
++ ret++;
++ return ret;
++}
++
++static int strsep_len(const char *str, char c, unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c) {
++ if (len == 0)
++ return i;
++ len--;
++ }
++ return (len == 0) ? i : -ERANGE;
++}
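++
++/*
++ * strsep_len() returns the length of the prefix of str that contains
++ * len occurrences of c.  dev_changed() below uses it to trim a watch
++ * path back to the device's root node, e.g. (illustrative path, with
++ * bus->levels == 2) "device/vif/0/state" trims to "device/vif/0".
++ */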
++
++void dev_changed(const char *node, struct xen_bus_type *bus)
++{
++ int exists, rootlen;
++ struct xenbus_device *dev;
++ char type[BUS_ID_SIZE];
++ const char *p, *root;
++
++ if (bus->error || char_count(node, '/') < 2)
++ return;
++
++ exists = xenbus_exists(XBT_NIL, node, "");
++ if (!exists) {
++ xenbus_cleanup_devices(node, &bus->bus);
++ return;
++ }
++
++ /* backend/<type>/... or device/<type>/... */
++ p = strchr(node, '/') + 1;
++ snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
++ type[BUS_ID_SIZE-1] = '\0';
++
++ rootlen = strsep_len(node, '/', bus->levels);
++ if (rootlen < 0)
++ return;
++ root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
++ if (!root)
++ return;
++
++ dev = xenbus_device_find(root, &bus->bus);
++ if (!dev)
++ xenbus_probe_node(bus, type, root);
++ else
++ put_device(&dev->dev);
++
++ kfree(root);
++}
++
++static void frontend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ DPRINTK("");
++
++ dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
++}
++
++/* We watch for devices appearing and vanishing. */
++static struct xenbus_watch fe_watch = {
++ .node = "device",
++ .callback = frontend_changed,
++};
++
++static int suspend_dev(struct device *dev, void *data)
++{
++ int err = 0;
++ struct xenbus_driver *drv;
++ struct xenbus_device *xdev;
++
++ DPRINTK("");
++
++ if (dev->driver == NULL)
++ return 0;
++ drv = to_xenbus_driver(dev->driver);
++ xdev = container_of(dev, struct xenbus_device, dev);
++ if (drv->suspend)
++ err = drv->suspend(xdev);
++ if (err)
++ printk(KERN_WARNING
++ "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
++ return 0;
++}
++
++static int suspend_cancel_dev(struct device *dev, void *data)
++{
++ int err = 0;
++ struct xenbus_driver *drv;
++ struct xenbus_device *xdev;
++
++ DPRINTK("");
++
++ if (dev->driver == NULL)
++ return 0;
++ drv = to_xenbus_driver(dev->driver);
++ xdev = container_of(dev, struct xenbus_device, dev);
++ if (drv->suspend_cancel)
++ err = drv->suspend_cancel(xdev);
++ if (err)
++ printk(KERN_WARNING
++ "xenbus: suspend_cancel %s failed: %i\n",
++ dev->bus_id, err);
++ return 0;
++}
++
++static int resume_dev(struct device *dev, void *data)
++{
++ int err;
++ struct xenbus_driver *drv;
++ struct xenbus_device *xdev;
++
++ DPRINTK("");
++
++ if (dev->driver == NULL)
++ return 0;
++
++ drv = to_xenbus_driver(dev->driver);
++ xdev = container_of(dev, struct xenbus_device, dev);
++
++ err = talk_to_otherend(xdev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus: resume (talk_to_otherend) %s failed: %i\n",
++ dev->bus_id, err);
++ return err;
++ }
++
++ xdev->state = XenbusStateInitialising;
++
++ if (drv->resume) {
++ err = drv->resume(xdev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus: resume %s failed: %i\n",
++ dev->bus_id, err);
++ return err;
++ }
++ }
++
++ err = watch_otherend(xdev);
++ if (err) {
++ printk(KERN_WARNING
++ "xenbus_probe: resume (watch_otherend) %s failed: "
++ "%d.\n", dev->bus_id, err);
++ return err;
++ }
++
++ return 0;
++}
++
++void xenbus_suspend(void)
++{
++ DPRINTK("");
++
++ if (!xenbus_frontend.error)
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
++ xenbus_backend_suspend(suspend_dev);
++ xs_suspend();
++}
++EXPORT_SYMBOL_GPL(xenbus_suspend);
++
++void xenbus_resume(void)
++{
++ xb_init_comms();
++ xs_resume();
++ if (!xenbus_frontend.error)
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
++ xenbus_backend_resume(resume_dev);
++}
++EXPORT_SYMBOL_GPL(xenbus_resume);
++
++void xenbus_suspend_cancel(void)
++{
++ xs_suspend_cancel();
++ if (!xenbus_frontend.error)
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
++ xenbus_backend_resume(suspend_cancel_dev);
++}
++EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
++
++/* A flag to determine if xenstored is 'ready' (i.e. has started) */
++int xenstored_ready = 0;
++
++
++int register_xenstore_notifier(struct notifier_block *nb)
++{
++ int ret = 0;
++
++ if (xenstored_ready > 0)
++ ret = nb->notifier_call(nb, 0, NULL);
++ else
++ blocking_notifier_chain_register(&xenstore_chain, nb);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(register_xenstore_notifier);
++
++void unregister_xenstore_notifier(struct notifier_block *nb)
++{
++ blocking_notifier_chain_unregister(&xenstore_chain, nb);
++}
++EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
++
++
++void xenbus_probe(void *unused)
++{
++	BUG_ON(xenstored_ready <= 0);
++
++ /* Enumerate devices in xenstore and watch for changes. */
++ xenbus_probe_devices(&xenbus_frontend);
++ register_xenbus_watch(&fe_watch);
++ xenbus_backend_probe_and_watch();
++
++ /* Notify others that xenstore is up */
++ blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
++}
++
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
++static struct file_operations xsd_kva_fops;
++static struct proc_dir_entry *xsd_kva_intf;
++static struct proc_dir_entry *xsd_port_intf;
++
++static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ size_t size = vma->vm_end - vma->vm_start;
++
++ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
++ return -EINVAL;
++
++ if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
++ size, vma->vm_page_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static int xsd_kva_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(page, "0x%p", xen_store_interface);
++ *eof = 1;
++ return len;
++}
++
++static int xsd_port_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(page, "%d", xen_store_evtchn);
++ *eof = 1;
++ return len;
++}
++#endif
++
++static int xenbus_probe_init(void)
++{
++ int err = 0;
++ unsigned long page = 0;
++
++ DPRINTK("");
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Register ourselves with the kernel bus subsystem */
++ xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
++ if (xenbus_frontend.error)
++ printk(KERN_WARNING
++ "XENBUS: Error registering frontend bus: %i\n",
++ xenbus_frontend.error);
++ xenbus_backend_bus_register();
++
++ /*
++ * Domain0 doesn't have a store_evtchn or store_mfn yet.
++ */
++ if (is_initial_xendomain()) {
++ struct evtchn_alloc_unbound alloc_unbound;
++
++ /* Allocate page. */
++ page = get_zeroed_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ xen_store_mfn = xen_start_info->store_mfn =
++ pfn_to_mfn(virt_to_phys((void *)page) >>
++ PAGE_SHIFT);
++
++ /* Next allocate a local port which xenstored can bind to */
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = 0;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (err == -ENOSYS)
++ goto err;
++ BUG_ON(err);
++ xen_store_evtchn = xen_start_info->store_evtchn =
++ alloc_unbound.port;
++
++#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
++ /* And finally publish the above info in /proc/xen */
++ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
++ if (xsd_kva_intf) {
++ memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
++ sizeof(xsd_kva_fops));
++ xsd_kva_fops.mmap = xsd_kva_mmap;
++ xsd_kva_intf->proc_fops = &xsd_kva_fops;
++ xsd_kva_intf->read_proc = xsd_kva_read;
++ }
++ xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
++ if (xsd_port_intf)
++ xsd_port_intf->read_proc = xsd_port_read;
++#endif
++ xen_store_interface = mfn_to_virt(xen_store_mfn);
++ } else {
++ xenstored_ready = 1;
++#ifdef CONFIG_XEN
++ xen_store_evtchn = xen_start_info->store_evtchn;
++ xen_store_mfn = xen_start_info->store_mfn;
++ xen_store_interface = mfn_to_virt(xen_store_mfn);
++#else
++ xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
++ xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
++ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
++ PAGE_SIZE);
++#endif
++ }
++
++ xenbus_dev_init();
++
++ /* Initialize the interface to xenstore. */
++ err = xs_init();
++ if (err) {
++ printk(KERN_WARNING
++ "XENBUS: Error initializing xenstore comms: %i\n", err);
++ goto err;
++ }
++
++ /* Register ourselves with the kernel device subsystem */
++ if (!xenbus_frontend.error) {
++ xenbus_frontend.error = device_register(&xenbus_frontend.dev);
++ if (xenbus_frontend.error) {
++ bus_unregister(&xenbus_frontend.bus);
++ printk(KERN_WARNING
++ "XENBUS: Error registering frontend device: %i\n",
++ xenbus_frontend.error);
++ }
++ }
++ xenbus_backend_device_register();
++
++ if (!is_initial_xendomain())
++ xenbus_probe(NULL);
++
++ return 0;
++
++ err:
++ if (page)
++ free_page(page);
++
++ /*
++ * Do not unregister the xenbus front/backend buses here. The buses
++ * must exist because front/backend drivers will use them when they are
++ * registered.
++ */
++
++ return err;
++}
++
++#ifdef CONFIG_XEN
++postcore_initcall(xenbus_probe_init);
++MODULE_LICENSE("Dual BSD/GPL");
++#else
++int xenbus_init(void)
++{
++ return xenbus_probe_init();
++}
++#endif
++
++static int is_disconnected_device(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct device_driver *drv = data;
++ struct xenbus_driver *xendrv;
++
++ /*
++ * A device with no driver will never connect. We care only about
++ * devices which should currently be in the process of connecting.
++ */
++ if (!dev->driver)
++ return 0;
++
++ /* Is this search limited to a particular driver? */
++ if (drv && (dev->driver != drv))
++ return 0;
++
++ xendrv = to_xenbus_driver(dev->driver);
++ return (xendev->state < XenbusStateConnected ||
++ (xendrv->is_ready && !xendrv->is_ready(xendev)));
++}
++
++static int exists_disconnected_device(struct device_driver *drv)
++{
++ if (xenbus_frontend.error)
++ return xenbus_frontend.error;
++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++ is_disconnected_device);
++}
++
++static int print_device_status(struct device *dev, void *data)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct device_driver *drv = data;
++ struct xenbus_driver *xendrv;
++
++ /* Is this operation limited to a particular driver? */
++ if (drv && (dev->driver != drv))
++ return 0;
++
++ if (!dev->driver) {
++ /* Information only: is this too noisy? */
++ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
++ xendev->nodename);
++ return 0;
++ }
++
++ if (xendev->state < XenbusStateConnected) {
++ enum xenbus_state rstate = XenbusStateUnknown;
++ if (xendev->otherend)
++ rstate = xenbus_read_driver_state(xendev->otherend);
++ printk(KERN_WARNING "XENBUS: Timeout connecting "
++ "to device: %s (local state %d, remote state %d)\n",
++ xendev->nodename, xendev->state, rstate);
++ }
++
++ xendrv = to_xenbus_driver(dev->driver);
++ if (xendrv->is_ready && !xendrv->is_ready(xendev))
++ printk(KERN_WARNING "XENBUS: Device not ready: %s\n",
++ xendev->nodename);
++
++ return 0;
++}
++
++/* We only wait for device setup after most initcalls have run. */
++static int ready_to_wait_for_devices;
++
++/*
++ * Wait, with a five-minute timeout, for all currently configured devices.
++ * We need to do this to guarantee that the filesystems and/or network
++ * devices needed for boot are available before we allow the boot to
++ * proceed.
++ *
++ * This needs to be on a late_initcall, to happen after the frontend device
++ * drivers have been initialised, but before the root fs is mounted.
++ *
++ * A possible improvement here would be to have the tools add a per-device
++ * flag to the store entry, indicating whether it is needed at boot time.
++ * This would allow people who knew what they were doing to accelerate their
++ * boot slightly, but of course needs tools or manual intervention to set up
++ * those flags correctly.
++ */
++static void wait_for_devices(struct xenbus_driver *xendrv)
++{
++ unsigned long start = jiffies;
++ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
++ unsigned int seconds_waited = 0;
++
++ if (!ready_to_wait_for_devices || !is_running_on_xen())
++ return;
++
++ while (exists_disconnected_device(drv)) {
++ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
++ if (!seconds_waited)
++ printk(KERN_WARNING "XENBUS: Waiting for "
++ "devices to initialise: ");
++ seconds_waited += 5;
++ printk("%us...", 300 - seconds_waited);
++ if (seconds_waited == 300)
++ break;
++ }
++
++ schedule_timeout_interruptible(HZ/10);
++ }
++
++ if (seconds_waited)
++ printk("\n");
++
++ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++ print_device_status);
++}
++
++#ifndef MODULE
++static int __init boot_wait_for_devices(void)
++{
++ if (!xenbus_frontend.error) {
++ ready_to_wait_for_devices = 1;
++ wait_for_devices(NULL);
++ }
++ return 0;
++}
++
++late_initcall(boot_wait_for_devices);
++#endif
++
++int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *))
++{
++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn);
++}
++EXPORT_SYMBOL_GPL(xenbus_for_each_frontend);
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_probe.h linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_probe.h
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_probe.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_probe.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,75 @@
++/******************************************************************************
++ * xenbus_probe.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XENBUS_PROBE_H
++#define _XENBUS_PROBE_H
++
++#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
++extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
++extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
++extern void xenbus_backend_probe_and_watch(void);
++extern void xenbus_backend_bus_register(void);
++extern void xenbus_backend_device_register(void);
++#else
++static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
++static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
++static inline void xenbus_backend_probe_and_watch(void) {}
++static inline void xenbus_backend_bus_register(void) {}
++static inline void xenbus_backend_device_register(void) {}
++#endif
++
++struct xen_bus_type
++{
++ char *root;
++ int error;
++ unsigned int levels;
++ int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
++ int (*probe)(const char *type, const char *dir);
++ struct bus_type bus;
++ struct device dev;
++};
++
++extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
++extern int xenbus_dev_probe(struct device *_dev);
++extern int xenbus_dev_remove(struct device *_dev);
++extern int xenbus_register_driver_common(struct xenbus_driver *drv,
++ struct xen_bus_type *bus);
++extern int xenbus_probe_node(struct xen_bus_type *bus,
++ const char *type,
++ const char *nodename);
++extern int xenbus_probe_devices(struct xen_bus_type *bus);
++
++extern void dev_changed(const char *node, struct xen_bus_type *bus);
++
++#endif
++
+diff -rpuN linux-2.6.18.8/drivers/xen/xenbus/xenbus_xs.c linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_xs.c
+--- linux-2.6.18.8/drivers/xen/xenbus/xenbus_xs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenbus/xenbus_xs.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,902 @@
++/******************************************************************************
++ * xenbus_xs.c
++ *
++ * This is the kernel equivalent of the "xs" library. We don't need everything
++ * and we use xenbus_comms for communication.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/unistd.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/uio.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/fcntl.h>
++#include <linux/kthread.h>
++#include <linux/rwsem.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <xen/xenbus.h>
++#include "xenbus_comms.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */
++#define PF_NOFREEZE 0
++#endif
++
++struct xs_stored_msg {
++ struct list_head list;
++
++ struct xsd_sockmsg hdr;
++
++ union {
++ /* Queued replies. */
++ struct {
++ char *body;
++ } reply;
++
++ /* Queued watch events. */
++ struct {
++ struct xenbus_watch *handle;
++ char **vec;
++ unsigned int vec_size;
++ } watch;
++ } u;
++};
++
++struct xs_handle {
++ /* A list of replies. Currently only one will ever be outstanding. */
++ struct list_head reply_list;
++ spinlock_t reply_lock;
++ wait_queue_head_t reply_waitq;
++
++ /*
++ * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
++ * response_mutex is never taken simultaneously with the other three.
++ */
++
++ /* One request at a time. */
++ struct mutex request_mutex;
++
++ /* Protect xenbus reader thread against save/restore. */
++ struct mutex response_mutex;
++
++ /* Protect transactions against save/restore. */
++ struct rw_semaphore transaction_mutex;
++
++ /* Protect watch (de)register against save/restore. */
++ struct rw_semaphore watch_mutex;
++};
++
++static struct xs_handle xs_state;
++
++/* List of registered watches, and a lock to protect it. */
++static LIST_HEAD(watches);
++static DEFINE_SPINLOCK(watches_lock);
++
++/* List of pending watch callback events, and a lock to protect it. */
++static LIST_HEAD(watch_events);
++static DEFINE_SPINLOCK(watch_events_lock);
++
++/*
++ * Details of the xenwatch callback kernel thread. The thread waits on the
++ * watch_events_waitq for work to do (queued on watch_events list). When it
++ * wakes up it acquires the xenwatch_mutex before reading the list and
++ * carrying out work.
++ */
++static pid_t xenwatch_pid;
++/* static */ DEFINE_MUTEX(xenwatch_mutex);
++static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
++
++static int get_error(const char *errorstring)
++{
++ unsigned int i;
++
++ for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
++ if (i == ARRAY_SIZE(xsd_errors) - 1) {
++ printk(KERN_WARNING
++ "XENBUS xen store gave: unknown error %s",
++ errorstring);
++ return EINVAL;
++ }
++ }
++ return xsd_errors[i].errnum;
++}
++
++static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
++{
++ struct xs_stored_msg *msg;
++ char *body;
++
++ spin_lock(&xs_state.reply_lock);
++
++ while (list_empty(&xs_state.reply_list)) {
++ spin_unlock(&xs_state.reply_lock);
++ /* XXX FIXME: Avoid synchronous wait for response here. */
++ wait_event(xs_state.reply_waitq,
++ !list_empty(&xs_state.reply_list));
++ spin_lock(&xs_state.reply_lock);
++ }
++
++ msg = list_entry(xs_state.reply_list.next,
++ struct xs_stored_msg, list);
++ list_del(&msg->list);
++
++ spin_unlock(&xs_state.reply_lock);
++
++ *type = msg->hdr.type;
++ if (len)
++ *len = msg->hdr.len;
++ body = msg->u.reply.body;
++
++ kfree(msg);
++
++ return body;
++}
++
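++/*
++ * Forward a raw request from the xenbus device to xenstored and hand
++ * back the kmalloc()'ed reply body.  Transaction lifetime is also
++ * tracked here so that a save/restore cannot race with an open
++ * transaction: the read side of transaction_mutex is held from a
++ * successful XS_TRANSACTION_START to the matching XS_TRANSACTION_END.
++ */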
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
++{
++ void *ret;
++ struct xsd_sockmsg req_msg = *msg;
++ int err;
++
++ if (req_msg.type == XS_TRANSACTION_START)
++ down_read(&xs_state.transaction_mutex);
++
++ mutex_lock(&xs_state.request_mutex);
++
++ err = xb_write(msg, sizeof(*msg) + msg->len);
++ if (err) {
++ msg->type = XS_ERROR;
++ ret = ERR_PTR(err);
++ } else
++ ret = read_reply(&msg->type, &msg->len);
++
++ mutex_unlock(&xs_state.request_mutex);
++
++ if ((req_msg.type == XS_TRANSACTION_END) ||
++ ((req_msg.type == XS_TRANSACTION_START) &&
++ (msg->type == XS_ERROR)))
++ up_read(&xs_state.transaction_mutex);
++
++ return ret;
++}
++
++/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
++static void *xs_talkv(struct xenbus_transaction t,
++ enum xsd_sockmsg_type type,
++ const struct kvec *iovec,
++ unsigned int num_vecs,
++ unsigned int *len)
++{
++ struct xsd_sockmsg msg;
++ void *ret = NULL;
++ unsigned int i;
++ int err;
++
++ msg.tx_id = t.id;
++ msg.req_id = 0;
++ msg.type = type;
++ msg.len = 0;
++ for (i = 0; i < num_vecs; i++)
++ msg.len += iovec[i].iov_len;
++
++ mutex_lock(&xs_state.request_mutex);
++
++ err = xb_write(&msg, sizeof(msg));
++ if (err) {
++ mutex_unlock(&xs_state.request_mutex);
++ return ERR_PTR(err);
++ }
++
++ for (i = 0; i < num_vecs; i++) {
++		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
++ if (err) {
++ mutex_unlock(&xs_state.request_mutex);
++ return ERR_PTR(err);
++ }
++ }
++
++ ret = read_reply(&msg.type, len);
++
++ mutex_unlock(&xs_state.request_mutex);
++
++ if (IS_ERR(ret))
++ return ret;
++
++ if (msg.type == XS_ERROR) {
++ err = get_error(ret);
++ kfree(ret);
++ return ERR_PTR(-err);
++ }
++
++ if (msg.type != type) {
++ if (printk_ratelimit())
++ printk(KERN_WARNING
++ "XENBUS unexpected type [%d], expected [%d]\n",
++ msg.type, type);
++ kfree(ret);
++ return ERR_PTR(-EINVAL);
++ }
++ return ret;
++}
++
++/* Simplified version of xs_talkv: single message. */
++static void *xs_single(struct xenbus_transaction t,
++ enum xsd_sockmsg_type type,
++ const char *string,
++ unsigned int *len)
++{
++ struct kvec iovec;
++
++ iovec.iov_base = (void *)string;
++ iovec.iov_len = strlen(string) + 1;
++ return xs_talkv(t, type, &iovec, 1, len);
++}
++
++/* Many commands only need an ack, don't care what it says. */
++static int xs_error(char *reply)
++{
++ if (IS_ERR(reply))
++ return PTR_ERR(reply);
++ kfree(reply);
++ return 0;
++}
++
++static unsigned int count_strings(const char *strings, unsigned int len)
++{
++ unsigned int num;
++ const char *p;
++
++ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
++ num++;
++
++ return num;
++}
++
++/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
++static char *join(const char *dir, const char *name)
++{
++ char *buffer;
++
++ if (strlen(name) == 0)
++ buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
++ else
++ buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
++ return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
++}
++
++static char **split(char *strings, unsigned int len, unsigned int *num)
++{
++ char *p, **ret;
++
++ /* Count the strings. */
++ *num = count_strings(strings, len) + 1;
++
++ /* Transfer to one big alloc for easy freeing. */
++ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
++ if (!ret) {
++ kfree(strings);
++ return ERR_PTR(-ENOMEM);
++ }
++ memcpy(&ret[*num], strings, len);
++ kfree(strings);
++
++ strings = (char *)&ret[*num];
++ for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
++ ret[(*num)++] = p;
++ ret[*num] = strings + len;
++
++ return ret;
++}
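++
++/*
++ * split() hands back a single allocation laid out as
++ *
++ *	[ptr 0] ... [ptr num-1] [sentinel] [string 0\0 string 1\0 ...]
++ *
++ * so callers can free everything with one kfree().  The sentinel slot
++ * points just past the last string; watch_fired() relies on it when
++ * sizing the extra event components.
++ */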
++
++char **xenbus_directory(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *num)
++{
++ char *strings, *path;
++ unsigned int len;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return (char **)path;
++
++ strings = xs_single(t, XS_DIRECTORY, path, &len);
++ kfree(path);
++ if (IS_ERR(strings))
++ return (char **)strings;
++
++ return split(strings, len, num);
++}
++EXPORT_SYMBOL_GPL(xenbus_directory);
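++
++/*
++ * Example (illustrative; do_something() is hypothetical) of
++ * enumerating the configured vif frontends:
++ *
++ *	char **devs;
++ *	unsigned int i, num;
++ *
++ *	devs = xenbus_directory(XBT_NIL, "device", "vif", &num);
++ *	if (!IS_ERR(devs)) {
++ *		for (i = 0; i < num; i++)
++ *			do_something(devs[i]);
++ *		kfree(devs);
++ *	}
++ */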
++
++/* Check if a path exists. Return 1 if it does. */
++int xenbus_exists(struct xenbus_transaction t,
++ const char *dir, const char *node)
++{
++ char **d;
++ int dir_n;
++
++ d = xenbus_directory(t, dir, node, &dir_n);
++ if (IS_ERR(d))
++ return 0;
++ kfree(d);
++ return 1;
++}
++EXPORT_SYMBOL_GPL(xenbus_exists);
++
++/* Get the value of a single file.
++ * Returns a kmalloc()'ed value: call kfree() on it after use.
++ * len indicates length in bytes.
++ */
++void *xenbus_read(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *len)
++{
++ char *path;
++ void *ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return (void *)path;
++
++ ret = xs_single(t, XS_READ, path, len);
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_read);
++
++/* Write the value of a single file.
++ * Returns -err on failure.
++ */
++int xenbus_write(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *string)
++{
++ const char *path;
++ struct kvec iovec[2];
++ int ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return PTR_ERR(path);
++
++ iovec[0].iov_base = (void *)path;
++ iovec[0].iov_len = strlen(path) + 1;
++ iovec[1].iov_base = (void *)string;
++ iovec[1].iov_len = strlen(string);
++
++ ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_write);
++
++/* Create a new directory. */
++int xenbus_mkdir(struct xenbus_transaction t,
++ const char *dir, const char *node)
++{
++ char *path;
++ int ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return PTR_ERR(path);
++
++ ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_mkdir);
++
++/* Destroy a file or directory (directories must be empty). */
++int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
++{
++ char *path;
++ int ret;
++
++ path = join(dir, node);
++ if (IS_ERR(path))
++ return PTR_ERR(path);
++
++ ret = xs_error(xs_single(t, XS_RM, path, NULL));
++ kfree(path);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_rm);
++
++/* Start a transaction: changes by others will not be seen during this
++ * transaction, and changes will not be visible to others until end.
++ */
++int xenbus_transaction_start(struct xenbus_transaction *t)
++{
++ char *id_str;
++
++ down_read(&xs_state.transaction_mutex);
++
++ id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
++ if (IS_ERR(id_str)) {
++ up_read(&xs_state.transaction_mutex);
++ return PTR_ERR(id_str);
++ }
++
++ t->id = simple_strtoul(id_str, NULL, 0);
++ kfree(id_str);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(xenbus_transaction_start);
++
++/* End a transaction.
++ * If abort is non-zero, the transaction is discarded instead of committed.
++ */
++int xenbus_transaction_end(struct xenbus_transaction t, int abort)
++{
++ char abortstr[2];
++ int err;
++
++ if (abort)
++ strcpy(abortstr, "F");
++ else
++ strcpy(abortstr, "T");
++
++ err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
++
++ up_read(&xs_state.transaction_mutex);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(xenbus_transaction_end);
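A sketch of the usual caller pattern around these two functions (path and values assumed): when the commit fails with -EAGAIN, xenstored detected a conflicting transaction and the whole body must be retried.

	static int set_vbd_state(int state)
	{
		struct xenbus_transaction xbt;
		int err;

	again:
		err = xenbus_transaction_start(&xbt);
		if (err)
			return err;

		err = xenbus_printf(xbt, "device/vbd/0", "state", "%d", state);
		if (err) {
			xenbus_transaction_end(xbt, 1);	/* abort */
			return err;
		}

		err = xenbus_transaction_end(xbt, 0);	/* commit */
		if (err == -EAGAIN)
			goto again;			/* lost the race; retry */
		return err;
	}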
++
++/* Single read and scanf: returns -errno or num scanned. */
++int xenbus_scanf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++{
++ va_list ap;
++ int ret;
++ char *val;
++
++ val = xenbus_read(t, dir, node, NULL);
++ if (IS_ERR(val))
++ return PTR_ERR(val);
++
++ va_start(ap, fmt);
++ ret = vsscanf(val, fmt, ap);
++ va_end(ap);
++ kfree(val);
++ /* Distinctive errno. */
++ if (ret == 0)
++ return -ERANGE;
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_scanf);
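A usage sketch (node names assumed): read one formatted value without a transaction; the distinctive -ERANGE return means the node existed but did not match the format.

	unsigned long ring_ref;
	int err;

	err = xenbus_scanf(XBT_NIL, "device/vbd/51712", "ring-ref",
			   "%lu", &ring_ref);
	if (err < 0)
		return err;	/* -ERANGE: present but unparseable */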
++
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++{
++ va_list ap;
++ int ret;
++#define PRINTF_BUFFER_SIZE 4096
++ char *printf_buffer;
++
++ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
++ if (printf_buffer == NULL)
++ return -ENOMEM;
++
++ va_start(ap, fmt);
++ ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
++ va_end(ap);
++
++ BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
++ ret = xenbus_write(t, dir, node, printf_buffer);
++
++ kfree(printf_buffer);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_printf);
++
++/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
++int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
++{
++ va_list ap;
++ const char *name;
++ int ret = 0;
++
++ va_start(ap, dir);
++ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
++ const char *fmt = va_arg(ap, char *);
++ void *result = va_arg(ap, void *);
++ char *p;
++
++ p = xenbus_read(t, dir, name, NULL);
++ if (IS_ERR(p)) {
++ ret = PTR_ERR(p);
++ break;
++ }
++ if (fmt) {
++ if (sscanf(p, fmt, result) == 0)
++ ret = -EINVAL;
++ kfree(p);
++ } else
++ *(char **)result = p;
++ }
++ va_end(ap);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(xenbus_gather);
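A sketch of the tuple convention (nodes assumed): each tuple is a name, a scanf format, and a result pointer; a NULL format stores the raw kmalloced string instead, and a final NULL terminates the list.

	unsigned int evtchn;
	char *mac;
	int err;

	err = xenbus_gather(XBT_NIL, "device/vif/0",
			    "event-channel", "%u", &evtchn,
			    "mac", NULL, &mac,	/* NULL fmt: raw string */
			    NULL);		/* terminator */
	if (err)
		return err;
	/* ... use evtchn and mac ... */
	kfree(mac);	/* raw-string results are caller-freed */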
++
++static int xs_watch(const char *path, const char *token)
++{
++ struct kvec iov[2];
++
++ iov[0].iov_base = (void *)path;
++ iov[0].iov_len = strlen(path) + 1;
++ iov[1].iov_base = (void *)token;
++ iov[1].iov_len = strlen(token) + 1;
++
++ return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
++ ARRAY_SIZE(iov), NULL));
++}
++
++static int xs_unwatch(const char *path, const char *token)
++{
++ struct kvec iov[2];
++
++ iov[0].iov_base = (char *)path;
++ iov[0].iov_len = strlen(path) + 1;
++ iov[1].iov_base = (char *)token;
++ iov[1].iov_len = strlen(token) + 1;
++
++ return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
++ ARRAY_SIZE(iov), NULL));
++}
++
++static struct xenbus_watch *find_watch(const char *token)
++{
++ struct xenbus_watch *i, *cmp;
++
++ cmp = (void *)simple_strtoul(token, NULL, 16);
++
++ list_for_each_entry(i, &watches, list)
++ if (i == cmp)
++ return i;
++
++ return NULL;
++}
++
++/* Register callback to watch this node. */
++int register_xenbus_watch(struct xenbus_watch *watch)
++{
++ /* Pointer in ASCII is the token. */
++ char token[sizeof(watch) * 2 + 1];
++ int err;
++
++ sprintf(token, "%lX", (long)watch);
++
++ down_read(&xs_state.watch_mutex);
++
++ spin_lock(&watches_lock);
++ BUG_ON(find_watch(token));
++ list_add(&watch->list, &watches);
++ spin_unlock(&watches_lock);
++
++ err = xs_watch(watch->node, token);
++
++ /* Ignore errors due to multiple registration. */
++ if ((err != 0) && (err != -EEXIST)) {
++ spin_lock(&watches_lock);
++ list_del(&watch->list);
++ spin_unlock(&watches_lock);
++ }
++
++ up_read(&xs_state.watch_mutex);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(register_xenbus_watch);
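A registration sketch (the watched path is assumed): the structure, and the node string it points at, must stay alive until unregister_xenbus_watch(), since the token stored in xenstored is the structure's address.

	static void backend_changed(struct xenbus_watch *watch,
				    const char **vec, unsigned int len)
	{
		/* vec[XS_WATCH_PATH] names the node that fired */
	}

	static struct xenbus_watch backend_watch = {
		.node     = "backend/vbd/0/51712/state",
		.callback = backend_changed,
	};

	err = register_xenbus_watch(&backend_watch);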
++
++void unregister_xenbus_watch(struct xenbus_watch *watch)
++{
++ struct xs_stored_msg *msg, *tmp;
++ char token[sizeof(watch) * 2 + 1];
++ int err;
++
++ BUG_ON(watch->flags & XBWF_new_thread);
++
++ sprintf(token, "%lX", (long)watch);
++
++ down_read(&xs_state.watch_mutex);
++
++ spin_lock(&watches_lock);
++ BUG_ON(!find_watch(token));
++ list_del(&watch->list);
++ spin_unlock(&watches_lock);
++
++ err = xs_unwatch(watch->node, token);
++ if (err)
++ printk(KERN_WARNING
++ "XENBUS Failed to release watch %s: %i\n",
++ watch->node, err);
++
++ up_read(&xs_state.watch_mutex);
++
++ /* Cancel pending watch events. */
++ spin_lock(&watch_events_lock);
++ list_for_each_entry_safe(msg, tmp, &watch_events, list) {
++ if (msg->u.watch.handle != watch)
++ continue;
++ list_del(&msg->list);
++ kfree(msg->u.watch.vec);
++ kfree(msg);
++ }
++ spin_unlock(&watch_events_lock);
++
++ /* Flush any currently-executing callback, unless we are it. :-) */
++ if (current->pid != xenwatch_pid) {
++ mutex_lock(&xenwatch_mutex);
++ mutex_unlock(&xenwatch_mutex);
++ }
++}
++EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
++
++void xs_suspend(void)
++{
++ down_write(&xs_state.transaction_mutex);
++ down_write(&xs_state.watch_mutex);
++ mutex_lock(&xs_state.request_mutex);
++ mutex_lock(&xs_state.response_mutex);
++}
++
++void xs_resume(void)
++{
++ struct xenbus_watch *watch;
++ char token[sizeof(watch) * 2 + 1];
++
++ mutex_unlock(&xs_state.response_mutex);
++ mutex_unlock(&xs_state.request_mutex);
++ up_write(&xs_state.transaction_mutex);
++
++ /* No need for watches_lock: the watch_mutex is sufficient. */
++ list_for_each_entry(watch, &watches, list) {
++ sprintf(token, "%lX", (long)watch);
++ xs_watch(watch->node, token);
++ }
++
++ up_write(&xs_state.watch_mutex);
++}
++
++void xs_suspend_cancel(void)
++{
++ mutex_unlock(&xs_state.response_mutex);
++ mutex_unlock(&xs_state.request_mutex);
++ up_write(&xs_state.watch_mutex);
++ up_write(&xs_state.transaction_mutex);
++}
++
++static int xenwatch_handle_callback(void *data)
++{
++ struct xs_stored_msg *msg = data;
++
++ msg->u.watch.handle->callback(msg->u.watch.handle,
++ (const char **)msg->u.watch.vec,
++ msg->u.watch.vec_size);
++
++ kfree(msg->u.watch.vec);
++ kfree(msg);
++
++ /* Kill this kthread if we were spawned just for this callback. */
++ if (current->pid != xenwatch_pid)
++ do_exit(0);
++
++ return 0;
++}
++
++static int xenwatch_thread(void *unused)
++{
++ struct list_head *ent;
++ struct xs_stored_msg *msg;
++
++ current->flags |= PF_NOFREEZE;
++ for (;;) {
++ wait_event_interruptible(watch_events_waitq,
++ !list_empty(&watch_events));
++
++ if (kthread_should_stop())
++ break;
++
++ mutex_lock(&xenwatch_mutex);
++
++ spin_lock(&watch_events_lock);
++ ent = watch_events.next;
++ if (ent != &watch_events)
++ list_del(ent);
++ spin_unlock(&watch_events_lock);
++
++ if (ent == &watch_events) {
++ mutex_unlock(&xenwatch_mutex);
++ continue;
++ }
++
++ msg = list_entry(ent, struct xs_stored_msg, list);
++
++ /*
++ * Unlock the mutex before running an XBWF_new_thread
++ * handler. kthread_run can block which can deadlock
++ * against unregister_xenbus_watch() if we need to
++ * unregister other watches in order to make
++ * progress. This can occur on resume before the swap
++ * device is attached.
++ */
++ if (msg->u.watch.handle->flags & XBWF_new_thread) {
++ mutex_unlock(&xenwatch_mutex);
++ kthread_run(xenwatch_handle_callback,
++ msg, "xenwatch_cb");
++ } else {
++ xenwatch_handle_callback(msg);
++ mutex_unlock(&xenwatch_mutex);
++ }
++ }
++
++ return 0;
++}
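A sketch of a watch that opts into the per-callback thread above (flag and callback names are assumptions): setting XBWF_new_thread lets a long-running callback block without stalling the shared xenwatch thread, at the cost that such a watch must never be unregistered (see the BUG_ON in unregister_xenbus_watch()).

	static struct xenbus_watch suspend_watch = {
		.node     = "control/shutdown",	/* illustrative node */
		.callback = slow_callback,	/* may sleep for a long time */
		.flags    = XBWF_new_thread,
	};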
++
++static int process_msg(void)
++{
++ struct xs_stored_msg *msg;
++ char *body;
++ int err;
++
++ /*
++ * We must disallow save/restore while reading a xenstore message.
++ * A partial read across s/r leaves us out of sync with xenstored.
++ */
++ for (;;) {
++ err = xb_wait_for_data_to_read();
++ if (err)
++ return err;
++ mutex_lock(&xs_state.response_mutex);
++ if (xb_data_to_read())
++ break;
++ /* We raced with save/restore: pending data 'disappeared'. */
++ mutex_unlock(&xs_state.response_mutex);
++ }
++
++ msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH);
++ if (msg == NULL) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xb_read(&msg->hdr, sizeof(msg->hdr));
++ if (err) {
++ kfree(msg);
++ goto out;
++ }
++
++ body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
++ if (body == NULL) {
++ kfree(msg);
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xb_read(body, msg->hdr.len);
++ if (err) {
++ kfree(body);
++ kfree(msg);
++ goto out;
++ }
++ body[msg->hdr.len] = '\0';
++
++ if (msg->hdr.type == XS_WATCH_EVENT) {
++ msg->u.watch.vec = split(body, msg->hdr.len,
++ &msg->u.watch.vec_size);
++ if (IS_ERR(msg->u.watch.vec)) {
++ err = PTR_ERR(msg->u.watch.vec);
++ kfree(msg);
++ goto out;
++ }
++
++ spin_lock(&watches_lock);
++ msg->u.watch.handle = find_watch(
++ msg->u.watch.vec[XS_WATCH_TOKEN]);
++ if (msg->u.watch.handle != NULL) {
++ spin_lock(&watch_events_lock);
++ list_add_tail(&msg->list, &watch_events);
++ wake_up(&watch_events_waitq);
++ spin_unlock(&watch_events_lock);
++ } else {
++ kfree(msg->u.watch.vec);
++ kfree(msg);
++ }
++ spin_unlock(&watches_lock);
++ } else {
++ msg->u.reply.body = body;
++ spin_lock(&xs_state.reply_lock);
++ list_add_tail(&msg->list, &xs_state.reply_list);
++ spin_unlock(&xs_state.reply_lock);
++ wake_up(&xs_state.reply_waitq);
++ }
++
++ out:
++ mutex_unlock(&xs_state.response_mutex);
++ return err;
++}
++
++static int xenbus_thread(void *unused)
++{
++ int err;
++
++ current->flags |= PF_NOFREEZE;
++ for (;;) {
++ err = process_msg();
++ if (err)
++ printk(KERN_WARNING "XENBUS error %d while reading "
++ "message\n", err);
++ if (kthread_should_stop())
++ break;
++ }
++
++ return 0;
++}
++
++int xs_init(void)
++{
++ int err;
++ struct task_struct *task;
++
++ INIT_LIST_HEAD(&xs_state.reply_list);
++ spin_lock_init(&xs_state.reply_lock);
++ init_waitqueue_head(&xs_state.reply_waitq);
++
++ mutex_init(&xs_state.request_mutex);
++ mutex_init(&xs_state.response_mutex);
++ init_rwsem(&xs_state.transaction_mutex);
++ init_rwsem(&xs_state.watch_mutex);
++
++ /* Initialize the shared memory rings to talk to xenstored */
++ err = xb_init_comms();
++ if (err)
++ return err;
++
++ task = kthread_run(xenwatch_thread, NULL, "xenwatch");
++ if (IS_ERR(task))
++ return PTR_ERR(task);
++ xenwatch_pid = task->pid;
++
++ task = kthread_run(xenbus_thread, NULL, "xenbus");
++ if (IS_ERR(task))
++ return PTR_ERR(task);
++
++ return 0;
++}
+diff -rpuN linux-2.6.18.8/drivers/xen/xenoprof/xenoprofile.c linux-2.6.18-xen-3.3.0/drivers/xen/xenoprof/xenoprofile.c
+--- linux-2.6.18.8/drivers/xen/xenoprof/xenoprofile.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/drivers/xen/xenoprof/xenoprofile.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,546 @@
++/**
++ * @file xenoprofile.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * Separated out arch-generic part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/notifier.h>
++#include <linux/smp.h>
++#include <linux/oprofile.h>
++#include <linux/sysdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <asm/pgtable.h>
++#include <xen/evtchn.h>
++#include <xen/xenoprof.h>
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include "../../../drivers/oprofile/cpu_buffer.h"
++#include "../../../drivers/oprofile/event_buffer.h"
++
++#define MAX_XENOPROF_SAMPLES 16
++
++/* sample buffers shared with Xen */
++xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
++/* Shared buffer area */
++struct xenoprof_shared_buffer shared_buffer;
++
++/* Passive sample buffers shared with Xen */
++xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
++/* Passive shared buffer area */
++struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
++
++static int xenoprof_start(void);
++static void xenoprof_stop(void);
++
++static int xenoprof_enabled = 0;
++static int xenoprof_is_primary = 0;
++static int active_defined;
++
++extern unsigned long backtrace_depth;
++
++/* Number of buffers in shared area (one per VCPU) */
++int nbuf;
++/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
++int ovf_irq[NR_CPUS];
++/* cpu model type string - copied from Xen on XENOPROF_init command */
++char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++
++#ifdef CONFIG_PM
++
++static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
++{
++ if (xenoprof_enabled == 1)
++ xenoprof_stop();
++ return 0;
++}
++
++
++static int xenoprof_resume(struct sys_device * dev)
++{
++ if (xenoprof_enabled == 1)
++ xenoprof_start();
++ return 0;
++}
++
++
++static struct sysdev_class oprofile_sysclass = {
++ set_kset_name("oprofile"),
++ .resume = xenoprof_resume,
++ .suspend = xenoprof_suspend
++};
++
++
++static struct sys_device device_oprofile = {
++ .id = 0,
++ .cls = &oprofile_sysclass,
++};
++
++
++static int __init init_driverfs(void)
++{
++ int error;
++ if (!(error = sysdev_class_register(&oprofile_sysclass)))
++ error = sysdev_register(&device_oprofile);
++ return error;
++}
++
++
++static void exit_driverfs(void)
++{
++ sysdev_unregister(&device_oprofile);
++ sysdev_class_unregister(&oprofile_sysclass);
++}
++
++#else
++#define init_driverfs() do { } while (0)
++#define exit_driverfs() do { } while (0)
++#endif /* CONFIG_PM */
++
++unsigned long long oprofile_samples = 0;
++unsigned long long p_oprofile_samples = 0;
++
++unsigned int pdomains;
++struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++
++/* Check whether the given entry is an escape code */
++static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
++{
++ return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
++}
++
++/* Get the event at the given entry */
++static uint8_t xenoprof_get_event(xenoprof_buf_t * buf, int tail)
++{
++ return (buf->event_log[tail].event);
++}
++
++static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
++{
++ int head, tail, size;
++ int tracing = 0;
++
++ head = buf->event_head;
++ tail = buf->event_tail;
++ size = buf->event_size;
++
++ while (tail != head) {
++ if (xenoprof_is_escape(buf, tail) &&
++ xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
++ tracing = 1;
++ oprofile_add_pc(ESCAPE_CODE, buf->event_log[tail].mode,
++ CPU_TRACE_BEGIN);
++ if (!is_passive)
++ oprofile_samples++;
++ else
++ p_oprofile_samples++;
++
++ } else {
++ oprofile_add_pc(buf->event_log[tail].eip,
++ buf->event_log[tail].mode,
++ buf->event_log[tail].event);
++ if (!tracing) {
++ if (!is_passive)
++ oprofile_samples++;
++ else
++ p_oprofile_samples++;
++ }
++
++ }
++ tail++;
++ if (tail == size)
++ tail = 0;
++ }
++ buf->event_tail = tail;
++}
++
++static void xenoprof_handle_passive(void)
++{
++ int i, j;
++ int flag_domain, flag_switch = 0;
++
++ for (i = 0; i < pdomains; i++) {
++ flag_domain = 0;
++ for (j = 0; j < passive_domains[i].nbuf; j++) {
++ xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
++ if (buf->event_head == buf->event_tail)
++ continue;
++ if (!flag_domain) {
++ if (!oprofile_add_domain_switch(
++ passive_domains[i].domain_id))
++ goto done;
++ flag_domain = 1;
++ }
++ xenoprof_add_pc(buf, 1);
++ flag_switch = 1;
++ }
++ }
++done:
++ if (flag_switch)
++ oprofile_add_domain_switch(COORDINATOR_DOMAIN);
++}
++
++static irqreturn_t
++xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
++{
++ struct xenoprof_buf * buf;
++ static unsigned long flag;
++
++ buf = xenoprof_buf[smp_processor_id()];
++
++ xenoprof_add_pc(buf, 0);
++
++ if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
++ xenoprof_handle_passive();
++ smp_mb__before_clear_bit();
++ clear_bit(0, &flag);
++ }
++
++ return IRQ_HANDLED;
++}
++
++
++static void unbind_virq(void)
++{
++ unsigned int i;
++
++ for_each_online_cpu(i) {
++ if (ovf_irq[i] >= 0) {
++ unbind_from_irqhandler(ovf_irq[i], NULL);
++ ovf_irq[i] = -1;
++ }
++ }
++}
++
++
++static int bind_virq(void)
++{
++ unsigned int i;
++ int result;
++
++ for_each_online_cpu(i) {
++ result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
++ i,
++ xenoprof_ovf_interrupt,
++ SA_INTERRUPT,
++ "xenoprof",
++ NULL);
++
++ if (result < 0) {
++ unbind_virq();
++ return result;
++ }
++
++ ovf_irq[i] = result;
++ }
++
++ return 0;
++}
++
++
++static void unmap_passive_list(void)
++{
++ int i;
++ for (i = 0; i < pdomains; i++)
++ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
++ pdomains = 0;
++}
++
++
++static int map_xenoprof_buffer(int max_samples)
++{
++ struct xenoprof_get_buffer get_buffer;
++ struct xenoprof_buf *buf;
++ int ret, i;
++
++ if (shared_buffer.buffer)
++ return 0;
++
++ get_buffer.max_samples = max_samples;
++ ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
++ if (ret)
++ return ret;
++ nbuf = get_buffer.nbuf;
++
++ for (i = 0; i < nbuf; i++) {
++ buf = (struct xenoprof_buf *)
++ &shared_buffer.buffer[i * get_buffer.bufsize];
++ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++ xenoprof_buf[buf->vcpu_id] = buf;
++ }
++
++ return 0;
++}
++
++
++static int xenoprof_setup(void)
++{
++ int ret;
++
++ if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
++ return ret;
++
++ if ( (ret = bind_virq()) )
++ return ret;
++
++ if (xenoprof_is_primary) {
++ /* Define dom0 as an active domain if not done yet */
++ if (!active_defined) {
++ domid_t domid;
++ ret = HYPERVISOR_xenoprof_op(
++ XENOPROF_reset_active_list, NULL);
++ if (ret)
++ goto err;
++ domid = 0;
++ ret = HYPERVISOR_xenoprof_op(
++ XENOPROF_set_active, &domid);
++ if (ret)
++ goto err;
++ active_defined = 1;
++ }
++
++ if (backtrace_depth > 0) {
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace,
++ &backtrace_depth);
++ if (ret)
++ backtrace_depth = 0;
++ }
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
++ if (ret)
++ goto err;
++
++ xenoprof_arch_counter();
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
++ if (ret)
++ goto err;
++ }
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
++ if (ret)
++ goto err;
++
++ xenoprof_enabled = 1;
++ return 0;
++ err:
++ unbind_virq();
++ return ret;
++}
++
++
++static void xenoprof_shutdown(void)
++{
++ xenoprof_enabled = 0;
++
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));
++
++ if (xenoprof_is_primary) {
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
++ NULL));
++ active_defined = 0;
++ }
++
++ unbind_virq();
++
++ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++ if (xenoprof_is_primary)
++ unmap_passive_list();
++}
++
++
++static int xenoprof_start(void)
++{
++ int ret = 0;
++
++ if (xenoprof_is_primary)
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
++ if (!ret)
++ xenoprof_arch_start();
++ return ret;
++}
++
++
++static void xenoprof_stop(void)
++{
++ if (xenoprof_is_primary)
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
++ xenoprof_arch_stop();
++}
++
++
++static int xenoprof_set_active(int * active_domains,
++ unsigned int adomains)
++{
++ int ret = 0;
++ int i;
++ int set_dom0 = 0;
++ domid_t domid;
++
++ if (!xenoprof_is_primary)
++ return 0;
++
++ if (adomains > MAX_OPROF_DOMAINS)
++ return -E2BIG;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < adomains; i++) {
++ domid = active_domains[i];
++ /* reject ids that do not fit in domid_t */
++ if (domid != active_domains[i]) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ if (ret)
++ goto out;
++ if (active_domains[i] == 0)
++ set_dom0 = 1;
++ }
++ /* dom0 must always be active but may not be in the list */
++ if (!set_dom0) {
++ domid = 0;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ }
++
++out:
++ if (ret)
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
++ NULL));
++ active_defined = !ret;
++ return ret;
++}
++
++static int xenoprof_set_passive(int * p_domains,
++ unsigned int pdoms)
++{
++ int ret;
++ unsigned int i, j;
++ struct xenoprof_buf *buf;
++
++ if (!xenoprof_is_primary)
++ return 0;
++
++ if (pdoms > MAX_OPROF_DOMAINS)
++ return -E2BIG;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
++ if (ret)
++ return ret;
++ unmap_passive_list();
++
++ for (i = 0; i < pdoms; i++) {
++ passive_domains[i].domain_id = p_domains[i];
++ passive_domains[i].max_samples = 2048;
++ ret = xenoprof_arch_set_passive(&passive_domains[i],
++ &p_shared_buffer[i]);
++ if (ret)
++ goto out;
++ for (j = 0; j < passive_domains[i].nbuf; j++) {
++ buf = (struct xenoprof_buf *)
++ &p_shared_buffer[i].buffer[
++ j * passive_domains[i].bufsize];
++ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++ p_xenoprof_buf[i][buf->vcpu_id] = buf;
++ }
++ }
++
++ pdomains = pdoms;
++ return 0;
++
++out:
++ for (j = 0; j < i; j++)
++ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
++
++ return ret;
++}
++
++
++/* The dummy backtrace function to keep oprofile happy.
++ * The real backtrace is done in Xen.
++ */
++static void xenoprof_dummy_backtrace(struct pt_regs * const regs,
++ unsigned int depth)
++{
++ /* this should never be called */
++ BUG();
++ return;
++}
++
++
++struct oprofile_operations xenoprof_ops = {
++#ifdef HAVE_XENOPROF_CREATE_FILES
++ .create_files = xenoprof_create_files,
++#endif
++ .set_active = xenoprof_set_active,
++ .set_passive = xenoprof_set_passive,
++ .setup = xenoprof_setup,
++ .shutdown = xenoprof_shutdown,
++ .start = xenoprof_start,
++ .stop = xenoprof_stop,
++ .backtrace = xenoprof_dummy_backtrace
++};
++
++
++/* in order to get driverfs right */
++static int using_xenoprof;
++
++int __init xenoprofile_init(struct oprofile_operations * ops)
++{
++ struct xenoprof_init init;
++ unsigned int i;
++ int ret;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++ if (!ret) {
++ xenoprof_arch_init_counter(&init);
++ xenoprof_is_primary = init.is_primary;
++
++ /* cpu_type is detected by Xen */
++ cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
++ strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
++ xenoprof_ops.cpu_type = cpu_type;
++
++ init_driverfs();
++ using_xenoprof = 1;
++ *ops = xenoprof_ops;
++
++ for (i = 0; i < NR_CPUS; i++)
++ ovf_irq[i] = -1;
++
++ active_defined = 0;
++ }
++
++ printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
++ __func__, ret, init.num_events, xenoprof_is_primary);
++ return ret;
++}
++
++
++void xenoprofile_exit(void)
++{
++ if (using_xenoprof)
++ exit_driverfs();
++
++ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++ if (xenoprof_is_primary) {
++ unmap_passive_list();
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
++ }
++}
+diff -rpuN linux-2.6.18.8/fs/aio.c linux-2.6.18-xen-3.3.0/fs/aio.c
+--- linux-2.6.18.8/fs/aio.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/aio.c 2008-08-21 11:36:07.000000000 +0200
+@@ -34,6 +34,11 @@
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+
++#ifdef CONFIG_EPOLL
++#include <linux/poll.h>
++#include <linux/eventpoll.h>
++#endif
++
+ #if DEBUG > 1
+ #define dprintk printk
+ #else
+@@ -1015,6 +1020,10 @@ put_rq:
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+
++#ifdef CONFIG_EPOLL
++ if (ctx->file && waitqueue_active(&ctx->poll_wait))
++ wake_up(&ctx->poll_wait);
++#endif
+ if (ret)
+ put_ioctx(ctx);
+
+@@ -1024,6 +1033,8 @@ put_rq:
+ /* aio_read_evt
+ * Pull an event off of the ioctx's event ring. Returns the number of
+ * events fetched (0 or 1 ;-)
++ * If the ent parameter is NULL, just reports whether an event is
++ * available without consuming it.
+ * FIXME: make this use cmpxchg.
+ * TODO: make the ringbuffer user mmap()able (requires FIXME).
+ */
+@@ -1046,13 +1057,18 @@ static int aio_read_evt(struct kioctx *i
+
+ head = ring->head % info->nr;
+ if (head != ring->tail) {
+- struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+- *ent = *evp;
+- head = (head + 1) % info->nr;
+- smp_mb(); /* finish reading the event before updatng the head */
+- ring->head = head;
+- ret = 1;
+- put_aio_ring_event(evp, KM_USER1);
++ if (ent) { /* event requested */
++ struct io_event *evp =
++ aio_ring_event(info, head, KM_USER1);
++ *ent = *evp;
++ head = (head + 1) % info->nr;
++ /* finish reading the event before updating the head */
++ smp_mb();
++ ring->head = head;
++ ret = 1;
++ put_aio_ring_event(evp, KM_USER1);
++ } else /* only need to know availability */
++ ret = 1;
+ }
+ spin_unlock(&info->ring_lock);
+
+@@ -1235,9 +1251,78 @@ static void io_destroy(struct kioctx *io
+
+ aio_cancel_all(ioctx);
+ wait_for_all_aios(ioctx);
++#ifdef CONFIG_EPOLL
++ /* forget the poll file, but it's up to the user to close it */
++ if (ioctx->file) {
++ ioctx->file->private_data = 0;
++ ioctx->file = 0;
++ }
++#endif
+ put_ioctx(ioctx); /* once for the lookup */
+ }
+
++#ifdef CONFIG_EPOLL
++
++static int aio_queue_fd_close(struct inode *inode, struct file *file)
++{
++ struct kioctx *ioctx = file->private_data;
++ if (ioctx) {
++ file->private_data = 0;
++ spin_lock_irq(&ioctx->ctx_lock);
++ ioctx->file = 0;
++ spin_unlock_irq(&ioctx->ctx_lock);
++ }
++ return 0;
++}
++
++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
++{
++ unsigned int pollflags = 0;
++ struct kioctx *ioctx = file->private_data;
++
++ if (ioctx) {
++
++ spin_lock_irq(&ioctx->ctx_lock);
++ /* Insert inside our poll wait queue */
++ poll_wait(file, &ioctx->poll_wait, wait);
++
++ /* Check our condition */
++ if (aio_read_evt(ioctx, 0))
++ pollflags = POLLIN | POLLRDNORM;
++ spin_unlock_irq(&ioctx->ctx_lock);
++ }
++
++ return pollflags;
++}
++
++static const struct file_operations aioq_fops = {
++ .release = aio_queue_fd_close,
++ .poll = aio_queue_fd_poll
++};
++
++/* make_aio_fd:
++ * Create a file descriptor that can be used to poll the event queue.
++ * Piggybacks on the epoll code.
++ */
++
++static int make_aio_fd(struct kioctx *ioctx)
++{
++ int error, fd;
++ struct inode *inode;
++ struct file *file;
++
++ error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
++ if (error)
++ return error;
++
++ /* associate the file with the IO context */
++ file->private_data = ioctx;
++ ioctx->file = file;
++ init_waitqueue_head(&ioctx->poll_wait);
++ return fd;
++}
++#endif
++
++
+ /* sys_io_setup:
+ * Create an aio_context capable of receiving at least nr_events.
+ * ctxp must not point to an aio_context that already exists, and
+@@ -1250,18 +1335,30 @@ static void io_destroy(struct kioctx *io
+ * resources are available. May fail with -EFAULT if an invalid
+ * pointer is passed for ctxp. Will fail with -ENOSYS if not
+ * implemented.
++ *
++ * To request a selectable fd, the user context has to be initialized
++ * to 1, instead of 0, and the return value is the fd.
++ * This keeps the system call compatible, since a non-zero value
++ * was not allowed so far.
+ */
+ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
+ {
+ struct kioctx *ioctx = NULL;
+ unsigned long ctx;
+ long ret;
++ int make_fd = 0;
+
+ ret = get_user(ctx, ctxp);
+ if (unlikely(ret))
+ goto out;
+
+ ret = -EINVAL;
++#ifdef CONFIG_EPOLL
++ if (ctx == 1) {
++ make_fd = 1;
++ ctx = 0;
++ }
++#endif
+ if (unlikely(ctx || nr_events == 0)) {
+ pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+ ctx, nr_events);
+@@ -1272,8 +1369,12 @@ asmlinkage long sys_io_setup(unsigned nr
+ ret = PTR_ERR(ioctx);
+ if (!IS_ERR(ioctx)) {
+ ret = put_user(ioctx->user_id, ctxp);
+- if (!ret)
+- return 0;
++#ifdef CONFIG_EPOLL
++ if (make_fd && ret >= 0)
++ ret = make_aio_fd(ioctx);
++#endif
++ if (ret >= 0)
++ return ret;
+
+ get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+ io_destroy(ioctx);
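A hedged userspace sketch of the extension above (CONFIG_EPOLL kernels only; the raw syscall is used because the libc wrappers predate this change): initializing the context to 1 makes io_setup() return a pollable fd, while the real context id is still stored through ctxp.

	#include <poll.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/aio_abi.h>

	int main(void)
	{
		aio_context_t ctx = 1;	/* 1 requests a selectable fd */
		long fd = syscall(SYS_io_setup, 128, &ctx);

		if (fd < 0)
			return 1;
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		poll(&pfd, 1, -1);	/* wakes once completions are queued */
		/* drain with io_getevents(ctx, ...) as usual */
		return 0;
	}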
+diff -rpuN linux-2.6.18.8/fs/binfmt_elf.c linux-2.6.18-xen-3.3.0/fs/binfmt_elf.c
+--- linux-2.6.18.8/fs/binfmt_elf.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/binfmt_elf.c 2008-08-21 11:36:07.000000000 +0200
+@@ -1170,6 +1170,10 @@ static int dump_seek(struct file *file,
+ */
+ static int maydump(struct vm_area_struct *vma)
+ {
++ /* The vma can be set up to tell us the answer directly. */
++ if (vma->vm_flags & VM_ALWAYSDUMP)
++ return 1;
++
+ /* Do not dump I/O mapped devices or special mappings */
+ if (vma->vm_flags & (VM_IO | VM_RESERVED))
+ return 0;
+diff -rpuN linux-2.6.18.8/fs/bio.c linux-2.6.18-xen-3.3.0/fs/bio.c
+--- linux-2.6.18.8/fs/bio.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/bio.c 2008-08-21 11:36:07.000000000 +0200
+@@ -112,7 +112,8 @@ void bio_free(struct bio *bio, struct bi
+
+ BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+
+- mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
++ if (bio->bi_io_vec)
++ mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+ mempool_free(bio, bio_set->bio_pool);
+ }
+
+diff -rpuN linux-2.6.18.8/fs/compat_ioctl.c linux-2.6.18-xen-3.3.0/fs/compat_ioctl.c
+--- linux-2.6.18.8/fs/compat_ioctl.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/compat_ioctl.c 2008-08-21 11:36:07.000000000 +0200
+@@ -124,6 +124,13 @@
+ #include <linux/dvb/video.h>
+ #include <linux/lp.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#include <xen/public/evtchn.h>
++#include <xen/public/privcmd.h>
++#include <xen/compat_ioctl.h>
++#endif
++
+ /* Aiee. Someone does not find a difference between int and long */
+ #define EXT2_IOC32_GETFLAGS _IOR('f', 1, int)
+ #define EXT2_IOC32_SETFLAGS _IOW('f', 2, int)
+@@ -2948,6 +2955,18 @@ COMPATIBLE_IOCTL(LPRESET)
+ /*LPGETSTATS not implemented, but no kernels seem to compile it in anyways*/
+ COMPATIBLE_IOCTL(LPGETFLAGS)
+ HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_trans)
++
++#ifdef CONFIG_XEN
++HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32)
++HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32)
++COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_UNBOUND_PORT)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_UNBIND)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_NOTIFY)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_RESET)
++#endif
+ };
+
+ int ioctl_table_size = ARRAY_SIZE(ioctl_start);
+diff -rpuN linux-2.6.18.8/fs/debugfs/file.c linux-2.6.18-xen-3.3.0/fs/debugfs/file.c
+--- linux-2.6.18.8/fs/debugfs/file.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/debugfs/file.c 2008-08-21 11:36:07.000000000 +0200
+@@ -170,6 +170,51 @@ struct dentry *debugfs_create_u32(const
+ }
+ EXPORT_SYMBOL_GPL(debugfs_create_u32);
+
++
++static void debugfs_u64_set(void *data, u64 val)
++{
++ *(u64 *)data = val;
++}
++
++static u64 debugfs_u64_get(void *data)
++{
++ return *(u64 *)data;
++}
++DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
++
++/**
++ * debugfs_create_u64 - create a file in the debugfs filesystem that is used to read and write an unsigned 64 bit value.
++ *
++ * @name: a pointer to a string containing the name of the file to create.
++ * @mode: the permission that the file should have
++ * @parent: a pointer to the parent dentry for this file. This should be a
++ * directory dentry if set. If this parameter is NULL, then the
++ * file will be created in the root of the debugfs filesystem.
++ * @value: a pointer to the variable that the file should read to and write
++ * from.
++ *
++ * This function creates a file in debugfs with the given name that
++ * contains the value of the variable @value. If @mode permits it, the
++ * file can be read from and written to.
++ *
++ * This function will return a pointer to a dentry if it succeeds. This
++ * pointer must be passed to the debugfs_remove() function when the file is
++ * to be removed (no automatic cleanup happens if your module is unloaded,
++ * you are responsible here.) If an error occurs, NULL will be returned.
++ *
++ * If debugfs is not enabled in the kernel, the value -ENODEV will be
++ * returned. It is not wise to check for this value, but rather, check for
++ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
++ * code.
++ */
++struct dentry *debugfs_create_u64(const char *name, mode_t mode,
++ struct dentry *parent, u64 *value)
++{
++ return debugfs_create_file(name, mode, parent, value, &fops_u64);
++}
++EXPORT_SYMBOL_GPL(debugfs_create_u64);
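A usage sketch mirroring the existing u32 helpers (names assumed):

	static u64 pkt_count;
	static struct dentry *d;

	d = debugfs_create_u64("pkt_count", 0644, NULL, &pkt_count);
	/* tear down later with debugfs_remove(d) */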
++
++
+ static ssize_t read_file_bool(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
+diff -rpuN linux-2.6.18.8/fs/eventpoll.c linux-2.6.18-xen-3.3.0/fs/eventpoll.c
+--- linux-2.6.18.8/fs/eventpoll.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/eventpoll.c 2008-08-21 11:36:07.000000000 +0200
+@@ -236,8 +236,6 @@ struct ep_pqueue {
+
+ static void ep_poll_safewake_init(struct poll_safewake *psw);
+ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
+-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
+- struct eventpoll *ep);
+ static int ep_alloc(struct eventpoll **pep);
+ static void ep_free(struct eventpoll *ep);
+ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
+@@ -267,7 +265,7 @@ static int ep_events_transfer(struct eve
+ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ int maxevents, long timeout);
+ static int eventpollfs_delete_dentry(struct dentry *dentry);
+-static struct inode *ep_eventpoll_inode(void);
++static struct inode *ep_eventpoll_inode(const struct file_operations *fops);
+ static int eventpollfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt);
+@@ -517,7 +515,7 @@ asmlinkage long sys_epoll_create(int siz
+ * Creates all the items needed to setup an eventpoll file. That is,
+ * a file structure, and inode and a free file descriptor.
+ */
+- error = ep_getfd(&fd, &inode, &file, ep);
++ error = ep_getfd(&fd, &inode, &file, ep, &eventpoll_fops);
+ if (error)
+ goto eexit_2;
+
+@@ -702,8 +700,8 @@ eexit_1:
+ /*
+ * Creates the file descriptor to be used by the epoll interface.
+ */
+-static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
+- struct eventpoll *ep)
++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
++ struct eventpoll *ep, const struct file_operations *fops)
+ {
+ struct qstr this;
+ char name[32];
+@@ -719,7 +717,7 @@ static int ep_getfd(int *efd, struct ino
+ goto eexit_1;
+
+ /* Allocates an inode from the eventpoll file system */
+- inode = ep_eventpoll_inode();
++ inode = ep_eventpoll_inode(fops);
+ error = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto eexit_2;
+@@ -750,7 +748,7 @@ static int ep_getfd(int *efd, struct ino
+
+ file->f_pos = 0;
+ file->f_flags = O_RDONLY;
+- file->f_op = &eventpoll_fops;
++ file->f_op = fops;
+ file->f_mode = FMODE_READ;
+ file->f_version = 0;
+ file->private_data = ep;
+@@ -1569,7 +1567,7 @@ static int eventpollfs_delete_dentry(str
+ }
+
+
+-static struct inode *ep_eventpoll_inode(void)
++static struct inode *ep_eventpoll_inode(const struct file_operations *fops)
+ {
+ int error = -ENOMEM;
+ struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);
+@@ -1577,7 +1575,7 @@ static struct inode *ep_eventpoll_inode(
+ if (!inode)
+ goto eexit_1;
+
+- inode->i_fop = &eventpoll_fops;
++ inode->i_fop = fops;
+
+ /*
+ * Mark the inode dirty from the very beginning,
+diff -rpuN linux-2.6.18.8/fs/Kconfig linux-2.6.18-xen-3.3.0/fs/Kconfig
+--- linux-2.6.18.8/fs/Kconfig 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/Kconfig 2008-08-21 11:36:07.000000000 +0200
+@@ -826,6 +826,11 @@ config PROC_VMCORE
+ help
+ Exports the dump image of crashed kernel in ELF format.
+
++config PROC_IOMEM_MACHINE
++ bool
++ depends on PROC_FS && EXPERIMENTAL && KEXEC && XEN && IA64
++ default y
++
+ config SYSFS
+ bool "sysfs file system support" if EMBEDDED
+ default y
+@@ -865,6 +870,7 @@ config TMPFS
+ config HUGETLBFS
+ bool "HugeTLB file system support"
+ depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
++ depends on !XEN
+ help
+ hugetlbfs is a filesystem backing for HugeTLB pages, based on
+ ramfs. For architectures that support it, say Y here and read
+diff -rpuN linux-2.6.18.8/fs/lockd/svc.c linux-2.6.18-xen-3.3.0/fs/lockd/svc.c
+--- linux-2.6.18.8/fs/lockd/svc.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/lockd/svc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -323,9 +323,6 @@ EXPORT_SYMBOL(lockd_down);
+ * Sysctl parameters (same as module parameters, different interface).
+ */
+
+-/* Something that isn't CTL_ANY, CTL_NONE or a value that may clash. */
+-#define CTL_UNNUMBERED -2
+-
+ static ctl_table nlm_sysctls[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+diff -rpuN linux-2.6.18.8/fs/nfs/sysctl.c linux-2.6.18-xen-3.3.0/fs/nfs/sysctl.c
+--- linux-2.6.18.8/fs/nfs/sysctl.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/nfs/sysctl.c 2008-08-21 11:36:07.000000000 +0200
+@@ -18,11 +18,6 @@
+ static const int nfs_set_port_min = 0;
+ static const int nfs_set_port_max = 65535;
+ static struct ctl_table_header *nfs_callback_sysctl_table;
+-/*
+- * Something that isn't CTL_ANY, CTL_NONE or a value that may clash.
+- * Use the same values as fs/lockd/svc.c
+- */
+-#define CTL_UNNUMBERED -2
+
+ static ctl_table nfs_cb_sysctls[] = {
+ #ifdef CONFIG_NFS_V4
+diff -rpuN linux-2.6.18.8/fs/proc/proc_misc.c linux-2.6.18-xen-3.3.0/fs/proc/proc_misc.c
+--- linux-2.6.18.8/fs/proc/proc_misc.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/proc/proc_misc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -471,7 +471,7 @@ static int show_stat(struct seq_file *p,
+ (unsigned long long)cputime64_to_clock_t(irq),
+ (unsigned long long)cputime64_to_clock_t(softirq),
+ (unsigned long long)cputime64_to_clock_t(steal));
+- for_each_online_cpu(i) {
++ for_each_possible_cpu(i) {
+
+ /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
+ user = kstat_cpu(i).cpustat.user;
+diff -rpuN linux-2.6.18.8/fs/proc/vmcore.c linux-2.6.18-xen-3.3.0/fs/proc/vmcore.c
+--- linux-2.6.18.8/fs/proc/vmcore.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/proc/vmcore.c 2008-08-21 11:36:07.000000000 +0200
+@@ -514,7 +514,7 @@ static int __init parse_crash_elf64_head
+ /* Do some basic Verification. */
+ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
+ (ehdr.e_type != ET_CORE) ||
+- !elf_check_arch(&ehdr) ||
++ !vmcore_elf_check_arch(&ehdr) ||
+ ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
+ ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
+ ehdr.e_version != EV_CURRENT ||
+diff -rpuN linux-2.6.18.8/fs/splice.c linux-2.6.18-xen-3.3.0/fs/splice.c
+--- linux-2.6.18.8/fs/splice.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/splice.c 2008-08-21 11:36:07.000000000 +0200
+@@ -1141,6 +1141,9 @@ static int get_iovec_page_array(const st
+ if (unlikely(!base))
+ break;
+
++ if (unlikely(!access_ok(VERIFY_READ, base, len)))
++ break;
++
+ /*
+ * Get this base offset and number of pages, then map
+ * in the user pages.
+diff -rpuN linux-2.6.18.8/fs/xfs/linux-2.6/xfs_buf.c linux-2.6.18-xen-3.3.0/fs/xfs/linux-2.6/xfs_buf.c
+--- linux-2.6.18.8/fs/xfs/linux-2.6/xfs_buf.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/fs/xfs/linux-2.6/xfs_buf.c 2008-08-21 11:36:07.000000000 +0200
+@@ -182,6 +182,19 @@ free_address(
+ {
+ a_list_t *aentry;
+
++#ifdef CONFIG_XEN
++ /*
++ * Xen needs to be able to make sure it can get an exclusive
++ * RO mapping of pages it wants to turn into a pagetable. If
++ * a newly allocated page is also still being vmap()ed by xfs,
++ * it will cause pagetable construction to fail. This is a
++ * quick workaround to always eagerly unmap pages so that Xen
++ * is happy.
++ */
++ vunmap(addr);
++ return;
++#endif
++
+ aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+ if (likely(aentry)) {
+ spin_lock(&as_lock);
+diff -rpuN linux-2.6.18.8/.hg_archival.txt linux-2.6.18-xen-3.3.0/.hg_archival.txt
+--- linux-2.6.18.8/.hg_archival.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/.hg_archival.txt 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++repo: 831230e53067cb45d27b07d037b2e907b663c2db
++node: e86b8e0ac6fdc3144976c4ea0bd922532a1fe8b4
+diff -rpuN linux-2.6.18.8/.hgignore linux-2.6.18-xen-3.3.0/.hgignore
+--- linux-2.6.18.8/.hgignore 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/.hgignore 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,3 @@
++.*\.orig$
++.*\.rej$
++\.valid-src$
+\ No newline at end of file
+diff -rpuN linux-2.6.18.8/.hgtags linux-2.6.18-xen-3.3.0/.hgtags
+--- linux-2.6.18.8/.hgtags 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/.hgtags 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++831230e53067cb45d27b07d037b2e907b663c2db v2.6.18
++08e85e79c65d0316bfda5e77e8a0dc7ab9ca181a xen-3.2.0
+diff -rpuN linux-2.6.18.8/include/acpi/aclocal.h linux-2.6.18-xen-3.3.0/include/acpi/aclocal.h
+--- linux-2.6.18.8/include/acpi/aclocal.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/acpi/aclocal.h 2008-08-21 11:36:07.000000000 +0200
+@@ -708,7 +708,7 @@ struct acpi_bit_register_info {
+ * must be preserved.
+ */
+ #define ACPI_PM1_STATUS_PRESERVED_BITS 0x0800 /* Bit 11 */
+-#define ACPI_PM1_CONTROL_PRESERVED_BITS 0x0201 /* Bit 9, Bit 0 (SCI_EN) */
++#define ACPI_PM1_CONTROL_PRESERVED_BITS 0x0200 /* Bit 9 */
+
+ /*
+ * Register IDs
+diff -rpuN linux-2.6.18.8/include/acpi/pdc_intel.h linux-2.6.18-xen-3.3.0/include/acpi/pdc_intel.h
+--- linux-2.6.18.8/include/acpi/pdc_intel.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/acpi/pdc_intel.h 2008-08-21 11:36:07.000000000 +0200
+@@ -13,6 +13,7 @@
+ #define ACPI_PDC_SMP_C_SWCOORD (0x0040)
+ #define ACPI_PDC_SMP_T_SWCOORD (0x0080)
+ #define ACPI_PDC_C_C1_FFH (0x0100)
++#define ACPI_PDC_C_C2C3_FFH (0x0200)
+
+ #define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
+ ACPI_PDC_C_C1_HALT | \
+@@ -25,6 +26,8 @@
+
+ #define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
+ ACPI_PDC_SMP_C1PT | \
+- ACPI_PDC_C_C1_HALT)
++ ACPI_PDC_C_C1_HALT | \
++ ACPI_PDC_C_C1_FFH | \
++ ACPI_PDC_C_C2C3_FFH)
+
+ #endif /* __PDC_INTEL_H__ */
+diff -rpuN linux-2.6.18.8/include/acpi/processor.h linux-2.6.18-xen-3.3.0/include/acpi/processor.h
+--- linux-2.6.18.8/include/acpi/processor.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/acpi/processor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -21,6 +21,12 @@
+ #define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
+ #define ACPI_PSD_REV0_ENTRIES 5
+
++#ifdef CONFIG_XEN
++#define NR_ACPI_CPUS (NR_CPUS < 256 ? 256 : NR_CPUS)
++#else
++#define NR_ACPI_CPUS NR_CPUS
++#endif /* CONFIG_XEN */
++
+ /*
+ * Types of coordination defined in ACPI 3.0. Same macros can be used across
+ * P, C and T states
+@@ -33,6 +39,17 @@
+
+ struct acpi_processor_cx;
+
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++struct acpi_csd_package {
++ acpi_integer num_entries;
++ acpi_integer revision;
++ acpi_integer domain;
++ acpi_integer coord_type;
++ acpi_integer num_processors;
++ acpi_integer index;
++} __attribute__ ((packed));
++#endif
++
+ struct acpi_power_register {
+ u8 descriptor;
+ u16 length;
+@@ -63,6 +80,12 @@ struct acpi_processor_cx {
+ u32 power;
+ u32 usage;
+ u64 time;
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++ /* Require raw information for external control logic */
++ struct acpi_power_register reg;
++ u32 csd_count;
++ struct acpi_csd_package *domain_info;
++#endif
+ struct acpi_processor_cx_policy promotion;
+ struct acpi_processor_cx_policy demotion;
+ };
+@@ -231,6 +254,9 @@ static inline void acpi_processor_ppc_ex
+ {
+ return;
+ }
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++int acpi_processor_ppc_has_changed(struct acpi_processor *pr);
++#else
+ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+ {
+ static unsigned int printout = 1;
+@@ -243,6 +269,7 @@ static inline int acpi_processor_ppc_has
+ }
+ return 0;
+ }
++#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
+ #endif /* CONFIG_CPU_FREQ */
+
+ /* in processor_throttling.c */
+@@ -275,4 +302,79 @@ static inline void acpi_thermal_cpufreq_
+ }
+ #endif
+
++/*
++ * Following are interfaces geared to external processor PM control
++ * logic like a VMM
++ */
++/* Events notified to external control logic */
++#define PROCESSOR_PM_INIT 1
++#define PROCESSOR_PM_CHANGE 2
++#define PROCESSOR_HOTPLUG 3
++
++/* Objects for the PM events */
++#define PM_TYPE_IDLE 0
++#define PM_TYPE_PERF 1
++#define PM_TYPE_THR 2
++#define PM_TYPE_MAX 3
++
++/* Processor hotplug events */
++#define HOTPLUG_TYPE_ADD 0
++#define HOTPLUG_TYPE_REMOVE 1
++
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++struct processor_extcntl_ops {
++ /* Transfer processor PM events to external control logic */
++ int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event);
++ /* Notify physical processor status to external control logic */
++ int (*hotplug)(struct acpi_processor *pr, int event);
++};
++extern const struct processor_extcntl_ops *processor_extcntl_ops;
++
++static inline int processor_cntl_external(void)
++{
++ return (processor_extcntl_ops != NULL);
++}
++
++static inline int processor_pm_external(void)
++{
++ return processor_cntl_external() &&
++ (processor_extcntl_ops->pm_ops[PM_TYPE_IDLE] != NULL);
++}
++
++static inline int processor_pmperf_external(void)
++{
++ return processor_cntl_external() &&
++ (processor_extcntl_ops->pm_ops[PM_TYPE_PERF] != NULL);
++}
++
++static inline int processor_pmthr_external(void)
++{
++ return processor_cntl_external() &&
++ (processor_extcntl_ops->pm_ops[PM_TYPE_THR] != NULL);
++}
++
++extern int processor_notify_external(struct acpi_processor *pr,
++ int event, int type);
++extern void processor_extcntl_init(void);
++extern int processor_extcntl_prepare(struct acpi_processor *pr);
++extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
++extern int acpi_processor_get_psd(struct acpi_processor *pr);
++void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **);
++#else
++static inline int processor_cntl_external(void) {return 0;}
++static inline int processor_pm_external(void) {return 0;}
++static inline int processor_pmperf_external(void) {return 0;}
++static inline int processor_pmthr_external(void) {return 0;}
++static inline int processor_notify_external(struct acpi_processor *pr,
++ int event, int type)
++{
++ return 0;
++}
++static inline void processor_extcntl_init(void) {}
++static inline int processor_extcntl_prepare(struct acpi_processor *pr)
++{
++ return 0;
++}
++#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
++
+ #endif
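A sketch of how an external-control backend might fill in the ops table declared above (the hook body is an assumption; only the structure and hook names come from this header):

	static int xen_pm_notify(struct acpi_processor *pr, int event)
	{
		/* forward PROCESSOR_PM_INIT/PROCESSOR_PM_CHANGE to the VMM */
		return 0;
	}

	static const struct processor_extcntl_ops xen_extcntl_ops = {
		.pm_ops[PM_TYPE_IDLE] = xen_pm_notify,
		.pm_ops[PM_TYPE_PERF] = xen_pm_notify,
	};

	void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
	{
		*ops = &xen_extcntl_ops;
	}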
+diff -rpuN linux-2.6.18.8/include/asm-generic/pci.h linux-2.6.18-xen-3.3.0/include/asm-generic/pci.h
+--- linux-2.6.18.8/include/asm-generic/pci.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-generic/pci.h 2008-08-21 11:36:07.000000000 +0200
+@@ -43,7 +43,9 @@ pcibios_select_root(struct pci_dev *pdev
+ return root;
+ }
+
++#ifndef pcibios_scan_all_fns
+ #define pcibios_scan_all_fns(a, b) 0
++#endif
+
+ #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+diff -rpuN linux-2.6.18.8/include/asm-generic/pgtable.h linux-2.6.18-xen-3.3.0/include/asm-generic/pgtable.h
+--- linux-2.6.18.8/include/asm-generic/pgtable.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-generic/pgtable.h 2008-08-21 11:36:07.000000000 +0200
+@@ -188,6 +188,10 @@ static inline void ptep_set_wrprotect(st
+ })
+ #endif
+
++#ifndef arch_change_pte_range
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) 0
++#endif
++
+ #ifndef __ASSEMBLY__
+ /*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+diff -rpuN linux-2.6.18.8/include/asm-generic/vmlinux.lds.h linux-2.6.18-xen-3.3.0/include/asm-generic/vmlinux.lds.h
+--- linux-2.6.18.8/include/asm-generic/vmlinux.lds.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-generic/vmlinux.lds.h 2008-08-21 11:36:07.000000000 +0200
+@@ -194,3 +194,6 @@
+ .stab.index 0 : { *(.stab.index) } \
+ .stab.indexstr 0 : { *(.stab.indexstr) } \
+ .comment 0 : { *(.comment) }
++
++#define NOTES \
++ .notes : { *(.note.*) } :note
+diff -rpuN linux-2.6.18.8/include/asm-i386/acpi.h linux-2.6.18-xen-3.3.0/include/asm-i386/acpi.h
+--- linux-2.6.18.8/include/asm-i386/acpi.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/acpi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -31,6 +31,9 @@
+ #include <acpi/pdc_intel.h>
+
+ #include <asm/system.h> /* defines cmpxchg */
++#ifdef CONFIG_XEN
++#include <xen/interface/platform.h>
++#endif
+
+ #define COMPILER_DEPENDENT_INT64 long long
+ #define COMPILER_DEPENDENT_UINT64 unsigned long long
+@@ -156,6 +159,27 @@ static inline void acpi_disable_pci(void
+ }
+ extern int acpi_irq_balance_set(char *str);
+
++#ifdef CONFIG_XEN
++static inline int acpi_notify_hypervisor_state(u8 sleep_state,
++ u32 pm1a_cnt_val,
++ u32 pm1b_cnt_val)
++{
++ struct xen_platform_op op = {
++ .cmd = XENPF_enter_acpi_sleep,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u = {
++ .enter_acpi_sleep = {
++ .pm1a_cnt_val = pm1a_cnt_val,
++ .pm1b_cnt_val = pm1b_cnt_val,
++ .sleep_state = sleep_state,
++ },
++ },
++ };
++
++ return HYPERVISOR_platform_op(&op);
++}
++#endif /* CONFIG_XEN */
++
+ #else /* !CONFIG_ACPI */
+
+ #define acpi_lapic 0
+@@ -181,7 +205,9 @@ extern void acpi_reserve_bootmem(void);
+
+ extern u8 x86_acpiid_to_apicid[];
+
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT 1
++#endif
+
+ #endif /*__KERNEL__*/
+
+diff -rpuN linux-2.6.18.8/include/asm-i386/agp.h linux-2.6.18-xen-3.3.0/include/asm-i386/agp.h
+--- linux-2.6.18.8/include/asm-i386/agp.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/agp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -12,8 +12,10 @@
+ * data corruption on some CPUs.
+ */
+
+-int map_page_into_agp(struct page *page);
+-int unmap_page_from_agp(struct page *page);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
++#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+ #define flush_agp_mappings() global_flush_tlb()
+
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
+diff -rpuN linux-2.6.18.8/include/asm-i386/apic.h linux-2.6.18-xen-3.3.0/include/asm-i386/apic.h
+--- linux-2.6.18.8/include/asm-i386/apic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/apic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -119,10 +119,12 @@ extern void enable_NMI_through_LVT0 (voi
+
+ extern int disable_timer_pin_1;
+
++#ifndef CONFIG_XEN
+ void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+ void switch_APIC_timer_to_ipi(void *cpumask);
+ void switch_ipi_to_APIC_timer(void *cpumask);
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
++#endif
+
+ extern int timer_over_8254;
+
+diff -rpuN linux-2.6.18.8/include/asm-i386/e820.h linux-2.6.18-xen-3.3.0/include/asm-i386/e820.h
+--- linux-2.6.18.8/include/asm-i386/e820.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/e820.h 2008-08-21 11:36:07.000000000 +0200
+@@ -38,6 +38,7 @@ extern struct e820map e820;
+
+ extern int e820_all_mapped(unsigned long start, unsigned long end,
+ unsigned type);
++extern int e820_any_mapped(u64 start, u64 end, unsigned type);
+
+ #endif/*!__ASSEMBLY__*/
+
+diff -rpuN linux-2.6.18.8/include/asm-i386/elf.h linux-2.6.18-xen-3.3.0/include/asm-i386/elf.h
+--- linux-2.6.18.8/include/asm-i386/elf.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/elf.h 2008-08-21 11:36:07.000000000 +0200
+@@ -169,50 +169,6 @@ do if (vdso_enabled) { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \
+ } while (0)
+
+-/*
+- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+- * extra segments containing the vsyscall DSO contents. Dumping its
+- * contents makes post-mortem fully interpretable later without matching up
+- * the same kernel and hardware config to see what PC values meant.
+- * Dumping its extra ELF program headers includes all the other information
+- * a debugger needs to easily find how the vsyscall DSO was being used.
+- */
+-#define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum)
+-#define ELF_CORE_WRITE_EXTRA_PHDRS \
+-do { \
+- const struct elf_phdr *const vsyscall_phdrs = \
+- (const struct elf_phdr *) (VDSO_HIGH_BASE \
+- + VDSO_HIGH_EHDR->e_phoff); \
+- int i; \
+- Elf32_Off ofs = 0; \
+- for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
+- struct elf_phdr phdr = vsyscall_phdrs[i]; \
+- if (phdr.p_type == PT_LOAD) { \
+- BUG_ON(ofs != 0); \
+- ofs = phdr.p_offset = offset; \
+- phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
+- phdr.p_filesz = phdr.p_memsz; \
+- offset += phdr.p_filesz; \
+- } \
+- else \
+- phdr.p_offset += ofs; \
+- phdr.p_paddr = 0; /* match other core phdrs */ \
+- DUMP_WRITE(&phdr, sizeof(phdr)); \
+- } \
+-} while (0)
+-#define ELF_CORE_WRITE_EXTRA_DATA \
+-do { \
+- const struct elf_phdr *const vsyscall_phdrs = \
+- (const struct elf_phdr *) (VDSO_HIGH_BASE \
+- + VDSO_HIGH_EHDR->e_phoff); \
+- int i; \
+- for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
+- if (vsyscall_phdrs[i].p_type == PT_LOAD) \
+- DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \
+- PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
+- } \
+-} while (0)
+-
+ #endif
+
+ #endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/fixmap.h linux-2.6.18-xen-3.3.0/include/asm-i386/fixmap.h
+--- linux-2.6.18.8/include/asm-i386/fixmap.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/fixmap.h 2008-08-21 11:36:07.000000000 +0200
+@@ -19,7 +19,7 @@
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+-#define __FIXADDR_TOP 0xfffff000
++extern unsigned long __FIXADDR_TOP;
+
+ #ifndef __ASSEMBLY__
+ #include <linux/kernel.h>
+@@ -94,6 +94,8 @@ enum fixed_addresses {
+ extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
++extern void set_fixaddr_top(unsigned long top);
++
+ #define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, PAGE_KERNEL)
+ /*
+diff -rpuN linux-2.6.18.8/include/asm-i386/io_apic.h linux-2.6.18-xen-3.3.0/include/asm-i386/io_apic.h
+--- linux-2.6.18.8/include/asm-i386/io_apic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/io_apic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -12,7 +12,7 @@
+
+ #ifdef CONFIG_X86_IO_APIC
+
+-#ifdef CONFIG_PCI_MSI
++#if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN)
+ static inline int use_pci_vector(void) {return 1;}
+ static inline void disable_edge_ioapic_vector(unsigned int vector) { }
+ static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
+diff -rpuN linux-2.6.18.8/include/asm-i386/kexec.h linux-2.6.18-xen-3.3.0/include/asm-i386/kexec.h
+--- linux-2.6.18.8/include/asm-i386/kexec.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/kexec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -1,6 +1,26 @@
+ #ifndef _I386_KEXEC_H
+ #define _I386_KEXEC_H
+
++#define PA_CONTROL_PAGE 0
++#define VA_CONTROL_PAGE 1
++#define PA_PGD 2
++#define VA_PGD 3
++#define PA_PTE_0 4
++#define VA_PTE_0 5
++#define PA_PTE_1 6
++#define VA_PTE_1 7
++#ifdef CONFIG_X86_PAE
++#define PA_PMD_0 8
++#define VA_PMD_0 9
++#define PA_PMD_1 10
++#define VA_PMD_1 11
++#define PAGES_NR 12
++#else
++#define PAGES_NR 8
++#endif
++
++#ifndef __ASSEMBLY__
++
+ #include <asm/fixmap.h>
+ #include <asm/ptrace.h>
+ #include <asm/string.h>
+@@ -27,6 +47,9 @@
+ /* The native architecture */
+ #define KEXEC_ARCH KEXEC_ARCH_386
+
++/* We can also handle crash dumps from 64 bit kernel. */
++#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
++
+ #define MAX_NOTE_BYTES 1024
+
+ /* CPU does not save ss and esp on stack if execution is already
+@@ -72,5 +95,26 @@ static inline void crash_setup_regs(stru
+ newregs->eip = (unsigned long)current_text_addr();
+ }
+ }
++asmlinkage NORET_TYPE void
++relocate_kernel(unsigned long indirection_page,
++ unsigned long control_page,
++ unsigned long start_address,
++ unsigned int has_pae) ATTRIB_NORET;
++
++
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of
++ * the pseudo physical address which would be given by the default macros.
++ */
++
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
++
++#endif /* __ASSEMBLY__ */
+
+ #endif /* _I386_KEXEC_H */
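Under Xen the kexec_* macros above hand the generic kexec code machine frame numbers rather than pseudo-physical ones, because the hypervisor, not the guest, owns the real memory map; kexec_page_to_pfn(page) reduces to pfn_to_mfn(page_to_pfn(page)). A toy model of that indirection, with an entirely made-up p2m table standing in for the real translation:

#include <stdio.h>

/* Made-up p2m table: index is a guest pseudo-physical frame number,
 * value is the machine frame backing it. */
static const unsigned long p2m[] = { 7, 3, 9, 42 };

static unsigned long pfn_to_mfn(unsigned long pfn)
{
	return p2m[pfn];
}

int main(void)
{
	unsigned long pfn = 2;
	printf("pseudo-physical pfn %lu -> machine frame %lu\n",
	       pfn, pfn_to_mfn(pfn));
	return 0;
}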
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-default/mach_traps.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-default/mach_traps.h
+--- linux-2.6.18.8/include/asm-i386/mach-default/mach_traps.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-default/mach_traps.h 2008-08-21 11:36:07.000000000 +0200
+@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
+ outb(reason, 0x61);
+ }
+
++static inline void clear_io_check_error(unsigned char reason)
++{
++ unsigned long i;
++
++ reason = (reason & 0xf) | 8;
++ outb(reason, 0x61);
++ i = 2000;
++ while (--i) udelay(1000);
++ reason &= ~8;
++ outb(reason, 0x61);
++}
++
+ static inline unsigned char get_nmi_reason(void)
+ {
+ return inb(0x61);
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/agp.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/agp.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/agp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/agp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,44 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
++#include <asm/system.h>
++
++/*
++ * Functions to keep the agpgart mappings coherent with the MMU.
++ * The GART gives the CPU a physical alias of pages in memory. The alias
++ * region is mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cacheability attributes for the same page. This avoids
++ * data corruption on some CPUs.
++ */
++
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) ( \
++ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++ change_page_attr(page, 1, PAGE_KERNEL))
++#define flush_agp_mappings() global_flush_tlb()
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++ need to be called for each cacheline of the whole page so it may not be
++ worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order) ({ \
++ char *_t; dma_addr_t _d; \
++ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
++ _t; })
++#define free_gatt_pages(table, order) \
++ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
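map_page_into_agp() above leans on the GNU "?:" extension: "a ?: b" yields a when a is non-zero and only evaluates b otherwise, so change_page_attr() runs only after xen_create_contiguous_region() has returned 0 (success), while an error code short-circuits straight out. A small demonstration of the operator, with stand-in functions rather than the kernel ones; build with gcc, since "?:" without a middle operand is a GNU extension:

#include <stdio.h>

static int make_contiguous(void)	/* stand-in, pretend it succeeded */
{
	return 0;
}

static int set_nocache(void)		/* stand-in for the follow-up step */
{
	puts("marking page uncacheable");
	return 0;
}

int main(void)
{
	/* Non-zero (error) short-circuits; zero falls through to set_nocache. */
	int rc = make_contiguous() ?: set_nocache();
	printf("rc = %d\n", rc);
	return 0;
}

Had make_contiguous() failed, rc would carry its error code and set_nocache() would never run.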
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/desc.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/desc.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/desc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/desc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,166 @@
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <asm/ldt.h>
++#include <asm/segment.h>
++
++#define CPU_16BIT_STACK_SIZE 1024
++
++#ifndef __ASSEMBLY__
++
++#include <linux/preempt.h>
++#include <linux/smp.h>
++
++#include <asm/mmu.h>
++
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++
++DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++
++struct Xgt_desc_struct {
++ unsigned short size;
++ unsigned long address __attribute__((packed));
++ unsigned short pad;
++} __attribute__ ((packed));
++
++extern struct Xgt_desc_struct idt_descr;
++DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++
++
++static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
++{
++ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
++}
++
++#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
++
++#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
++#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
++#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
++#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
++
++#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
++#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
++#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
++#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern void set_intr_gate(unsigned int irq, void * addr);
++
++#define _set_tssldt_desc(n,addr,limit,type) \
++__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
++ "movw %w1,2(%2)\n\t" \
++ "rorl $16,%1\n\t" \
++ "movb %b1,4(%2)\n\t" \
++ "movb %4,5(%2)\n\t" \
++ "movb $0,6(%2)\n\t" \
++ "movb %h1,7(%2)\n\t" \
++ "rorl $16,%1" \
++ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++{
++ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
++ offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
++}
++
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#endif
++
++static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++{
++ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++}
++
++#define LDT_entry_a(info) \
++ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++
++#define LDT_entry_b(info) \
++ (((info)->base_addr & 0xff000000) | \
++ (((info)->base_addr & 0x00ff0000) >> 16) | \
++ ((info)->limit & 0xf0000) | \
++ (((info)->read_exec_only ^ 1) << 9) | \
++ ((info)->contents << 10) | \
++ (((info)->seg_not_present ^ 1) << 15) | \
++ ((info)->seg_32bit << 22) | \
++ ((info)->limit_in_pages << 23) | \
++ ((info)->useable << 20) | \
++ 0x7000)
++
++#define LDT_empty(info) (\
++ (info)->base_addr == 0 && \
++ (info)->limit == 0 && \
++ (info)->contents == 0 && \
++ (info)->read_exec_only == 1 && \
++ (info)->seg_32bit == 0 && \
++ (info)->limit_in_pages == 0 && \
++ (info)->seg_not_present == 1 && \
++ (info)->useable == 0 )
++
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) if (HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), \
++ *(u64 *)&t->tls_array[i])) \
++ BUG();
++ C(0); C(1); C(2);
++#undef C
++}
++
++static inline void clear_LDT(void)
++{
++ int cpu = get_cpu();
++
++ /*
++ * NB. We load the default_ldt for lcall7/27 handling on demand, as
++ * it slows down context switching. No one uses it anyway.
++ */
++ cpu = cpu; /* XXX avoid compiler warning */
++ xen_set_ldt(NULL, 0);
++ put_cpu();
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
++{
++ void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count))
++ segments = NULL;
++
++ xen_set_ldt(segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ load_LDT_nolock(pc, cpu);
++ put_cpu();
++}
++
++static inline unsigned long get_desc_base(unsigned long *desc)
++{
++ unsigned long base;
++ base = ((desc[0] >> 16) & 0x0000ffff) |
++ ((desc[1] << 16) & 0x00ff0000) |
++ (desc[1] & 0xff000000);
++ return base;
++}
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
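LDT_entry_a()/LDT_entry_b() above pack a user-supplied segment description into the two 32-bit words of an x86 descriptor, and get_desc_base() undoes the base-address part of that packing. A runnable round-trip using the same bit layout; the struct and the values in it are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Only the user_desc fields the macros above actually consume. */
struct info {
	uint32_t base_addr, limit;
	unsigned seg_32bit:1, contents:2, read_exec_only:1,
		 limit_in_pages:1, seg_not_present:1, useable:1;
};

int main(void)
{
	struct info d = { .base_addr = 0x12345678, .limit = 0xfffff,
			  .seg_32bit = 1, .limit_in_pages = 1 };
	uint32_t a = ((d.base_addr & 0x0000ffff) << 16) | (d.limit & 0x0ffff);
	uint32_t b = (d.base_addr & 0xff000000) |
		     ((d.base_addr & 0x00ff0000) >> 16) |
		     (d.limit & 0xf0000) |
		     ((d.read_exec_only ^ 1) << 9) | (d.contents << 10) |
		     ((d.seg_not_present ^ 1) << 15) | (d.seg_32bit << 22) |
		     (d.limit_in_pages << 23) | (d.useable << 20) | 0x7000;
	/* get_desc_base() recovers the base from the two packed words. */
	uint32_t base = ((a >> 16) & 0xffff) |
			((b << 16) & 0x00ff0000) | (b & 0xff000000);
	printf("base round-trips: %#x\n", base);	/* prints 0x12345678 */
	return 0;
}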
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/dma-mapping.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/dma-mapping.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/dma-mapping.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/dma-mapping.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,151 @@
++#ifndef _ASM_I386_DMA_MAPPING_H
++#define _ASM_I386_DMA_MAPPING_H
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++#include <linux/mm.h>
++#include <asm/cache.h>
++#include <asm/io.h>
++#include <asm/scatterlist.h>
++#include <asm/swiotlb.h>
++
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
++{
++ dma_addr_t mask = 0xffffffff;
++ /* If the device has a mask, use it, otherwise default to 32 bits */
++ if (hwdev && hwdev->dma_mask)
++ mask = *hwdev->dma_mask;
++ return (addr & ~mask) != 0;
++}
++
++extern int range_straddles_page_boundary(paddr_t p, size_t size);
++
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++
++extern dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction);
++
++extern void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction);
++
++extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction);
++extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction);
++
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction);
++
++extern void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction);
++#else
++#define dma_map_page(dev, page, offset, size, dir) \
++ dma_map_single(dev, page_address(page) + (offset), (size), (dir))
++#define dma_unmap_page dma_unmap_single
++#endif
++
++extern void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction);
++
++extern void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction);
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
++ flush_write_buffers();
++}
++
++extern int
++dma_mapping_error(dma_addr_t dma_addr);
++
++extern int
++dma_supported(struct device *dev, u64 mask);
++
++static inline int
++dma_set_mask(struct device *dev, u64 mask)
++{
++ if(!dev->dma_mask || !dma_supported(dev, mask))
++ return -EIO;
++
++ *dev->dma_mask = mask;
++
++ return 0;
++}
++
++static inline int
++dma_get_cache_alignment(void)
++{
++ /* no easy way to get cache size on all x86, so return the
++ * maximum possible, to be safe */
++ return (1 << INTERNODE_CACHE_SHIFT);
++}
++
++#define dma_is_consistent(d) (1)
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++extern int
++dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags);
++
++extern void
++dma_release_declared_memory(struct device *dev);
++
++extern void *
++dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size);
++
++#endif
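address_needs_mapping() above is the whole "can this device reach this address" test: any bit set above the device's DMA mask means the buffer is unreachable and must be bounced or otherwise remapped (here via swiotlb). The same predicate in isolation:

#include <stdio.h>
#include <stdint.h>

/* Mirrors address_needs_mapping(): bits above the mask are unreachable. */
static int needs_mapping(uint64_t addr, uint64_t mask)
{
	return (addr & ~mask) != 0;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* the 32-bit default above */
	printf("%d\n", needs_mapping(0x00fff000ULL, mask32));	/* 0: reachable */
	printf("%d\n", needs_mapping(0x100000000ULL, mask32));	/* 1: above 4G */
	return 0;
}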
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/fixmap.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/fixmap.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/fixmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/fixmap.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,155 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++
++/* used by vmalloc.c, vsyscall.lds.S.
++ *
++ * Leave one empty page between vmalloc'ed areas and
++ * the start of the fixmap.
++ */
++extern unsigned long __FIXADDR_TOP;
++
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <asm/acpi.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#ifdef CONFIG_HIGHMEM
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#endif
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process. We allocate these special addresses
++ * from the end of virtual memory (0xfffff000) backwards.
++ * Also this lets us do fail-safe vmalloc(): we
++ * can guarantee that these special addresses and
++ * vmalloc()-ed addresses never overlap.
++ *
++ * These 'compile-time allocated' memory buffers are fixed-size 4k pages
++ * (or larger if used with an increment higher than 1). Use
++ * fixmap_set(idx,phys) to associate physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++enum fixed_addresses {
++ FIX_HOLE,
++ FIX_VDSO,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ FIX_IO_APIC_BASE_0,
++ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_X86_VISWS_APIC
++ FIX_CO_CPU, /* Cobalt timer */
++ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
++ FIX_LI_PCIA, /* Lithium PCI Bridge A */
++ FIX_LI_PCIB, /* Lithium PCI Bridge B */
++#endif
++#ifdef CONFIG_X86_F00F_BUG
++ FIX_F00F_IDT, /* Virtual mapping for IDT */
++#endif
++#ifdef CONFIG_X86_CYCLONE_TIMER
++ FIX_CYCLONE_TIMER, /*cyclone timer register*/
++#endif
++#ifdef CONFIG_HIGHMEM
++ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
++ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++#endif
++#ifdef CONFIG_ACPI
++ FIX_ACPI_BEGIN,
++ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++#ifdef CONFIG_PCI_MMCONFIG
++ FIX_PCIE_MCFG,
++#endif
++ FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS 256
++ FIX_ISAMAP_END,
++ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++ __end_of_permanent_fixed_addresses,
++ /* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS 16
++ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++ FIX_WP_TEST,
++ __end_of_fixed_addresses
++};
++
++extern void set_fixaddr_top(unsigned long top);
++
++extern void __set_fixmap(enum fixed_addresses idx,
++ maddr_t phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++ __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
++
++#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
++#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
++#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
++
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++ /*
++ * this branch gets completely eliminated after inlining,
++ * except when someone tries to use fixaddr indices in an
++ * illegal way. (such as mixing up address types or using
++ * out-of-range indices).
++ *
++ * If it doesn't get removed, the linker will complain
++ * loudly with a reasonably clear error message.
++ */
++ if (idx >= __end_of_fixed_addresses)
++ __this_fixmap_does_not_exist();
++
++ return __fix_to_virt(idx);
++}
++
++static inline unsigned long virt_to_fix(const unsigned long vaddr)
++{
++ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
++ return __virt_to_fix(vaddr);
++}
++
++#endif /* !__ASSEMBLY__ */
++#endif
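fix_to_virt() above relies on constant folding: when idx is a compile-time constant below __end_of_fixed_addresses, the call to the never-defined __this_fixmap_does_not_exist() is optimized away; when it is not, the final link fails loudly. The same trick in miniature; the names and MAX_IDX are invented, and it needs optimization (e.g. gcc -O2) for the dead branch to be eliminated:

/* Deliberately declared but never defined. */
extern void this_index_does_not_exist(void);

#define MAX_IDX 8

static inline unsigned long idx_to_slot(unsigned int idx)
{
	if (idx >= MAX_IDX)
		this_index_does_not_exist();	/* survives only for bad idx */
	return 0x1000UL * (MAX_IDX - idx);
}

int main(void)
{
	return (int)idx_to_slot(3);	/* links; idx_to_slot(9) would not */
}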
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/gnttab_dma.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/gnttab_dma.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/gnttab_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/gnttab_dma.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _ASM_I386_GNTTAB_DMA_H
++#define _ASM_I386_GNTTAB_DMA_H
++
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++ /* Has it become a local MFN? */
++ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
++}
++
++static inline maddr_t gnttab_dma_map_page(struct page *page)
++{
++ __gnttab_dma_map_page(page);
++ return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT);
++}
++
++static inline void gnttab_dma_unmap_page(maddr_t maddr)
++{
++ __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
++}
++
++#endif /* _ASM_I386_GNTTAB_DMA_H */
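Once the __gnttab_dma_map_page() hook has run, the return value of gnttab_dma_map_page() above is just the page's machine frame number shifted into a machine (bus) address the device can DMA to. That shift in isolation, with a made-up frame number:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t mfn = 0x1a2b3;			/* hypothetical machine frame */
	uint64_t maddr = mfn << PAGE_SHIFT;	/* what the helper returns */
	printf("mfn %#llx -> maddr %#llx\n",
	       (unsigned long long)mfn, (unsigned long long)maddr);
	return 0;
}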
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/highmem.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/highmem.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/highmem.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/highmem.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,80 @@
++/*
++ * highmem.h: virtual kernel memory mappings for high memory
++ *
++ * Used in CONFIG_HIGHMEM systems for memory pages which
++ * are not addressable by direct kernel virtual addresses.
++ *
++ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
++ * Gerhard.Wichert@pdb.siemens.de
++ *
++ *
++ * Redesigned the x86 32-bit VM architecture to deal with
++ * up to 16 Terabytes of physical memory. With current x86 CPUs
++ * we now support up to 64 Gigabytes of physical RAM.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#ifndef _ASM_HIGHMEM_H
++#define _ASM_HIGHMEM_H
++
++#ifdef __KERNEL__
++
++#include <linux/interrupt.h>
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#include <asm/tlbflush.h>
++
++/* declarations for highmem.c */
++extern unsigned long highstart_pfn, highend_pfn;
++
++extern pte_t *kmap_pte;
++extern pgprot_t kmap_prot;
++extern pte_t *pkmap_page_table;
++
++/*
++ * Right now we initialize only a single pte table. It can be extended
++ * easily; subsequent pte tables have to be allocated in one physical
++ * chunk of RAM.
++ */
++#ifdef CONFIG_X86_PAE
++#define LAST_PKMAP 512
++#else
++#define LAST_PKMAP 1024
++#endif
++/*
++ * Ordering is:
++ *
++ * FIXADDR_TOP
++ * fixed_addresses
++ * FIXADDR_START
++ * temp fixed addresses
++ * FIXADDR_BOOT_START
++ * Persistent kmap area
++ * PKMAP_BASE
++ * VMALLOC_END
++ * Vmalloc area
++ * VMALLOC_START
++ * high_memory
++ */
++#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
++#define LAST_PKMAP_MASK (LAST_PKMAP-1)
++#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
++#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
++
++extern void * FASTCALL(kmap_high(struct page *page));
++extern void FASTCALL(kunmap_high(struct page *page));
++
++void *kmap(struct page *page);
++void kunmap(struct page *page);
++void *kmap_atomic(struct page *page, enum km_type type);
++void *kmap_atomic_pte(struct page *page, enum km_type type);
++void kunmap_atomic(void *kvaddr, enum km_type type);
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
++struct page *kmap_atomic_to_page(void *ptr);
++
++#define flush_cache_kmaps() do { } while (0)
++
++#endif /* __KERNEL__ */
++
++#endif /* _ASM_HIGHMEM_H */
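PKMAP_NR() and PKMAP_ADDR() above convert between a persistent-kmap slot number and its virtual address, and are inverses over the PKMAP window. A runnable check with an assumed base; the real PKMAP_BASE is derived from FIXADDR_BOOT_START, not the value used here:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PKMAP_BASE	0xff400000UL	/* assumed value for illustration */
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
	unsigned long va = PKMAP_ADDR(5);
	printf("slot 5 -> %#lx -> slot %lu\n", va, PKMAP_NR(va));
	return 0;
}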
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/hw_irq.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/hw_irq.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/hw_irq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/hw_irq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,72 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
++
++/*
++ * linux/include/asm/hw_irq.h
++ *
++ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ * moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ * IRQ/IPI changes taken from work by Thomas Radke
++ * <tomsoft@informatik.tu-chemnitz.de>
++ */
++
++#include <linux/profile.h>
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <asm/sections.h>
++
++struct hw_interrupt_type;
++
++#define NMI_VECTOR 0x02
++
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
++
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq) (irq_vector[irq])
++#define AUTO_ASSIGN -1
++
++extern void (*interrupt[NR_IRQS])(void);
++
++#ifdef CONFIG_SMP
++fastcall void reschedule_interrupt(void);
++fastcall void invalidate_interrupt(void);
++fastcall void call_function_interrupt(void);
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++fastcall void apic_timer_interrupt(void);
++fastcall void error_interrupt(void);
++fastcall void spurious_interrupt(void);
++fastcall void thermal_interrupt(struct pt_regs *);
++#define platform_legacy_irq(irq) ((irq) < 16)
++#endif
++
++void disable_8259A_irq(unsigned int irq);
++void enable_8259A_irq(unsigned int irq);
++int i8259A_irq_pending(unsigned int irq);
++void make_8259A_irq(unsigned int irq);
++void init_8259A(int aeoi);
++void FASTCALL(send_IPI_self(int vector));
++void init_VISWS_APIC_irqs(void);
++void setup_IO_APIC(void);
++void disable_IO_APIC(void);
++#define print_IO_APIC()
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++void send_IPI(int dest, int vector);
++void setup_ioapic_dest(void);
++
++extern unsigned long io_apic_irqs;
++
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
++
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++
++#endif /* _ASM_HW_IRQ_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/hypercall.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/hypercall.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/hypercall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/hypercall.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,416 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <linux/string.h> /* memcpy() */
++#include <linux/stringify.h>
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name) \
++ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name) \
++ "mov hypercall_stubs,%%eax; " \
++ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
++ "call *%%eax"
++#endif
++
++#define _hypercall0(type, name) \
++({ \
++ type __res; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res) \
++ : \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ type __res; \
++ long __ign1; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1) \
++ : "1" ((long)(a1)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ type __res; \
++ long __ign1, __ign2; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
++ : "1" ((long)(a1)), "2" ((long)(a2)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3, __ign4; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3), "=S" (__ign4) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "4" ((long)(a4)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3, __ign4, __ign5; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "4" ((long)(a4)), \
++ "5" ((long)(a5)) \
++ : "memory" ); \
++ __res; \
++})
++
++static inline int __must_check
++HYPERVISOR_set_trap_table(
++ const trap_info_t *table)
++{
++ return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int __must_check
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_mmuext_op(
++ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_set_gdt(
++ unsigned long *frame_list, unsigned int entries)
++{
++ return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int __must_check
++HYPERVISOR_stack_switch(
++ unsigned long ss, unsigned long esp)
++{
++ return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int __must_check
++HYPERVISOR_set_callbacks(
++ unsigned long event_selector, unsigned long event_address,
++ unsigned long failsafe_selector, unsigned long failsafe_address)
++{
++ return _hypercall4(int, set_callbacks,
++ event_selector, event_address,
++ failsafe_selector, failsafe_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++ int set)
++{
++ return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op_compat(
++ int cmd, unsigned long arg)
++{
++ return _hypercall2(int, sched_op_compat, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long __must_check
++HYPERVISOR_set_timer_op(
++ u64 timeout)
++{
++ unsigned long timeout_hi = (unsigned long)(timeout>>32);
++ unsigned long timeout_lo = (unsigned long)timeout;
++ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++}
++
++static inline int __must_check
++HYPERVISOR_platform_op(
++ struct xen_platform_op *platform_op)
++{
++ platform_op->interface_version = XENPF_INTERFACE_VERSION;
++ return _hypercall1(int, platform_op, platform_op);
++}
++
++static inline int __must_check
++HYPERVISOR_set_debugreg(
++ unsigned int reg, unsigned long value)
++{
++ return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long __must_check
++HYPERVISOR_get_debugreg(
++ unsigned int reg)
++{
++ return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int __must_check
++HYPERVISOR_update_descriptor(
++ u64 ma, u64 desc)
++{
++ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
++}
++
++static inline int __must_check
++HYPERVISOR_memory_op(
++ unsigned int cmd, void *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_multicall(
++ multicall_entry_t *call_list, unsigned int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping(
++ unsigned long va, pte_t new_val, unsigned long flags)
++{
++ unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++ pte_hi = new_val.pte_high;
++#endif
++ return _hypercall4(int, update_va_mapping, va,
++ new_val.pte_low, pte_hi, flags);
++}
++
++static inline int __must_check
++HYPERVISOR_event_channel_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct evtchn_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, event_channel_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int __must_check
++HYPERVISOR_acm_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, acm_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_xen_version(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_console_io(
++ int cmd, unsigned int count, char *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int __must_check
++HYPERVISOR_physdev_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct physdev_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, physdev_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int __must_check
++HYPERVISOR_grant_table_op(
++ unsigned int cmd, void *uop, unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping_otherdomain(
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++ unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++ pte_hi = new_val.pte_high;
++#endif
++ return _hypercall5(int, update_va_mapping_otherdomain, va,
++ new_val.pte_low, pte_hi, flags, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_vm_assist(
++ unsigned int cmd, unsigned int type)
++{
++ return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int __must_check
++HYPERVISOR_vcpu_op(
++ int cmd, unsigned int vcpuid, void *extra_args)
++{
++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int __must_check
++HYPERVISOR_suspend(
++ unsigned long srec)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend
++ };
++
++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++ &sched_shutdown, srec);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++ SHUTDOWN_suspend, srec);
++#endif
++
++ return rc;
++}
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++static inline int
++HYPERVISOR_nmi_op(
++ unsigned long op, void *arg)
++{
++ return _hypercall2(int, nmi_op, op, arg);
++}
++#endif
++
++#ifndef CONFIG_XEN
++static inline unsigned long __must_check
++HYPERVISOR_hvm_op(
++ int op, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, op, arg);
++}
++#endif
++
++static inline int __must_check
++HYPERVISOR_callback_op(
++ int cmd, const void *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_xenoprof_op(
++ int op, void *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_kexec_op(
++ unsigned long op, void *args)
++{
++ return _hypercall2(int, kexec_op, op, args);
++}
++
++
++
++#endif /* __HYPERCALL_H__ */
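The HYPERCALL_STR() string above encodes the whole dispatch scheme: every hypercall owns a 32-byte stub in the shared hypercall page, so the call target is simply hypercall_page plus the hypercall number times 32, with arguments passed in ebx/ecx/edx/esi/edi and the result returned in eax, as the _hypercallN() constraints show. Computing a stub offset; the numbers quoted are the usual ones from xen/interface/xen.h, repeated here only for illustration:

#include <stdio.h>

#define __HYPERVISOR_set_trap_table	 0
#define __HYPERVISOR_mmu_update		 1
#define __HYPERVISOR_sched_op		29

int main(void)
{
	/* "call hypercall_page + (nr * 32)" lands on the nr-th stub. */
	printf("mmu_update stub at +%d bytes\n", __HYPERVISOR_mmu_update * 32);
	printf("sched_op stub at +%d bytes\n", __HYPERVISOR_sched_op * 32);
	return 0;
}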
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/hypervisor.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/hypervisor.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/hypervisor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/hypervisor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,259 @@
++/******************************************************************************
++ * hypervisor.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/nmi.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#if defined(__i386__)
++# ifdef CONFIG_X86_PAE
++# include <asm-generic/pgtable-nopud.h>
++# else
++# include <asm-generic/pgtable-nopmd.h>
++# endif
++#elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++# include <asm-generic/pgtable-nopud.h>
++#endif
++
++extern shared_info_t *HYPERVISOR_shared_info;
++
++#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
++#ifdef CONFIG_SMP
++#define current_vcpu_info() vcpu_info(smp_processor_id())
++#else
++#define current_vcpu_info() vcpu_info(0)
++#endif
++
++#ifdef CONFIG_X86_32
++extern unsigned long hypervisor_virt_start;
++#endif
++
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
++#else
++#define is_initial_xendomain() 0
++#endif
++
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
++
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle (void);
++
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
++
++/* arch/xen/i386/mm/hypervisor.c */
++/*
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
++ */
++
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
++
++void xen_set_ldt(const void *ptr, unsigned int ents);
++
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
++
++/* Returns zero on success else negative errno. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits);
++void xen_destroy_contiguous_region(
++ unsigned long vstart, unsigned int order);
++
++struct page;
++
++int xen_limit_pages_to_max_mfn(
++ struct page *pages, unsigned int order, unsigned int address_bits);
++
++/* Turn jiffies into Xen system time. */
++u64 jiffies_to_st(unsigned long jiffies);
++
++#ifdef CONFIG_XEN_SCRUB_PAGES
++void scrub_pages(void *, unsigned int);
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++
++#include <xen/hypercall.h>
++
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
++
++#ifdef CONFIG_XEN
++#define is_running_on_xen() 1
++#else
++extern char *hypercall_stubs;
++#define is_running_on_xen() (!!hypercall_stubs)
++#endif
++
++static inline int
++HYPERVISOR_yield(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_block(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
++#endif
++
++ return rc;
++}
++
++static inline void /*__noreturn*/
++HYPERVISOR_shutdown(
++ unsigned int reason)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = reason
++ };
++
++ VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason));
++#endif
++ /* Don't recurse needlessly. */
++ BUG_ON(reason != SHUTDOWN_crash);
++ for(;;);
++}
++
++static inline int __must_check
++HYPERVISOR_poll(
++ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++ int rc;
++ struct sched_poll sched_poll = {
++ .nr_ports = nr_ports,
++ .timeout = jiffies_to_st(timeout)
++ };
++ set_xen_guest_handle(sched_poll.ports, ports);
++
++ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++ return rc;
++}
++
++#ifdef CONFIG_XEN
++
++static inline void
++MULTI_update_va_mapping(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping;
++ mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++ mcl->args[1] = new_val.pte;
++#elif defined(CONFIG_X86_PAE)
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = new_val.pte_high;
++#else
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = 0;
++#endif
++ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
++}
++
++static inline void
++MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
++ void *uop, unsigned int count)
++{
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = cmd;
++ mcl->args[1] = (unsigned long)uop;
++ mcl->args[2] = count;
++}
++
++#else /* !defined(CONFIG_XEN) */
++
++/* Multicalls not supported for HVM guests. */
++#define MULTI_update_va_mapping(a,b,c,d) ((void)0)
++#define MULTI_grant_table_op(a,b,c,d) ((void)0)
++
++#endif
++
++#endif /* __HYPERVISOR_H__ */
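MULTI_update_va_mapping() above exists so a caller can fill an array of multicall entries and flush many PTE updates with a single HYPERVISOR_multicall(), instead of paying one hypervisor trap per page. A sketch of the batching pattern with a toy entry type; the op number and the flags slot follow the header, the addresses and the UVMF_INVLPG-style flag value are illustrative:

#include <stdio.h>

struct mc_entry {			/* toy stand-in for multicall_entry_t */
	unsigned long op, args[6];
};

#define __HYPERVISOR_update_va_mapping	14
#define MULTI_UVMFLAGS_INDEX		3	/* i386 slot, per the header */

int main(void)
{
	static struct mc_entry mcl[4];		/* zero-initialized batch */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		mcl[i].op = __HYPERVISOR_update_va_mapping;
		mcl[i].args[0] = 0xc0000000UL + i * 0x1000UL;	/* va */
		mcl[i].args[1] = 0;				/* pte_low */
		mcl[i].args[MULTI_UVMFLAGS_INDEX] = 2;	/* e.g. UVMF_INVLPG */
	}
	/* A real caller would now issue HYPERVISOR_multicall(mcl, 4). */
	printf("prepared %u updates for one multicall\n", i);
	return 0;
}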
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/io.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/io.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/io.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/io.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,389 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <linux/string.h>
++#include <linux/compiler.h>
++
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) trying to avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
++
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ * Linus
++ */
++
++ /*
++ * Bit simplified and optimized by Jan Hubicka
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++ *
++ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++ * isa_read[wl] and isa_write[wl] fixed
++ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
++ */
++
++#define IO_SPACE_LIMIT 0xffff
++
++#define XQUAD_PORTIO_BASE 0xfe400000
++#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
++
++#ifdef __KERNEL__
++
++#include <asm-generic/iomap.h>
++
++#include <linux/vmalloc.h>
++#include <asm/fixmap.h>
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p) __va(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p) p
++
++/**
++ * virt_to_phys - map virtual addresses to physical
++ * @address: address to remap
++ *
++ * The returned physical address is the physical (CPU) mapping for
++ * the memory address given. It is only valid to use this function on
++ * addresses directly mapped or allocated via kmalloc.
++ *
++ * This function does not give bus mappings for DMA transfers. In
++ * almost all conceivable cases a device driver should not be using
++ * this function
++ */
++
++static inline unsigned long virt_to_phys(volatile void * address)
++{
++ return __pa(address);
++}
++
++/**
++ * phys_to_virt - map physical address to virtual
++ * @address: address to remap
++ *
++ * The returned virtual address is a current CPU mapping for
++ * the memory address given. It is only valid to use this function on
++ * addresses that have a kernel mapping.
++ *
++ * This function does not handle bus mappings for DMA transfers. In
++ * almost all conceivable cases a device driver should not be using
++ * this function.
++ */
++
++static inline void * phys_to_virt(unsigned long address)
++{
++ return __va(address);
++}
++
++/*
++ * Change "struct page" to physical address.
++ */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
++#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
++ (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
++ (unsigned long) (bv)->bv_offset)
++
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
++ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++ bvec_to_pseudophys((vec2))))
++
++extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++
++/**
++ * ioremap - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ */
++
++static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
++{
++ return __ioremap(offset, size, 0);
++}
++
++extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
++
++/*
++ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
++ * mappings, before the real ioremap() is functional.
++ * A boot-time mapping is currently limited to at most 16 pages.
++ */
++extern void *bt_ioremap(unsigned long offset, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++
++/* Use early IO mappings for DMI because it's initialized early */
++#define dmi_ioremap bt_ioremap
++#define dmi_iounmap bt_iounmap
++#define dmi_alloc alloc_bootmem
++
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++#define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); })
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline unsigned char readb(const volatile void __iomem *addr)
++{
++ return *(volatile unsigned char __force *) addr;
++}
++static inline unsigned short readw(const volatile void __iomem *addr)
++{
++ return *(volatile unsigned short __force *) addr;
++}
++static inline unsigned int readl(const volatile void __iomem *addr)
++{
++ return *(volatile unsigned int __force *) addr;
++}
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++
++static inline void writeb(unsigned char b, volatile void __iomem *addr)
++{
++ *(volatile unsigned char __force *) addr = b;
++}
++static inline void writew(unsigned short b, volatile void __iomem *addr)
++{
++ *(volatile unsigned short __force *) addr = b;
++}
++static inline void writel(unsigned int b, volatile void __iomem *addr)
++{
++ *(volatile unsigned int __force *) addr = b;
++}
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++
++#define mmiowb()
++
++static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
++{
++ memset((void __force *) addr, val, count);
++}
++static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
++{
++ __memcpy(dst, (void __force *) src, count);
++}
++static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
++{
++ __memcpy((void __force *) dst, src, count);
++}
++
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite large):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++
++/*
++ * Again, i386 does not require memory-mapped IO specific functions.
++ */
++
++#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
++
++/**
++ * check_signature - find BIOS signatures
++ * @io_addr: mmio address to check
++ * @signature: signature block
++ * @length: length of signature
++ *
++ * Perform a signature comparison with the mmio address io_addr. This
++ * address should have been obtained by ioremap.
++ * Returns 1 on a match.
++ */
++
++static inline int check_signature(volatile void __iomem * io_addr,
++ const unsigned char *signature, int length)
++{
++ int retval = 0;
++ do {
++ if (readb(io_addr) != *signature)
++ goto out;
++ io_addr++;
++ signature++;
++ length--;
++ } while (length);
++ retval = 1;
++out:
++ return retval;
++}
++
++/*
++ * Cache management
++ *
++ * This needed for two cases
++ * 1. Out of order aware processors
++ * 2. Accidentally out of order processors (PPro errata #51)
++ */
++
++#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
++
++static inline void flush_write_buffers(void)
++{
++ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
++}
++
++#define dma_cache_inv(_start,_size) flush_write_buffers()
++#define dma_cache_wback(_start,_size) flush_write_buffers()
++#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
++
++#else
++
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size) do { } while (0)
++#define dma_cache_wback(_start,_size) do { } while (0)
++#define dma_cache_wback_inv(_start,_size) do { } while (0)
++#define flush_write_buffers()
++
++#endif
++
++#endif /* __KERNEL__ */
++
++#ifdef SLOW_IO_BY_JUMPING
++#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
++#else
++#define __SLOW_DOWN_IO "outb %%al,$0x80;"
++#endif
++
++static inline void slow_down_io(void) {
++ __asm__ __volatile__(
++ __SLOW_DOWN_IO
++#ifdef REALLY_SLOW_IO
++ __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++#endif
++ : : );
++}
++
++#ifdef CONFIG_X86_NUMAQ
++extern void *xquad_portio; /* Where the IO area was mapped */
++#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
++ if (xquad_portio) \
++ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
++ else \
++ out##bwl##_local(value, port); \
++} \
++static inline void out##bwl(unsigned type value, int port) { \
++ out##bwl##_quad(value, port, 0); \
++} \
++static inline unsigned type in##bwl##_quad(int port, int quad) { \
++ if (xquad_portio) \
++ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
++ else \
++ return in##bwl##_local(port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++ return in##bwl##_quad(port, 0); \
++}
++#else
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl(unsigned type value, int port) { \
++ out##bwl##_local(value, port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++ return in##bwl##_local(port); \
++}
++#endif
++
++
++#define BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_local(unsigned type value, int port) { \
++ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
++} \
++static inline unsigned type in##bwl##_local(int port) { \
++ unsigned type value; \
++ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
++ return value; \
++} \
++static inline void out##bwl##_local_p(unsigned type value, int port) { \
++ out##bwl##_local(value, port); \
++ slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_local_p(int port) { \
++ unsigned type value = in##bwl##_local(port); \
++ slow_down_io(); \
++ return value; \
++} \
++__BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_p(unsigned type value, int port) { \
++ out##bwl(value, port); \
++ slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_p(int port) { \
++ unsigned type value = in##bwl(port); \
++ slow_down_io(); \
++ return value; \
++} \
++static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
++ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
++} \
++static inline void ins##bwl(int port, void *addr, unsigned long count) { \
++ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
++}
++
++BUILDIO(b,b,char)
++BUILDIO(w,w,short)
++BUILDIO(l,,int)
++
++/* We will be supplying our own /dev/mem implementation */
++#define ARCH_HAS_DEV_MEM
++
++#endif
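
The BUILDIO/__BUILDIO macros above generate the entire outb/inb family by
preprocessor token pasting (##) and stringizing (#). Below is a minimal
user-space sketch of the same pattern; the privileged port access is stubbed
out with an array (fake_port is purely illustrative, not a kernel API):

    /* sketch: how BUILDIO-style token pasting generates a function family */
    #include <stdio.h>

    static unsigned int fake_port[65536];   /* stands in for real I/O ports */

    #define BUILDIO(bwl, type)                                          \
    static inline void out##bwl(unsigned type value, int port) {        \
            fake_port[port] = value;   /* real code: "out" #bwl asm */  \
    }                                                                   \
    static inline unsigned type in##bwl(int port) {                     \
            return (unsigned type)fake_port[port];                      \
    }

    BUILDIO(b, char)    /* defines outb()/inb() */
    BUILDIO(w, short)   /* defines outw()/inw() */
    BUILDIO(l, int)     /* defines outl()/inl() */

    int main(void)
    {
            outb(0x42, 0x80);
            printf("inb(0x80) = 0x%x\n", inb(0x80));
            return 0;
    }
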
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/irqflags.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/irqflags.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/irqflags.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/irqflags.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,127 @@
++/*
++ * include/asm-i386/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
++
++#ifndef __ASSEMBLY__
++
++/*
++ * The uses of 'barrier' in the following reflect their role as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
++ */
++
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
++
++#define raw_local_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define raw_local_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++} while (0)
++
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
++
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++void halt(void);
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
++
++/*
++ * For spinlocks, etc:
++ */
++#define __raw_local_irq_save() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_local_irq_disable(); \
++ \
++ flags; \
++})
++
++#define raw_local_irq_save(flags) \
++ do { (flags) = __raw_local_irq_save(); } while (0)
++
++#endif /* __ASSEMBLY__ */
++
++/*
++ * Do the CPU's IRQ-state tracing from assembly code. We call a
++ * C function, so save all the C-clobbered registers:
++ */
++#ifdef CONFIG_TRACE_IRQFLAGS
++
++# define TRACE_IRQS_ON \
++ pushl %eax; \
++ pushl %ecx; \
++ pushl %edx; \
++ call trace_hardirqs_on; \
++ popl %edx; \
++ popl %ecx; \
++ popl %eax;
++
++# define TRACE_IRQS_OFF \
++ pushl %eax; \
++ pushl %ecx; \
++ pushl %edx; \
++ call trace_hardirqs_off; \
++ popl %edx; \
++ popl %ecx; \
++ popl %eax;
++
++#else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++#endif
++
++#endif
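
Under Xen, cli/sti are replaced by writes to the per-VCPU evtchn_upcall_mask;
the subtle part, visible in raw_local_irq_restore() above, is that enabling
must unmask first and then re-check evtchn_upcall_pending, since an event may
have arrived while masked. A minimal user-space sketch of that discipline,
with vcpu_info and force_evtchn_callback mocked:

    /* sketch of the mask/unmask/recheck discipline, mock vcpu_info */
    #include <stdio.h>

    struct vcpu_info { unsigned char evtchn_upcall_mask, evtchn_upcall_pending; };
    static struct vcpu_info vcpu;            /* stands in for current_vcpu_info() */

    static void force_evtchn_callback(void) { puts("delivering pending event"); }

    static unsigned long irq_save(void)      /* models __raw_local_irq_save() */
    {
            unsigned long flags = vcpu.evtchn_upcall_mask;
            vcpu.evtchn_upcall_mask = 1;     /* models raw_local_irq_disable() */
            return flags;
    }

    static void irq_restore(unsigned long flags)  /* models raw_local_irq_restore() */
    {
            vcpu.evtchn_upcall_mask = flags;
            if (flags == 0 && vcpu.evtchn_upcall_pending)
                    force_evtchn_callback(); /* unmask, then re-check */
    }

    int main(void)
    {
            unsigned long flags = irq_save();
            vcpu.evtchn_upcall_pending = 1;  /* event arrives while masked */
            irq_restore(flags);              /* prints the callback message */
            return 0;
    }
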
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/maddr.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/maddr.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/maddr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/maddr.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,193 @@
++#ifndef _I386_MADDR_H
++#define _I386_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME_BIT (1UL<<31)
++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++
++/* Definitions for machine and pseudophysical addresses. */
++#ifdef CONFIG_X86_PAE
++typedef unsigned long long paddr_t;
++typedef unsigned long long maddr_t;
++#else
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++#endif
++
++#ifdef CONFIG_XEN
++
++extern unsigned long *phys_to_machine_mapping;
++extern unsigned long max_mapnr;
++
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int machine_to_phys_order;
++
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return pfn;
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
++
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 1;
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
++
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++ unsigned long pfn;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return mfn;
++
++ if (unlikely((mfn >> machine_to_phys_order) != 0))
++ return max_mapnr;
++
++ /* The array access can fail (e.g., device space beyond end of RAM). */
++ asm (
++ "1: movl %1,%0\n"
++ "2:\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movl %2,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,3b\n"
++ ".previous"
++ : "=r" (pfn)
++ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
++
++ return pfn;
++}
++
++/*
++ * We detect special mappings in one of two ways:
++ * 1. If the MFN is an I/O page then Xen will set the m2p entry
++ * to be outside our maximum possible pseudophys range.
++ * 2. If the MFN belongs to a different domain then we will certainly
++ * not have MFN in our p2m table. Conversely, if the page is ours,
++ * then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range frame number.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ * require. In all the cases we care about, the FOREIGN_FRAME bit is
++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn(mfn);
++ if ((pfn < max_mapnr)
++ && !xen_feature(XENFEAT_auto_translated_physmap)
++ && (phys_to_machine_mapping[pfn] != mfn))
++ return max_mapnr; /* force !pfn_valid() */
++ return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return;
++ }
++ phys_to_machine_mapping[pfn] = mfn;
++}
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++ return phys;
++}
++
++#ifdef CONFIG_X86_PAE
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++ /*
++ * In PAE mode, the NX bit needs to be dealt with in the value
++ * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
++ * but for i386 the conversion to ulong for the argument will
++ * clip it off.
++ */
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++ /*
++ * In PAE mode, the NX bit needs to be dealt with in the value
++ * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
++ * but for i386 the conversion to ulong for the argument will
++ * clip it off.
++ */
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++ return phys;
++}
++#endif
++
++#ifdef CONFIG_X86_PAE
++#define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } )
++static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
++{
++ pte_t pte;
++
++ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
++ (pgprot_val(pgprot) >> 32);
++ pte.pte_high &= (__supported_pte_mask >> 32);
++ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
++ __supported_pte_mask;
++ return pte;
++}
++#else
++#define __pte_ma(x) ((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#endif
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
++
++#endif /* !CONFIG_XEN */
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#endif /* _I386_MADDR_H */
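
phys_to_machine() splits an address into a frame number and a page offset,
translates only the frame through the p2m table, and recombines the two.
A toy sketch of that arithmetic with a hypothetical four-entry p2m array:

    /* sketch of the split/translate/recombine done by phys_to_machine() */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    /* toy p2m table: pseudo-physical frame -> machine frame */
    static unsigned long p2m[] = { 7, 3, 12, 5 };

    static unsigned long phys_to_machine(unsigned long phys)
    {
            unsigned long mfn = p2m[phys >> PAGE_SHIFT];       /* pfn_to_mfn() */
            return (mfn << PAGE_SHIFT) | (phys & ~PAGE_MASK);  /* keep offset */
    }

    int main(void)
    {
            unsigned long phys = (2UL << PAGE_SHIFT) | 0x123;  /* pfn 2, offset 0x123 */
            printf("phys %#lx -> machine %#lx\n", phys, phys_to_machine(phys));
            return 0;  /* prints machine 0xc123: mfn 12, same offset */
    }
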
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/mmu_context.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/mmu_context.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/mmu_context.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/mmu_context.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,108 @@
++#ifndef __I386_SCHED_H
++#define __I386_SCHED_H
++
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++
++/*
++ * Used for LDT copy/destruction.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if 0 /* XEN: no lazy tlb */
++ unsigned cpu = smp_processor_id();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
++#endif
++}
++
++#define prepare_arch_switch(next) __prepare_arch_switch()
++
++static inline void __prepare_arch_switch(void)
++{
++ /*
++ * Save away %fs and %gs. No need to save %es and %ds, as those
++ * are always kernel segments while inside the kernel. Must
++ * happen before reload of cr3/ldt (i.e., not in __switch_to).
++ */
++ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
++ : "=m" (current->thread.fs),
++ "=m" (current->thread.gs));
++ asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++ : : "r" (0) );
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void switch_mm(struct mm_struct *prev,
++ struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ int cpu = smp_processor_id();
++ struct mmuext_op _op[2], *op = _op;
++
++ if (likely(prev != next)) {
++ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++ !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
++
++ /* stop flush ipis for the previous mm */
++ cpu_clear(cpu, prev->cpu_vm_mask);
++#if 0 /* XEN: no lazy tlb */
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++ per_cpu(cpu_tlbstate, cpu).active_mm = next;
++#endif
++ cpu_set(cpu, next->cpu_vm_mask);
++
++ /* Re-load page tables: load_cr3(next->pgd) */
++ op->cmd = MMUEXT_NEW_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ op++;
++
++ /*
++ * load the LDT, if the LDT is different:
++ */
++ if (unlikely(prev->context.ldt != next->context.ldt)) {
++ /* load_LDT_nolock(&next->context, cpu) */
++ op->cmd = MMUEXT_SET_LDT;
++ op->arg1.linear_addr = (unsigned long)next->context.ldt;
++ op->arg2.nr_ents = next->context.size;
++ op++;
++ }
++
++ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++ }
++#if 0 /* XEN: no lazy tlb */
++ else {
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++ BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
++
++ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++ /* We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload %cr3.
++ */
++ load_cr3(next->pgd);
++ load_LDT_nolock(&next->context, cpu);
++ }
++ }
++#endif
++}
++
++#define deactivate_mm(tsk, mm) \
++ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
++ mm_pin(next);
++ switch_mm(prev, next, NULL);
++}
++
++#endif
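
switch_mm() above queues MMUEXT_NEW_BASEPTR and, only when the LDT changed,
MMUEXT_SET_LDT into one array so that a single hypercall covers both. A sketch
of that batching pattern with mocked types (the real struct mmuext_op and
HYPERVISOR_mmuext_op() differ; this only shows the queue-then-submit shape):

    /* sketch of the op-batching pattern used by switch_mm() */
    #include <stdio.h>

    enum { MMUEXT_NEW_BASEPTR = 0, MMUEXT_SET_LDT = 1 };
    struct mmuext_op { int cmd; unsigned long arg; };

    /* stands in for HYPERVISOR_mmuext_op(); one trap covers all queued ops */
    static int mmuext_op(struct mmuext_op *ops, int count)
    {
            for (int i = 0; i < count; i++)
                    printf("op %d: cmd=%d arg=%#lx\n", i, ops[i].cmd, ops[i].arg);
            return 0;
    }

    int main(void)
    {
            struct mmuext_op _op[2], *op = _op;
            int ldt_changed = 1;

            op->cmd = MMUEXT_NEW_BASEPTR; op->arg = 0x1000; op++;  /* always */
            if (ldt_changed) {                                     /* optional */
                    op->cmd = MMUEXT_SET_LDT; op->arg = 0x2000; op++;
            }
            return mmuext_op(_op, (int)(op - _op));  /* one call, 1 or 2 ops */
    }
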
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/mmu.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/mmu.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/mmu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/mmu.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,29 @@
++#ifndef __i386_MMU_H
++#define __i386_MMU_H
++
++#include <asm/semaphore.h>
++/*
++ * The i386 doesn't have an mmu context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct {
++ int size;
++ struct semaphore sem;
++ void *ldt;
++ void *vdso;
++#ifdef CONFIG_XEN
++ int has_foreign_mappings;
++#endif
++} mm_context_t;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/page.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/page.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/page.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/page.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,233 @@
++#ifndef _I386_PAGE_H
++#define _I386_PAGE_H
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT 12
++#define PAGE_SIZE (1UL << PAGE_SHIFT)
++#define PAGE_MASK (~(PAGE_SIZE-1))
++
++#ifdef CONFIG_X86_PAE
++#define __PHYSICAL_MASK_SHIFT 40
++#define __PHYSICAL_MASK ((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
++#define PHYSICAL_PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
++#else
++#define __PHYSICAL_MASK_SHIFT 32
++#define __PHYSICAL_MASK (~0UL)
++#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
++#endif
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#ifdef __KERNEL__
++
++/*
++ * Need to repeat this here in order to not include pgtable.h (which in turn
++ * depends on definitions made here), but to be able to use the symbolic
++ * below. The preprocessor will warn if the two definitions aren't identical.
++ */
++#define _PAGE_PRESENT 0x001
++#define _PAGE_IO 0x200
++
++#ifndef __ASSEMBLY__
++
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <asm/bug.h>
++#include <xen/interface/xen.h>
++#include <xen/features.h>
++
++#ifdef CONFIG_X86_USE_3DNOW
++
++#include <asm/mmx.h>
++
++#define clear_page(page) mmx_clear_page((void *)(page))
++#define copy_page(to,from) mmx_copy_page(to,from)
++
++#else
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/*
++ * On older x86 processors it's not a win to use MMX here, it seems.
++ * Maybe on the K6-III?
++ */
++
++#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
++#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
++
++#endif
++
++#define clear_user_page(page, vaddr, pg) clear_page(page)
++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
++
++/*
++ * These are used to make use of C type-checking..
++ */
++extern int nx_enabled;
++#ifdef CONFIG_X86_PAE
++extern unsigned long long __supported_pte_mask;
++typedef struct { unsigned long pte_low, pte_high; } pte_t;
++typedef struct { unsigned long long pmd; } pmd_t;
++typedef struct { unsigned long long pgd; } pgd_t;
++typedef struct { unsigned long long pgprot; } pgprot_t;
++#define pgprot_val(x) ((x).pgprot)
++#include <asm/maddr.h>
++#define __pte(x) ({ unsigned long long _x = (x); \
++ if ((_x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT) \
++ _x = pte_phys_to_machine(_x); \
++ ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
++#define __pgd(x) ({ unsigned long long _x = (x); \
++ (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
++#define __pmd(x) ({ unsigned long long _x = (x); \
++ (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; })
++static inline unsigned long long __pte_val(pte_t x)
++{
++ return ((unsigned long long)x.pte_high << 32) | x.pte_low;
++}
++static inline unsigned long long pte_val(pte_t x)
++{
++ unsigned long long ret = __pte_val(x);
++ if ((x.pte_low & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)
++ ret = pte_machine_to_phys(ret);
++ return ret;
++}
++#define __pmd_val(x) ((x).pmd)
++static inline unsigned long long pmd_val(pmd_t x)
++{
++ unsigned long long ret = __pmd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++ return ret;
++}
++#define __pud_val(x) __pgd_val((x).pgd)
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long long pgd_val(pgd_t x)
++{
++ unsigned long long ret = __pgd_val(x);
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++#define HPAGE_SHIFT 21
++#else
++typedef struct { unsigned long pte_low; } pte_t;
++typedef struct { unsigned long pgd; } pgd_t;
++typedef struct { unsigned long pgprot; } pgprot_t;
++#define pgprot_val(x) ((x).pgprot)
++#include <asm/maddr.h>
++#define boot_pte_t pte_t /* or would you rather have a typedef */
++#define __pte_val(x) ((x).pte_low)
++#define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO)) \
++ == _PAGE_PRESENT ? \
++ machine_to_phys(__pte_val(x)) : \
++ __pte_val(x))
++#define __pte(x) ({ unsigned long _x = (x); \
++ if ((_x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT) \
++ _x = phys_to_machine(_x); \
++ ((pte_t) { _x }); })
++#define __pmd_val(x) __pud_val((x).pud)
++#define __pud_val(x) __pgd_val((x).pgd)
++#define __pgd(x) ({ unsigned long _x = (x); \
++ (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; })
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long pgd_val(pgd_t x)
++{
++ unsigned long ret = __pgd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret);
++#endif
++ return ret;
++}
++#define HPAGE_SHIFT 22
++#endif
++#define PTE_MASK PHYSICAL_PAGE_MASK
++
++#ifdef CONFIG_HUGETLB_PAGE
++#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK (~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++#endif
++
++#define __pgprot(x) ((pgprot_t) { (x) } )
++
++#endif /* !__ASSEMBLY__ */
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++/*
++ * This handles the memory map.. We could make this a config
++ * option, but too many people screw it up, and too few need
++ * it.
++ *
++ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
++ * a virtual address space of one gigabyte, which limits the
++ * amount of physical memory you can use to about 950MB.
++ *
++ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
++ * and CONFIG_HIGHMEM64G options in the kernel configuration.
++ */
++
++#ifndef __ASSEMBLY__
++
++struct vm_area_struct;
++
++/*
++ * This much address space is reserved for vmalloc() and iomap()
++ * as well as fixmap mappings.
++ */
++extern unsigned int __VMALLOC_RESERVE;
++
++extern int sysctl_legacy_va_layout;
++
++extern int page_is_ram(unsigned long pagenr);
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef __ASSEMBLY__
++#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
++#define __PHYSICAL_START CONFIG_PHYSICAL_START
++#else
++#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
++#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
++#endif
++#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET 0
++#endif
++
++#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
++#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
++#define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
++#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn) ((pfn) < max_mapnr)
++#endif /* CONFIG_FLATMEM */
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++
++#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++
++#define VM_DATA_DEFAULT_FLAGS \
++ (VM_READ | VM_WRITE | \
++ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
++
++#define __HAVE_ARCH_GATE_AREA 1
++#endif /* __KERNEL__ */
++
++#endif /* _I386_PAGE_H */
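
__pa()/__va() are pure arithmetic against PAGE_OFFSET because lowmem is
linearly mapped. A user-space sketch of that arithmetic, assuming the usual
3G/1G split mentioned in the comment above (the address used is arbitrary):

    /* sketch of the __pa()/__va() linear-map arithmetic */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xC0000000UL   /* the usual 3G/1G split */

    #define __pa(x) ((uintptr_t)(x) - PAGE_OFFSET)
    #define __va(x) ((void *)((uintptr_t)(x) + PAGE_OFFSET))

    int main(void)
    {
            uintptr_t kaddr = 0xC0100000UL;   /* a lowmem kernel address */
            uintptr_t phys  = __pa(kaddr);
            printf("virt %#lx -> phys %#lx -> virt %p\n",
                   (unsigned long)kaddr, (unsigned long)phys, __va(phys));
            return 0;
    }
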
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pci.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pci.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pci.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pci.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,148 @@
++#ifndef __i386_PCI_H
++#define __i386_PCI_H
++
++
++#ifdef __KERNEL__
++#include <linux/mm.h> /* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++ already-configured bus numbers - to be used for buggy BIOSes
++ or architectures with incomplete PCI setup by the loader */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses() 0
++#endif
++
++#include <asm/hypervisor.h>
++#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain())
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO 0x1000
++#define PCIBIOS_MIN_MEM (pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO 0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++/* Dynamic DMA mapping stuff.
++ * i386 has everything mapped statically.
++ */
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/io.h>
++
++struct pci_dev;
++
++#ifdef CONFIG_SWIOTLB
++
++
++/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
++#define PCI_DMA_BUS_IS_PHYS (0)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#else
++
++/* The PCI address space does equal the physical memory
++ * address space. The networking and block device layers use
++ * this boolean for bounce buffer decisions.
++ */
++#define PCI_DMA_BUS_IS_PHYS (1)
++
++/* pci_unmap_{page,single} is a nop so... */
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME) (0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME) (0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
++
++#endif
++
++/* This is always fine. */
++#define pci_dac_dma_supported(pci_dev, mask) (1)
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++ return ((dma64_addr_t) page_to_phys(page) +
++ (dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return pfn_to_page(dma_addr >> PAGE_SHIFT);
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++ flush_write_buffers();
++}
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state, int write_combine);
++
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++ enum pci_dma_burst_strategy *strat,
++ unsigned long *strategy_parameter)
++{
++ *strat = PCI_DMA_BURST_INFINITY;
++ *strategy_parameter = ~0UL;
++}
++#endif
++
++#endif /* __KERNEL__ */
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++#include <xen/pcifront.h>
++#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
++
++/* implement the pci_ DMA API in terms of the generic device dma_ one */
++#include <asm-generic/pci-dma-compat.h>
++
++/* generic pci stuff */
++#include <asm-generic/pci.h>
++
++#endif /* __i386_PCI_H */
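
The DECLARE_PCI_UNMAP_* macros let a driver reserve unmap bookkeeping that
compiles to real struct fields under SWIOTLB and to nothing otherwise, which
only works if the driver touches the state exclusively through the accessor
macros. A user-space sketch of the SWIOTLB branch (ring_entry is a made-up
driver structure for illustration):

    /* sketch: why drivers must use the pci_unmap_* accessors */
    #include <stdio.h>

    typedef unsigned long dma_addr_t;

    #define DECLARE_PCI_UNMAP_ADDR(n)    dma_addr_t n;
    #define pci_unmap_addr(p, n)         ((p)->n)
    #define pci_unmap_addr_set(p, n, v)  (((p)->n) = (v))

    struct ring_entry {
            int buffer_id;                   /* some ordinary driver state */
            DECLARE_PCI_UNMAP_ADDR(mapping)  /* vanishes in the non-SWIOTLB build */
    };

    int main(void)
    {
            struct ring_entry e = { 1 };
            pci_unmap_addr_set(&e, mapping, 0xdeadb000UL);
            printf("saved dma addr: %#lx\n", pci_unmap_addr(&e, mapping));
            return 0;
    }
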
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgalloc.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgalloc.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgalloc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgalloc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,59 @@
++#ifndef _I386_PGALLOC_H
++#define _I386_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++#include <linux/mm.h> /* for struct page */
++#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
++
++#define pmd_populate_kernel(mm, pmd, pte) \
++ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++
++#define pmd_populate(mm, pmd, pte) \
++do { \
++ unsigned long pfn = page_to_pfn(pte); \
++ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
++ if (!PageHighMem(pte)) \
++ BUG_ON(HYPERVISOR_update_va_mapping( \
++ (unsigned long)__va(pfn << PAGE_SHIFT), \
++ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \
++ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \
++ kmap_flush_unused(); \
++ set_pmd(pmd, \
++ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
++ } else \
++ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
++} while (0)
++
++/*
++ * Allocate and free page tables.
++ */
++extern pgd_t *pgd_alloc(struct mm_struct *);
++extern void pgd_free(pgd_t *pgd);
++
++extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
++extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++ make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
++ free_page((unsigned long)pte);
++}
++
++extern void pte_free(struct page *pte);
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++
++#ifdef CONFIG_X86_PAE
++/*
++ * In the PAE case we free the pmds as part of the pgd.
++ */
++#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
++#define pmd_free(x) do { } while (0)
++#define __pmd_free_tlb(tlb,x) do { } while (0)
++#define pud_populate(mm, pmd, pte) BUG()
++#endif
++
++#define check_pgt_cache() do { } while (0)
++
++#endif /* _I386_PGALLOC_H */
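
pmd_populate() above enforces the pinned-pagetable rule: once a pgd has been
pinned (registered with Xen as a page table), any pte page hooked beneath it
must first be remapped read-only, because the hypervisor validates and
write-protects page-table frames. A heavily simplified user-space sketch of
that control flow, with the hypercall mocked (update_va_mapping_ro and the
flag constant are illustrative only):

    /* sketch of the pinned-pagetable rule in pmd_populate() */
    #include <stdio.h>

    static int pgd_pinned = 1;

    static void update_va_mapping_ro(unsigned long va)   /* mock hypercall */
    {
            printf("remap %#lx read-only\n", va);
    }

    static void pmd_populate(unsigned long *pmd, unsigned long pte_va,
                             unsigned long pte_pa)
    {
            if (pgd_pinned)
                    update_va_mapping_ro(pte_va);  /* Xen validates RO frames */
            *pmd = pte_pa | 0x067;                 /* _PAGE_TABLE-style flags */
    }

    int main(void)
    {
            unsigned long pmd_slot = 0;
            pmd_populate(&pmd_slot, 0xC0200000UL, 0x00200000UL);
            printf("pmd = %#lx\n", pmd_slot);
            return 0;
    }
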
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,20 @@
++#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
++#define _I386_PGTABLE_2LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * traditional i386 two-level paging structure:
++ */
++
++#define PGDIR_SHIFT 22
++#define PTRS_PER_PGD 1024
++
++/*
++ * the i386 is two-level, so we don't really have any
++ * PMD directory physically.
++ */
++
++#define PTRS_PER_PTE 1024
++
++#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
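
These constants imply a 10/10/12 split of a 32-bit virtual address: 10 bits
of pgd index, 10 bits of pte index, 12 bits of page offset. A small sketch
that decomposes an address the way pgd_index()/pte_index() do:

    /* sketch of the 10/10/12 split implied by PGDIR_SHIFT 22 */
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PGDIR_SHIFT  22
    #define PTRS_PER_PGD 1024
    #define PTRS_PER_PTE 1024

    int main(void)
    {
            unsigned long addr = 0xC0101234UL;
            unsigned long pgd_idx = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
            unsigned long pte_idx = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
            unsigned long offset  = addr & ((1UL << PAGE_SHIFT) - 1);
            printf("addr %#lx: pgd %lu, pte %lu, offset %#lx\n",
                   addr, pgd_idx, pte_idx, offset);
            return 0;  /* prints pgd 768, pte 257, offset 0x234 */
    }
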
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-2level.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-2level.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-2level.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-2level.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,121 @@
++#ifndef _I386_PGTABLE_2LEVEL_H
++#define _I386_PGTABLE_2LEVEL_H
++
++#include <asm-generic/pgtable-nopmd.h>
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
++ __pte_val(e), pte_pfn(e))
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \
++ __pgd_val(e), pgd_val(e) >> PAGE_SHIFT)
++
++/*
++ * Certain architectures need to do special things when PTEs
++ * within a page table are directly modified. Thus, the following
++ * hook is made available.
++ */
++#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++ set_pte((ptep), (pteval)); \
++ xen_invlpg((addr)); \
++ } \
++} while (0)
++
++#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
++
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
++
++#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
++#define pte_none(x) (!(x).pte_low)
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
++ pte = __pte_ma(xchg(&ptep->pte_low, 0));
++ }
++ return pte;
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte_low = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++#define pte_same(a, b) ((a).pte_low == (b).pte_low)
++
++#define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr : \
++ (_pte).pte_low & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : \
++ __pte_mfn(_pte))
++
++#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
++
++#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++
++/*
++ * All present user pages are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++ return pte_user(pte);
++}
++
++/*
++ * All present pages are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++ return 1;
++}
++
++/*
++ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
++ * into this range:
++ */
++#define PTE_FILE_MAX_BITS 29
++
++#define pte_to_pgoff(pte) \
++ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
++
++#define pgoff_to_pte(off) \
++ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val >> 1) & 0x1f)
++#define __swp_offset(x) ((x).val >> 8)
++#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
++#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
++
++void vmalloc_sync_all(void);
++
++#endif /* _I386_PGTABLE_2LEVEL_H */
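
The swap-entry encoding above keeps bits 0, 6 and 7 clear (present,
_PAGE_FILE and _PAGE_PROTNONE), packing the swap type into bits 1..5 and the
offset from bit 8 upward, so a swapped-out pte is never mistaken for a
present one. A round-trip sketch of the packing:

    /* sketch of the 2-level swap-entry packing */
    #include <stdio.h>

    typedef struct { unsigned long val; } swp_entry_t;

    #define __swp_type(x)            (((x).val >> 1) & 0x1f)
    #define __swp_offset(x)          ((x).val >> 8)
    #define __swp_entry(type, off)   ((swp_entry_t){ ((type) << 1) | ((off) << 8) })

    int main(void)
    {
            swp_entry_t e = __swp_entry(3UL, 0x1234UL);  /* device 3, slot 0x1234 */
            printf("val=%#lx type=%lu offset=%#lx\n",
                   e.val, __swp_type(e), __swp_offset(e));
            return 0;  /* bits 0, 6 and 7 stay clear: pte reads as not-present */
    }
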
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,24 @@
++#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
++#define _I386_PGTABLE_3LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT 30
++#define PTRS_PER_PGD 4
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT 21
++#define PTRS_PER_PMD 512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE 512
++
++#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-3level.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-3level.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable-3level.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable-3level.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,211 @@
++#ifndef _I386_PGTABLE_3LEVEL_H
++#define _I386_PGTABLE_3LEVEL_H
++
++#include <asm-generic/pgtable-nopud.h>
++
++/*
++ * Intel Physical Address Extension (PAE) Mode - three-level page
++ * tables on PPro+ CPUs.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
++ &(e), __pte_val(e), pte_pfn(e))
++#define pmd_ERROR(e) \
++ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
++
++#define pud_none(pud) 0
++#define pud_bad(pud) 0
++#define pud_present(pud) 1
++
++/*
++ * Is the pte executable?
++ */
++static inline int pte_x(pte_t pte)
++{
++ return !(__pte_val(pte) & _PAGE_NX);
++}
++
++/*
++ * All present user-pages with !NX bit are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++ return pte_user(pte) && pte_x(pte);
++}
++/*
++ * All present pages with !NX bit are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++ return pte_x(pte);
++}
++
++/* Rules for using set_pte: the pte being assigned *must* be
++ * either not present or in a state where the hardware will
++ * not attempt to update the pte. In places where this is
++ * not possible, use ptep_get_and_clear to obtain the old pte
++ * value and then use set_pte to update it. -ben
++ */
++#define __HAVE_ARCH_SET_PTE_ATOMIC
++
++static inline void set_pte(pte_t *ptep, pte_t pte)
++{
++ ptep->pte_high = pte.pte_high;
++ smp_wmb();
++ ptep->pte_low = pte.pte_low;
++}
++#define set_pte_atomic(pteptr,pteval) \
++ set_64bit((unsigned long long *)(pteptr),__pte_val(pteval))
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++ set_pte((ptep), (pteval)); \
++ xen_invlpg((addr)); \
++ } \
++} while (0)
++
++#define set_pmd(pmdptr,pmdval) \
++ xen_l2_entry_update((pmdptr), (pmdval))
++#define set_pud(pudptr,pudval) \
++ xen_l3_entry_update((pudptr), (pudval))
++
++/*
++ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
++ * the TLB via cr3 if the top-level pgd is changed...
++ * We do not let the generic code free and clear pgd entries due to
++ * this erratum.
++ */
++static inline void pud_clear (pud_t * pud) { }
++
++#define pud_page(pud) \
++((struct page *) __va(pud_val(pud) & PAGE_MASK))
++
++#define pud_page_kernel(pud) \
++((unsigned long) __va(pud_val(pud) & PAGE_MASK))
++
++
++/* Find an entry in the second-level page table.. */
++#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
++ pmd_index(address))
++
++static inline int pte_none(pte_t pte)
++{
++ return !(pte.pte_low | pte.pte_high);
++}
++
++/*
++ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
++ * entry, so clear the bottom half first and enforce ordering with a compiler
++ * barrier.
++ */
++static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ if ((mm != current->mm && mm != &init_mm)
++ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ ptep->pte_low = 0;
++ smp_wmb();
++ ptep->pte_high = 0;
++ }
++}
++
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ uint64_t val = __pte_val(pte);
++ if (__cmpxchg64(ptep, val, 0) != val) {
++ /* xchg acts as a barrier before the setting of the high bits */
++ pte.pte_low = xchg(&ptep->pte_low, 0);
++ pte.pte_high = ptep->pte_high;
++ ptep->pte_high = 0;
++ }
++ }
++ }
++ return pte;
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte_low = 0; \
++ smp_wmb(); \
++ __ptep->pte_high = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++static inline int pte_same(pte_t a, pte_t b)
++{
++ return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
++}
++
++#define pte_page(x) pfn_to_page(pte_pfn(x))
++
++#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
++ ((_pte).pte_high << (32-PAGE_SHIFT)))
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr : \
++ (_pte).pte_low & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : \
++ __pte_mfn(_pte))
++
++extern unsigned long long __supported_pte_mask;
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
++ pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
++{
++ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
++ pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++/*
++ * Bits 0, 6 and 7 are taken in the low part of the pte,
++ * put the 32 bits of offset into the high part.
++ */
++#define pte_to_pgoff(pte) ((pte).pte_high)
++#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
++#define PTE_FILE_MAX_BITS 32
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val) & 0x1f)
++#define __swp_offset(x) ((x).val >> 5)
++#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
++#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
++#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
++
++#define __pmd_free_tlb(tlb, x) do { } while (0)
++
++void vmalloc_sync_all(void);
++
++#endif /* _I386_PGTABLE_3LEVEL_H */
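
With PAE a pte is two 32-bit words, so set_pte() above writes the high word
first and the low word (which carries the present bit) last, while pte_clear()
does the reverse; otherwise the MMU could observe a half-written entry with
the present bit already set. A user-space sketch of the ordering, with
smp_wmb() reduced to a compiler barrier (sufficient only as an illustration):

    /* sketch of the PAE two-halves update order */
    #include <stdio.h>

    typedef struct { unsigned long pte_low, pte_high; } pte_t;

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static void set_pte(pte_t *ptep, pte_t pte)
    {
            ptep->pte_high = pte.pte_high;  /* upper half first... */
            barrier();
            ptep->pte_low = pte.pte_low;    /* ...present bit last */
    }

    static void pte_clear(pte_t *ptep)
    {
            ptep->pte_low = 0;              /* clear present first... */
            barrier();
            ptep->pte_high = 0;             /* ...then the upper half */
    }

    int main(void)
    {
            pte_t slot = { 0, 0 };
            set_pte(&slot, (pte_t){ 0x1067, 0x1 });  /* pfn bits + flags */
            printf("low=%#lx high=%#lx\n", slot.pte_low, slot.pte_high);
            pte_clear(&slot);
            printf("low=%#lx high=%#lx\n", slot.pte_low, slot.pte_high);
            return 0;
    }
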
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/pgtable.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/pgtable.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,537 @@
++#ifndef _I386_PGTABLE_H
++#define _I386_PGTABLE_H
++
++#include <asm/hypervisor.h>
++
++/*
++ * The Linux memory management assumes a three-level page table setup. On
++ * the i386, we use that, but "fold" the mid level into the top-level page
++ * table, so that we physically have the same two-level page table as the
++ * i386 mmu expects.
++ *
++ * This file contains the functions and defines necessary to modify and use
++ * the i386 page table tree.
++ */
++#ifndef __ASSEMBLY__
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++
++#ifndef _I386_BITOPS_H
++#include <asm/bitops.h>
++#endif
++
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++
++/* Is this pagetable pinned? */
++#define PG_pinned PG_arch_1
++
++struct mm_struct;
++struct vm_area_struct;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++extern unsigned long empty_zero_page[1024];
++extern pgd_t *swapper_pg_dir;
++extern kmem_cache_t *pgd_cache;
++extern kmem_cache_t *pmd_cache;
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++
++void pmd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pgtable_cache_init(void);
++void paging_init(void);
++
++/*
++ * The Linux x86 paging architecture is 'compile-time dual-mode', it
++ * implements both the traditional 2-level x86 page tables and the
++ * newer 3-level PAE-mode page tables.
++ */
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level-defs.h>
++# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_MASK (~(PMD_SIZE-1))
++#else
++# include <asm/pgtable-2level-defs.h>
++#endif
++
++#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
++#define FIRST_USER_ADDRESS 0
++
++#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
++#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
++
++#define TWOLEVEL_PGDIR_SHIFT 22
++#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
++#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
++
++/* Just any arbitrary offset to the start of the vmalloc VM area: the
++ * current 8MB value just means that there will be an 8MB "hole" after the
++ * physical memory until the kernel virtual memory starts. That means that
++ * any out-of-bounds memory accesses will hopefully be caught.
++ * The vmalloc() routines leave a hole of 4kB between each vmalloced
++ * area for the same reason. ;)
++ */
++#define VMALLOC_OFFSET (8*1024*1024)
++#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
++ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
++#ifdef CONFIG_HIGHMEM
++# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
++#else
++# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
++#endif
++
++/*
++ * _PAGE_PSE set in the page directory entry just means that
++ * the page directory entry points directly to a 4MB-aligned block of
++ * memory.
++ */
++#define _PAGE_BIT_PRESENT 0
++#define _PAGE_BIT_RW 1
++#define _PAGE_BIT_USER 2
++#define _PAGE_BIT_PWT 3
++#define _PAGE_BIT_PCD 4
++#define _PAGE_BIT_ACCESSED 5
++#define _PAGE_BIT_DIRTY 6
++#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
++/*#define _PAGE_BIT_UNUSED1 9*/ /* available for programmer */
++#define _PAGE_BIT_UNUSED2 10
++#define _PAGE_BIT_UNUSED3 11
++#define _PAGE_BIT_NX 63
++
++#define _PAGE_PRESENT 0x001
++#define _PAGE_RW 0x002
++#define _PAGE_USER 0x004
++#define _PAGE_PWT 0x008
++#define _PAGE_PCD 0x010
++#define _PAGE_ACCESSED 0x020
++#define _PAGE_DIRTY 0x040
++#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
++/*#define _PAGE_UNUSED1 0x200*/ /* available for programmer */
++#define _PAGE_UNUSED2 0x400
++#define _PAGE_UNUSED3 0x800
++
++/* If _PAGE_PRESENT is clear, we use these: */
++#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
++ pte_present gives true */
++#ifdef CONFIG_X86_PAE
++#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
++#else
++#define _PAGE_NX 0
++#endif
++
++/* Mapped page is I/O or foreign and has no associated page struct. */
++#define _PAGE_IO 0x200
++
++#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
++
++#define PAGE_NONE \
++ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED \
++ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++
++#define PAGE_SHARED_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY \
++ PAGE_COPY_NOEXEC
++#define PAGE_READONLY \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++
++#define _PAGE_KERNEL \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
++#define _PAGE_KERNEL_EXEC \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
++
++extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
++#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
++#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
++#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
++
++/*
++ * The i386 can't do page protection for execute; it considers execute
++ * the same as read. Also, write permissions imply read permissions.
++ * This is the closest we can get..
++ */
++#define __P000 PAGE_NONE
++#define __P001 PAGE_READONLY
++#define __P010 PAGE_COPY
++#define __P011 PAGE_COPY
++#define __P100 PAGE_READONLY_EXEC
++#define __P101 PAGE_READONLY_EXEC
++#define __P110 PAGE_COPY_EXEC
++#define __P111 PAGE_COPY_EXEC
++
++#define __S000 PAGE_NONE
++#define __S001 PAGE_READONLY
++#define __S010 PAGE_SHARED
++#define __S011 PAGE_SHARED
++#define __S100 PAGE_READONLY_EXEC
++#define __S101 PAGE_READONLY_EXEC
++#define __S110 PAGE_SHARED_EXEC
++#define __S111 PAGE_SHARED_EXEC
++
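
The __P/__S rows above are what the generic mm code loads into its
protection_map[]: mmap's PROT_* bits plus the shared flag form a 4-bit index,
private rows in the low half and shared rows in the high half. A sketch of
the lookup, with strings standing in for the pgprot values:

    /* sketch of how the __P/__S rows are consumed via protection_map */
    #include <stdio.h>

    #define PROT_READ  1
    #define PROT_WRITE 2
    #define PROT_EXEC  4

    static const char *protection_map[16] = {
            /* private */ "none", "readonly", "copy", "copy",
                          "readonly+exec", "readonly+exec", "copy+exec", "copy+exec",
            /* shared  */ "none", "readonly", "shared", "shared",
                          "readonly+exec", "readonly+exec", "shared+exec", "shared+exec",
    };

    int main(void)
    {
            int prot = PROT_READ | PROT_WRITE, shared = 1;
            printf("prot %d %s -> %s\n", prot, shared ? "shared" : "private",
                   protection_map[prot | (shared ? 8 : 0)]);
            return 0;  /* prints "shared": writable shared mapping */
    }
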
++/*
++ * Define this if things work differently on an i386 and an i486:
++ * it will (on an i486) warn about kernel memory accesses that are
++ * done without an 'access_ok(VERIFY_WRITE,..)'
++ */
++#undef TEST_ACCESS_OK
++
++/* The boot page tables (all created as a single array) */
++extern unsigned long pg0[];
++
++#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
++
++/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
++#define pmd_none(x) (!(unsigned long)__pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
++ page tables can temporarily clear it. */
++#define pmd_present(x) (__pmd_val(x))
++#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++#else
++#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
++#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
++#endif
++
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
++static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
++static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
++
++/*
++ * The following only works if pte_present() is not true.
++ */
++static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
++
++static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
++
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level.h>
++#else
++# include <asm/pgtable-2level.h>
++#endif
++
++#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_dirty(__pte); \
++ if (__ret) { \
++ __pte = pte_mkclean(__pte); \
++ if ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
++ (ptep)->pte_low = __pte.pte_low; \
++ } \
++ __ret; \
++})
++
++#define ptep_test_and_clear_young(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_young(__pte); \
++ if (__ret) \
++ __pte = pte_mkold(__pte); \
++ if ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
++ (ptep)->pte_low = __pte.pte_low; \
++ __ret; \
++})
++
++#define ptep_get_and_clear_full(mm, addr, ptep, full) \
++ ((full) ? ({ \
++ pte_t __res = *(ptep); \
++ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
++ xen_l1_entry_update(ptep, __pte(0)); \
++ else \
++ *(ptep) = __pte(0); \
++ __res; \
++ }) : \
++ ptep_get_and_clear(mm, addr, ptep))
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (pte_write(pte))
++ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
++
++/*
++ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
++ *
++ * dst - pointer to pgd range anywhere on a pgd page
++ * src - ""
++ * count - the number of pgds to copy.
++ *
++ * dst and src can be on the same page, but the range must not overlap,
++ * and must not cross a page boundary.
++ */
++static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++{
++ memcpy(dst, src, count * sizeof(pgd_t));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable". On processors which do not support
++ * it, this is a no-op.
++ */
++#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
++ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ /*
++ * Since this might change the present bit (which controls whether
++ * a pte_t object has undergone p2m translation), we must use
++ * pte_val() on the input pte and __pte() for the return value.
++ */
++ paddr_t pteval = pte_val(pte);
++
++ pteval &= _PAGE_CHG_MASK;
++ pteval |= pgprot_val(newprot);
++#ifdef CONFIG_X86_PAE
++ pteval &= __supported_pte_mask;
++#endif
++ return __pte(pteval);
++}
++
++#define pmd_large(pmd) \
++((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
++
++/*
++ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
++ *
++ * this macro returns the index of the entry in the pgd page which would
++ * control the given virtual address
++ */
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_index_k(addr) pgd_index(addr)
++
++/*
++ * pgd_offset() returns a (pgd_t *)
++ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
++ */
++#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
++
++/*
++ * a shortcut which implies the use of the kernel's pgd, instead
++ * of a process's
++ */
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++
++/*
++ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
++ *
++ * this macro returns the index of the entry in the pmd page which would
++ * control the given virtual address
++ */
++#define pmd_index(address) \
++ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++
++/*
++ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
++ *
++ * this macro returns the index of the entry in the pte page which would
++ * control the given virtual address
++ */
++#define pte_index(address) \
++ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) \
++ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
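
These index macros are pure shift-and-mask arithmetic. A standalone illustration, assuming the non-PAE i386 geometry (PGDIR_SHIFT = 22, PAGE_SHIFT = 12, 1024 entries per table; PAE uses a three-level layout with different constants):

    #include <stdio.h>

    /* Assumed non-PAE i386 geometry. */
    #define PAGE_SHIFT   12
    #define PGDIR_SHIFT  22
    #define PTRS_PER_PGD 1024
    #define PTRS_PER_PTE 1024

    #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
    #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

    int main(void)
    {
            unsigned long va = 0xc0123456UL;   /* arbitrary kernel-half address */

            printf("pgd_index = %lu\n", pgd_index(va));  /* 768 */
            printf("pte_index = %lu\n", pte_index(va));  /* 291 */
            return 0;
    }
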
++
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_page_kernel(pmd) \
++ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
++
++/*
++ * Helper function that returns the kernel pagetable entry controlling
++ * the virtual address 'address'. NULL means no pagetable entry present.
++ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
++ * as a pte too.
++ */
++extern pte_t *lookup_address(unsigned long address);
++
++/*
++ * Make a given kernel text page executable/non-executable.
++ * Returns the previous executability setting of that page (which
++ * is used to restore the previous state). Used by the SMP bootup code.
++ * NOTE: this is an __init function for security reasons.
++ */
++#ifdef CONFIG_X86_PAE
++ extern int set_kernel_exec(unsigned long vaddr, int enable);
++#else
++ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
++#endif
++
++extern void noexec_setup(const char *str);
++
++#if defined(CONFIG_HIGHPTE)
++#define pte_offset_map(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
++ pte_index(address))
++#define pte_offset_map_nested(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
++ pte_index(address))
++#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
++#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
++#else
++#define pte_offset_map(dir, address) \
++ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
++#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
++#define pte_unmap(pte) do { } while (0)
++#define pte_unmap_nested(pte) do { } while (0)
++#endif
++
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++ do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++ } while (0)
++
++/*
++ * The i386 doesn't have any external MMU info: the kernel page
++ * tables contain all the necessary information.
++ *
++ * Also, we only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time.
++ */
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++ do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++ } while (0)
++
++#include <xen/features.h>
++void make_lowmem_page_readonly(void *va, unsigned int feature);
++void make_lowmem_page_writable(void *va, unsigned int feature);
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define virt_to_ptep(va) \
++({ \
++ pte_t *__ptep = lookup_address((unsigned long)(va)); \
++ BUG_ON(!__ptep || !pte_present(*__ptep)); \
++ __ptep; \
++})
++
++#define arbitrary_virt_to_machine(va) \
++ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \
++ | ((unsigned long)(va) & (PAGE_SIZE - 1)))
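
arbitrary_virt_to_machine() splices together the machine frame number of the backing page and the offset of the address within the page. The arithmetic in isolation, with made-up values standing in for a real mfn:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            uint64_t mfn = 0x1a2b3;             /* assumed machine frame number */
            unsigned long va = 0xc0123456UL;    /* arbitrary virtual address */

            uint64_t maddr = (mfn << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
            printf("maddr = %#llx\n", (unsigned long long)maddr);  /* 0x1a2b3456 */
            return 0;
    }
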
++
++#endif /* !__ASSEMBLY__ */
++
++#ifdef CONFIG_FLATMEM
++#define kern_addr_valid(addr) (1)
++#endif /* CONFIG_FLATMEM */
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep);
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size);
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot);
++
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
++ xen_change_pte_range(mm, pmd, addr, end, newprot)
++
++#define io_remap_pfn_range(vma,from,pfn,size,prot) \
++direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn) (pfn)
++#define GET_IOSPACE(pfn) 0
++#define GET_PFN(pfn) (pfn)
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _I386_PGTABLE_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/processor.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/processor.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/processor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/processor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,743 @@
++/*
++ * include/asm-i386/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_I386_PROCESSOR_H
++#define __ASM_I386_PROCESSOR_H
++
++#include <asm/vm86.h>
++#include <asm/math_emu.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/cache.h>
++#include <linux/threads.h>
++#include <asm/percpu.h>
++#include <linux/cpumask.h>
++#include <xen/interface/physdev.h>
++
++/* flag for disabling the tsc */
++extern int tsc_disable;
++
++struct desc_struct {
++ unsigned long a,b;
++};
++
++#define desc_empty(desc) \
++ (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
++
++/*
++ * CPU type and hardware bug flags. Kept separately for each CPU.
++ * Members of this structure are referenced in head.S, so think twice
++ * before touching them. [mj]
++ */
++
++struct cpuinfo_x86 {
++ __u8 x86; /* CPU family */
++ __u8 x86_vendor; /* CPU vendor */
++ __u8 x86_model;
++ __u8 x86_mask;
++ char wp_works_ok; /* It doesn't on 386's */
++ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
++ char hard_math;
++ char rfu;
++ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
++ unsigned long x86_capability[NCAPINTS];
++ char x86_vendor_id[16];
++ char x86_model_id[64];
++ int x86_cache_size; /* in KB - valid for CPUs which support this
++ call */
++ int x86_cache_alignment; /* In bytes */
++ char fdiv_bug;
++ char f00f_bug;
++ char coma_bug;
++ char pad0;
++ int x86_power;
++ unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
++#endif
++ unsigned char x86_max_cores; /* cpuid returned max cores value */
++ unsigned char apicid;
++#ifdef CONFIG_SMP
++ unsigned char booted_cores; /* number of cores as seen by OS */
++ __u8 phys_proc_id; /* Physical processor id. */
++ __u8 cpu_core_id; /* Core id */
++#endif
++} __attribute__((__aligned__(SMP_CACHE_BYTES)));
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NSC 8
++#define X86_VENDOR_NUM 9
++#define X86_VENDOR_UNKNOWN 0xff
++
++/*
++ * capabilities of CPUs
++ */
++
++extern struct cpuinfo_x86 boot_cpu_data;
++extern struct cpuinfo_x86 new_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++extern struct tss_struct doublefault_tss;
++DECLARE_PER_CPU(struct tss_struct, init_tss);
++#endif
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern int cpu_llc_id[NR_CPUS];
++extern char ignore_fpu_irq;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
++#ifdef CONFIG_X86_HT
++extern void detect_ht(struct cpuinfo_x86 *c);
++#else
++static inline void detect_ht(struct cpuinfo_x86 *c) {}
++#endif
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
++
++/*
++ * Generic CPUID function
++ * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c"(0));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++ int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c" (count));
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++ unsigned int eax;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax)
++ : "0" (op)
++ : "bx", "cx", "dx");
++ return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++ unsigned int eax, ebx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=b" (ebx)
++ : "0" (op)
++ : "cx", "dx" );
++ return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++ unsigned int eax, ecx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=c" (ecx)
++ : "0" (op)
++ : "bx", "dx" );
++ return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++ unsigned int eax, edx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=d" (edx)
++ : "0" (op)
++ : "bx", "cx");
++ return edx;
++}
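
XEN_CPUID expands to a form of the instruction the hypervisor can intercept; the helpers otherwise follow the usual pattern. A user-space sketch with a plain native cpuid (an assumption, not the Xen path) that fetches the vendor string from leaf 0:

    #include <stdio.h>
    #include <string.h>

    static void cpuid(unsigned op, unsigned *a, unsigned *b,
                      unsigned *c, unsigned *d)
    {
            __asm__("cpuid"
                    : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                    : "0" (op), "c" (0));
    }

    int main(void)
    {
            unsigned a, b, c, d;
            char vendor[13];

            cpuid(0, &a, &b, &c, &d);
            memcpy(vendor + 0, &b, 4);          /* vendor is ebx, edx, ecx */
            memcpy(vendor + 4, &d, 4);
            memcpy(vendor + 8, &c, 4);
            vendor[12] = '\0';
            printf("vendor: %s\n", vendor);     /* e.g. "GenuineIntel" */
            return 0;
    }
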
++
++#define load_cr3(pgdir) write_cr3(__pa(pgdir))
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
++#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
++#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
++#define X86_CR4_DE 0x0008 /* enable debugging extensions */
++#define X86_CR4_PSE 0x0010 /* enable page size extensions */
++#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
++#define X86_CR4_MCE 0x0040 /* Machine check enable */
++#define X86_CR4_PGE 0x0080 /* enable global pages */
++#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (i.e.
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPUs that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++ unsigned cr4;
++ mmu_cr4_features |= mask;
++ cr4 = read_cr4();
++ cr4 |= mask;
++ write_cr4(cr4);
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++ unsigned cr4;
++ mmu_cr4_features &= ~mask;
++ cr4 = read_cr4();
++ cr4 &= ~mask;
++ write_cr4(cr4);
++}
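
Both helpers keep the software shadow mmu_cr4_features in step with the register so CPUs that boot later can copy the settled value. The same read-modify-write-plus-shadow pattern on ordinary variables (a sketch; the real code touches the privileged %cr4):

    #include <stdio.h>

    static unsigned long shadow_cr4;   /* mirrors the last value written */
    static unsigned long fake_cr4;     /* stand-in for the real register */

    static void set_in_cr4(unsigned long mask)
    {
            shadow_cr4 |= mask;
            fake_cr4 |= mask;          /* read_cr4(); or in mask; write_cr4() */
    }

    static void clear_in_cr4(unsigned long mask)
    {
            shadow_cr4 &= ~mask;
            fake_cr4 &= ~mask;
    }

    int main(void)
    {
            set_in_cr4(0x0010 /* X86_CR4_PSE */);
            set_in_cr4(0x0080 /* X86_CR4_PGE */);
            clear_in_cr4(0x0010);
            printf("shadow = %#lx\n", shadow_cr4);   /* 0x80 */
            return 0;
    }
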
++
++/*
++ * NSC/Cyrix CPU configuration register indexes
++ */
++
++#define CX86_PCR0 0x20
++#define CX86_GCR 0xb8
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_PCR1 0xf0
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ * NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++ outb((reg), 0x22); \
++ outb((data), 0x23); \
++} while (0)
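
This is the classic index/data port pair: the register index goes to port 0x22, the datum is then read or written at port 0x23. Real port I/O needs ring 0 (or iopl/ioperm), so the sketch below simulates the device with an array to show only the access protocol:

    #include <stdio.h>

    static unsigned char regs[256];   /* simulated indexed register file */
    static unsigned char index_latch; /* what a write to port 0x22 selects */

    static void outb(unsigned char v, unsigned short port)
    {
            if (port == 0x22)
                    index_latch = v;            /* select register */
            else if (port == 0x23)
                    regs[index_latch] = v;      /* write selected register */
    }

    static unsigned char inb(unsigned short port)
    {
            return (port == 0x23) ? regs[index_latch] : 0xff;
    }

    #define getCx86(reg)       ({ outb((reg), 0x22); inb(0x23); })
    #define setCx86(reg, data) do { outb((reg), 0x22); outb((data), 0x23); } while (0)

    int main(void)
    {
            setCx86(0xc3 /* CX86_CCR3 */, 0x10);
            printf("CCR3 = %#x\n", getCx86(0xc3));   /* 0x10 */
            return 0;
    }
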
++
++/* Stop speculative execution */
++static inline void sync_core(void)
++{
++ int tmp;
++ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++}
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++ unsigned long edx)
++{
++ /* "monitor %eax,%ecx,%edx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc8;"
++ : :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
++/* From the system description table in the BIOS. Mostly for MCA use, but
++others may find it useful. */
++extern unsigned int machine_id;
++extern unsigned int machine_submodel_id;
++extern unsigned int BIOS_revision;
++extern unsigned int mca_pentium_flag;
++
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++/*
++ * User space process size: 3GB (default).
++ */
++#define TASK_SIZE (PAGE_OFFSET)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS 65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
++
++struct i387_fsave_struct {
++ long cwd;
++ long swd;
++ long twd;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
++ long status; /* software status information */
++};
++
++struct i387_fxsave_struct {
++ unsigned short cwd;
++ unsigned short swd;
++ unsigned short twd;
++ unsigned short fop;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long mxcsr;
++ long mxcsr_mask;
++ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
++ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
++ long padding[56];
++} __attribute__ ((aligned (16)));
++
++struct i387_soft_struct {
++ long cwd;
++ long swd;
++ long twd;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
++ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
++ struct info *info;
++ unsigned long entry_eip;
++};
++
++union i387_union {
++ struct i387_fsave_struct fsave;
++ struct i387_fxsave_struct fxsave;
++ struct i387_soft_struct soft;
++};
++
++typedef struct {
++ unsigned long seg;
++} mm_segment_t;
++
++struct thread_struct;
++
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++ unsigned short back_link,__blh;
++ unsigned long esp0;
++ unsigned short ss0,__ss0h;
++ unsigned long esp1;
++ unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
++ unsigned long esp2;
++ unsigned short ss2,__ss2h;
++ unsigned long __cr3;
++ unsigned long eip;
++ unsigned long eflags;
++ unsigned long eax,ecx,edx,ebx;
++ unsigned long esp;
++ unsigned long ebp;
++ unsigned long esi;
++ unsigned long edi;
++ unsigned short es, __esh;
++ unsigned short cs, __csh;
++ unsigned short ss, __ssh;
++ unsigned short ds, __dsh;
++ unsigned short fs, __fsh;
++ unsigned short gs, __gsh;
++ unsigned short ldt, __ldth;
++ unsigned short trace, io_bitmap_base;
++ /*
++ * The extra 1 is there because the CPU will access an
++ * additional byte beyond the end of the IO permission
++ * bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++ /*
++ * Cache the current maximum and the last task that used the bitmap:
++ */
++ unsigned long io_bitmap_max;
++ struct thread_struct *io_bitmap_owner;
++ /*
++ * pads the TSS to be cacheline-aligned (size is 0x100)
++ */
++ unsigned long __cacheline_filler[35];
++ /*
++ * .. and then another 0x100 bytes for emergency kernel stack
++ */
++ unsigned long stack[64];
++} __attribute__((packed));
++#endif
++
++#define ARCH_MIN_TASKALIGN 16
++
++struct thread_struct {
++/* cached TLS descriptors. */
++ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
++ unsigned long esp0;
++ unsigned long sysenter_cs;
++ unsigned long eip;
++ unsigned long esp;
++ unsigned long fs;
++ unsigned long gs;
++/* Hardware debugging registers */
++ unsigned long debugreg[8]; /* %%db0-7 debug registers */
++/* fault info */
++ unsigned long cr2, trap_no, error_code;
++/* floating point info */
++ union i387_union i387;
++/* virtual 86 mode info */
++ struct vm86_struct __user * vm86_info;
++ unsigned long screen_bitmap;
++ unsigned long v86flags, v86mask, saved_esp0;
++ unsigned int saved_fs, saved_gs;
++/* IO permissions */
++ unsigned long *io_bitmap_ptr;
++ unsigned long iopl;
++/* max allowed port in the bitmap, in bytes: */
++ unsigned long io_bitmap_max;
++};
++
++#define INIT_THREAD { \
++ .vm86_info = NULL, \
++ .sysenter_cs = __KERNEL_CS, \
++ .io_bitmap_ptr = NULL, \
++}
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * Note that the .io_bitmap member must be extra-big. This is because
++ * the CPU will access an additional byte beyond the end of the IO
++ * permission bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++#define INIT_TSS { \
++ .esp0 = sizeof(init_stack) + (long)&init_stack, \
++ .ss0 = __KERNEL_DS, \
++ .ss1 = __KERNEL_CS, \
++ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
++ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
++}
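
The `[ 0 ... IO_BITMAP_LONGS] = ~0` initializer relies on GCC's range-designator extension to set every element, including the extra trailing long the CPU may touch, to all ones. A tiny standalone illustration of the extension:

    #include <stdio.h>

    #define N 8

    /* GCC extension: initialize a whole range of elements in one designator. */
    static unsigned long bits[N + 1] = { [0 ... N] = ~0UL };

    int main(void)
    {
            printf("bits[N] = %#lx\n", bits[N]);   /* all ones */
            return 0;
    }
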
++
++static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++ tss->esp0 = thread->esp0;
++ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
++ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++ tss->ss1 = thread->sysenter_cs;
++ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++ }
++}
++#define load_esp0(tss, thread) \
++ __load_esp0(tss, thread)
++#else
++#define load_esp0(tss, thread) do { \
++ if (HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)) \
++ BUG(); \
++} while (0)
++#endif
++
++#define start_thread(regs, new_eip, new_esp) do { \
++ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
++ set_fs(USER_DS); \
++ regs->xds = __USER_DS; \
++ regs->xes = __USER_DS; \
++ regs->xss = __USER_DS; \
++ regs->xcs = __USER_CS; \
++ regs->eip = new_eip; \
++ regs->esp = new_esp; \
++} while (0)
++
++/*
++ * These special macros can be used to get or set a debugging register
++ */
++#define get_debugreg(var, register) \
++ (var) = HYPERVISOR_get_debugreg((register))
++#define set_debugreg(value, register) \
++ WARN_ON(HYPERVISOR_set_debugreg((register), (value)))
++
++/*
++ * Set IOPL bits in EFLAGS from given mask
++ */
++static inline void set_iopl_mask(unsigned mask)
++{
++ struct physdev_set_iopl set_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++}
++
++/* Forward declaration, a strange C thing */
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++extern unsigned long thread_saved_pc(struct task_struct *tsk);
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
++
++unsigned long get_wchan(struct task_struct *p);
++
++#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
++#define KSTK_TOP(info) \
++({ \
++ unsigned long *__ptr = (unsigned long *)(info); \
++ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
++})
++
++/*
++ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
++ * This is necessary to guarantee that the entire "struct pt_regs"
++ * is accessible even if the CPU hasn't stored the SS/ESP registers
++ * on the stack (interrupt gate does not save these registers
++ * when switching to the same priv ring).
++ * Therefore beware: accessing the xss/esp fields of the
++ * "struct pt_regs" is possible, but they may contain the
++ * completely wrong values.
++ */
++#define task_pt_regs(task) \
++({ \
++ struct pt_regs *__regs__; \
++ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ - 1; \
++})
++
++#define KSTK_EIP(task) (task_pt_regs(task)->eip)
++#define KSTK_ESP(task) (task_pt_regs(task)->esp)
++
++
++struct microcode_header {
++ unsigned int hdrver;
++ unsigned int rev;
++ unsigned int date;
++ unsigned int sig;
++ unsigned int cksum;
++ unsigned int ldrver;
++ unsigned int pf;
++ unsigned int datasize;
++ unsigned int totalsize;
++ unsigned int reserved[3];
++};
++
++struct microcode {
++ struct microcode_header hdr;
++ unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++ unsigned int sig;
++ unsigned int pf;
++ unsigned int cksum;
++};
++
++struct extended_sigtable {
++ unsigned int count;
++ unsigned int cksum;
++ unsigned int reserved[3];
++ struct extended_signature sigs[0];
++};
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++ __asm__ __volatile__("rep;nop": : :"memory");
++}
++
++#define cpu_relax() rep_nop()
++
++/* generic versions from gas */
++#define GENERIC_NOP1 ".byte 0x90\n"
++#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
++#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
++#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
++#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
++#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
++
++/* Opteron nops */
++#define K8_NOP1 GENERIC_NOP1
++#define K8_NOP2 ".byte 0x66,0x90\n"
++#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
++#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
++#define K8_NOP5 K8_NOP3 K8_NOP2
++#define K8_NOP6 K8_NOP3 K8_NOP3
++#define K8_NOP7 K8_NOP4 K8_NOP3
++#define K8_NOP8 K8_NOP4 K8_NOP4
++
++/* K7 nops */
++/* uses eax dependencies (arbitrary choice) */
++#define K7_NOP1 GENERIC_NOP1
++#define K7_NOP2 ".byte 0x8b,0xc0\n"
++#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
++#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
++#define K7_NOP5 K7_NOP4 ASM_NOP1
++#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
++#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
++#define K7_NOP8 K7_NOP7 ASM_NOP1
++
++#ifdef CONFIG_MK8
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++#elif defined(CONFIG_MK7)
++#define ASM_NOP1 K7_NOP1
++#define ASM_NOP2 K7_NOP2
++#define ASM_NOP3 K7_NOP3
++#define ASM_NOP4 K7_NOP4
++#define ASM_NOP5 K7_NOP5
++#define ASM_NOP6 K7_NOP6
++#define ASM_NOP7 K7_NOP7
++#define ASM_NOP8 K7_NOP8
++#else
++#define ASM_NOP1 GENERIC_NOP1
++#define ASM_NOP2 GENERIC_NOP2
++#define ASM_NOP3 GENERIC_NOP3
++#define ASM_NOP4 GENERIC_NOP4
++#define ASM_NOP5 GENERIC_NOP5
++#define ASM_NOP6 GENERIC_NOP6
++#define ASM_NOP7 GENERIC_NOP7
++#define ASM_NOP8 GENERIC_NOP8
++#endif
++
++#define ASM_NOP_MAX 8
++
++/* Prefetch instructions for Pentium III and AMD Athlon */
++/* It's not worth caring about 3dnow! prefetches for the K6
++ because they are microcoded there and very slow.
++ However, we currently don't do prefetches for pre-XP Athlons;
++ that should be fixed. */
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(const void *x)
++{
++ alternative_input(ASM_NOP4,
++ "prefetchnta (%1)",
++ X86_FEATURE_XMM,
++ "r" (x));
++}
++
++#define ARCH_HAS_PREFETCH
++#define ARCH_HAS_PREFETCHW
++#define ARCH_HAS_SPINLOCK_PREFETCH
++
++/* 3dnow! prefetch to get an exclusive cache line. Useful for
++ spinlocks to avoid one state transition in the cache coherency protocol. */
++static inline void prefetchw(const void *x)
++{
++ alternative_input(ASM_NOP4,
++ "prefetchw (%1)",
++ X86_FEATURE_3DNOW,
++ "r" (x));
++}
++#define spin_lock_prefetch(x) prefetchw(x)
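
Outside the kernel's alternative_input() patching machinery, the portable way to express the same idea is __builtin_prefetch(), which GCC lowers to a suitable prefetch instruction or to nothing. A typical look-ahead loop, as a sketch:

    #include <stdio.h>

    #define AHEAD 8

    int main(void)
    {
            static long a[4096];
            long sum = 0;

            for (int i = 0; i < 4096; i++) {
                    /* pull a[i + AHEAD] toward the cache while summing a[i] */
                    if (i + AHEAD < 4096)
                            __builtin_prefetch(&a[i + AHEAD], 0 /* read */, 1);
                    sum += a[i];
            }
            printf("sum = %ld\n", sum);
            return 0;
    }
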
++
++extern void select_idle_routine(const struct cpuinfo_x86 *c);
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++extern void enable_sep_cpu(void);
++extern int sysenter_setup(void);
++
++#endif /* __ASM_I386_PROCESSOR_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/ptrace.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/ptrace.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/ptrace.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/ptrace.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,90 @@
++#ifndef _I386_PTRACE_H
++#define _I386_PTRACE_H
++
++#define EBX 0
++#define ECX 1
++#define EDX 2
++#define ESI 3
++#define EDI 4
++#define EBP 5
++#define EAX 6
++#define DS 7
++#define ES 8
++#define FS 9
++#define GS 10
++#define ORIG_EAX 11
++#define EIP 12
++#define CS 13
++#define EFL 14
++#define UESP 15
++#define SS 16
++#define FRAME_SIZE 17
++
++/* this struct defines the way the registers are stored on the
++ stack during a system call. */
++
++struct pt_regs {
++ long ebx;
++ long ecx;
++ long edx;
++ long esi;
++ long edi;
++ long ebp;
++ long eax;
++ int xds;
++ int xes;
++ long orig_eax;
++ long eip;
++ int xcs;
++ long eflags;
++ long esp;
++ int xss;
++};
++
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS 12
++#define PTRACE_SETREGS 13
++#define PTRACE_GETFPREGS 14
++#define PTRACE_SETFPREGS 15
++#define PTRACE_GETFPXREGS 18
++#define PTRACE_SETFPXREGS 19
++
++#define PTRACE_OLDSETOPTIONS 21
++
++#define PTRACE_GET_THREAD_AREA 25
++#define PTRACE_SET_THREAD_AREA 26
++
++#define PTRACE_SYSEMU 31
++#define PTRACE_SYSEMU_SINGLESTEP 32
++
++#ifdef __KERNEL__
++
++#include <asm/vm86.h>
++
++struct task_struct;
++extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
++
++/*
++ * user_mode_vm(regs) determines whether a register set came from user mode.
++ * This is true if V8086 mode was enabled OR if the register set was from
++ * protected mode with RPL-3 CS value. This tricky test checks that with
++ * one comparison. Many places in the kernel can bypass this full check
++ * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ */
++static inline int user_mode(struct pt_regs *regs)
++{
++ return (regs->xcs & 2) != 0;
++}
++static inline int user_mode_vm(struct pt_regs *regs)
++{
++ return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0;
++}
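
The `xcs & 2` test differs from native i386, where user mode is `(xcs & 3) == 3`: under Xen the kernel runs in ring 1, so requestor privilege levels 2 and 3 are both "user" and bit 1 of the saved CS is decisive. A small demonstration using the selector values from this tree's segment.h:

    #include <stdio.h>

    /* Under Xen/i386 the kernel runs in ring 1, so rings 2-3 are "user". */
    static int user_mode(long xcs)
    {
            return (xcs & 2) != 0;
    }

    int main(void)
    {
            /* __KERNEL_CS with RPL 1 = 0x61; __USER_CS with RPL 3 = 0x73. */
            printf("kernel cs (RPL 1): %d\n", user_mode(0x61));   /* 0 */
            printf("user cs   (RPL 3): %d\n", user_mode(0x73));   /* 1 */
            return 0;
    }
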
++#define instruction_pointer(regs) ((regs)->eip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++#endif /* __KERNEL__ */
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/scatterlist.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/scatterlist.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/scatterlist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/scatterlist.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,22 @@
++#ifndef _I386_SCATTERLIST_H
++#define _I386_SCATTERLIST_H
++
++struct scatterlist {
++ struct page *page;
++ unsigned int offset;
++ unsigned int length;
++ dma_addr_t dma_address;
++ unsigned int dma_length;
++};
++
++/* These macros should be used after a pci_map_sg call has been done
++ * to get bus addresses of each of the SG entries and their lengths.
++ * You should only work with the number of sg entries pci_map_sg
++ * returns.
++ */
++#define sg_dma_address(sg) ((sg)->dma_address)
++#define sg_dma_len(sg) ((sg)->dma_length)
++
++#define ISA_DMA_THRESHOLD (0x00ffffff)
++
++#endif /* !(_I386_SCATTERLIST_H) */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/segment.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/segment.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/segment.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/segment.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,117 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
++
++/*
++ * The layout of the per-CPU GDT under Linux:
++ *
++ * 0 - null
++ * 1 - reserved
++ * 2 - reserved
++ * 3 - reserved
++ *
++ * 4 - unused <==== new cacheline
++ * 5 - unused
++ *
++ * ------- start of TLS (Thread-Local Storage) segments:
++ *
++ * 6 - TLS segment #1 [ glibc's TLS segment ]
++ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
++ * 8 - TLS segment #3
++ * 9 - reserved
++ * 10 - reserved
++ * 11 - reserved
++ *
++ * ------- start of kernel segments:
++ *
++ * 12 - kernel code segment <==== new cacheline
++ * 13 - kernel data segment
++ * 14 - default user CS
++ * 15 - default user DS
++ * 16 - TSS
++ * 17 - LDT
++ * 18 - PNPBIOS support (16->32 gate)
++ * 19 - PNPBIOS support
++ * 20 - PNPBIOS support
++ * 21 - PNPBIOS support
++ * 22 - PNPBIOS support
++ * 23 - APM BIOS support
++ * 24 - APM BIOS support
++ * 25 - APM BIOS support
++ *
++ * 26 - ESPFIX small SS
++ * 27 - unused
++ * 28 - unused
++ * 29 - unused
++ * 30 - unused
++ * 31 - TSS for double fault handler
++ */
++#define GDT_ENTRY_TLS_ENTRIES 3
++#define GDT_ENTRY_TLS_MIN 6
++#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
++
++#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
++
++#define GDT_ENTRY_DEFAULT_USER_CS 14
++#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
++
++#define GDT_ENTRY_DEFAULT_USER_DS 15
++#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
++
++#define GDT_ENTRY_KERNEL_BASE 12
++
++#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
++#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
++#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
++#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
++#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
++#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
++
++#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
++#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
++
++#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
++#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
++
++#define GDT_ENTRY_DOUBLEFAULT_TSS 31
++
++/*
++ * The GDT has 32 entries
++ */
++#define GDT_ENTRIES 32
++
++#define GDT_SIZE (GDT_ENTRIES * 8)
++
++/* Simple and small GDT entries for booting only */
++
++#define GDT_ENTRY_BOOT_CS 2
++#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
++
++#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
++#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
++
++/* The PnP BIOS entries in the GDT */
++#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
++#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
++#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
++#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
++#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
++
++/* The PnP BIOS selectors */
++#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
++#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
++#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
++#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
++#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
++
++/*
++ * The interrupt descriptor table has room for 256 entries;
++ * the global descriptor table is dependent on the number
++ * of tasks we can have.
++ */
++#define IDT_ENTRIES 256
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/setup.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/setup.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/setup.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/setup.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,81 @@
++/*
++ * Just a placeholder. We don't want to have to test x86 before
++ * we include stuff.
++ */
++
++#ifndef _i386_SETUP_H
++#define _i386_SETUP_H
++
++#ifdef __KERNEL__
++#include <linux/pfn.h>
++
++/*
++ * Reserved space for vmalloc and iomap - defined in asm/page.h
++ */
++#define MAXMEM_PFN PFN_DOWN(MAXMEM)
++#define MAX_NONPAE_PFN (1 << 20)
++#endif
++
++#define PARAM_SIZE 4096
++#define COMMAND_LINE_SIZE 256
++
++#define OLD_CL_MAGIC_ADDR 0x90020
++#define OLD_CL_MAGIC 0xA33F
++#define OLD_CL_BASE_ADDR 0x90000
++#define OLD_CL_OFFSET 0x90022
++#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
++
++#ifndef __ASSEMBLY__
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++extern unsigned char boot_params[PARAM_SIZE];
++
++#define PARAM (boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
++#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
++#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
++#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
++#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
++#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
++
++/*
++ * Do NOT EVER look at the BIOS memory size location.
++ * It does not work on many machines.
++ */
++#define LOWMEMSIZE() (0x9f000)
++
++struct e820entry;
++
++char * __init machine_specific_memory_setup(void);
++
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type);
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _i386_SETUP_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/smp.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/smp.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/smp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/smp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,103 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#endif
++#endif
++
++#define BAD_APICID 0xFFu
++#ifdef CONFIG_SMP
++#ifndef __ASSEMBLY__
++
++/*
++ * Private routines/data
++ */
++
++extern void smp_alloc_memory(void);
++extern int pic_mode;
++extern int smp_num_siblings;
++extern cpumask_t cpu_sibling_map[];
++extern cpumask_t cpu_core_map[];
++
++extern void (*mtrr_hook) (void);
++extern void zap_low_mappings (void);
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++
++#define MAX_APICID 256
++extern u8 x86_cpu_to_apicid[];
++
++#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern void cpu_exit_clear(void);
++extern void cpu_uninit(void);
++#endif
++
++/*
++ * This function is needed by all SMP systems. It must _always_ be valid
++ * from the initial startup. We map APIC_BASE very early in page_setup(),
++ * so this is correct in the x86 case.
++ */
++#define raw_smp_processor_id() (current_thread_info()->cpu)
++
++extern cpumask_t cpu_possible_map;
++#define cpu_callin_map cpu_possible_map
++
++/* We don't mark CPUs online until __cpu_up(), so we need another measure */
++static inline int num_booting_cpus(void)
++{
++ return cpus_weight(cpu_possible_map);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++
++#ifdef APIC_DEFINITION
++extern int hard_smp_processor_id(void);
++#else
++#include <mach_apicdef.h>
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++
++#endif
++
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++#endif /* !__ASSEMBLY__ */
++
++#else /* CONFIG_SMP */
++
++#define cpu_physical_id(cpu) boot_cpu_physical_apicid
++
++#define NO_PROC_ID 0xFF /* No processor magic marker */
++
++#endif
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/spinlock.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/spinlock.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/spinlock.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/spinlock.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,202 @@
++#ifndef __ASM_SPINLOCK_H
++#define __ASM_SPINLOCK_H
++
++#include <asm/atomic.h>
++#include <asm/rwlock.h>
++#include <asm/page.h>
++#include <linux/compiler.h>
++
++/*
++ * Your basic SMP spinlocks, allowing only a single CPU anywhere
++ *
++ * Simple spin lock operations. There are two variants, one clears IRQs
++ * on the local processor, one does not.
++ *
++ * We make no fairness assumptions. They have a cost.
++ *
++ * (the type definitions are in asm/spinlock_types.h)
++ */
++
++#define __raw_spin_is_locked(x) \
++ (*(volatile signed char *)(&(x)->slock) <= 0)
++
++#define __raw_spin_lock_string \
++ "\n1:\t" \
++ LOCK_PREFIX " ; decb %0\n\t" \
++ "jns 3f\n" \
++ "2:\t" \
++ "rep;nop\n\t" \
++ "cmpb $0,%0\n\t" \
++ "jle 2b\n\t" \
++ "jmp 1b\n" \
++ "3:\n\t"
++
++/*
++ * NOTE: there's an irqs-on section here, which normally would have to be
++ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
++ * __raw_spin_lock_string_flags().
++ */
++#define __raw_spin_lock_string_flags \
++ "\n1:\t" \
++ LOCK_PREFIX " ; decb %0\n\t" \
++ "jns 5f\n" \
++ "2:\t" \
++ "testl $0x200, %1\n\t" \
++ "jz 4f\n\t" \
++ "#sti\n" \
++ "3:\t" \
++ "rep;nop\n\t" \
++ "cmpb $0, %0\n\t" \
++ "jle 3b\n\t" \
++ "#cli\n\t" \
++ "jmp 1b\n" \
++ "4:\t" \
++ "rep;nop\n\t" \
++ "cmpb $0, %0\n\t" \
++ "jg 1b\n\t" \
++ "jmp 4b\n" \
++ "5:\n\t"
++
++static inline void __raw_spin_lock(raw_spinlock_t *lock)
++{
++ asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
++}
++
++/*
++ * It is easier for the lock validator if interrupts are not re-enabled
++ * in the middle of a lock-acquire. This is a performance feature anyway
++ * so we turn it off:
++ */
++#ifndef CONFIG_PROVE_LOCKING
++static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
++{
++ asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
++}
++#endif
++
++static inline int __raw_spin_trylock(raw_spinlock_t *lock)
++{
++ char oldval;
++ __asm__ __volatile__(
++ "xchgb %b0,%1"
++ :"=q" (oldval), "+m" (lock->slock)
++ :"0" (0) : "memory");
++ return oldval > 0;
++}
++
++/*
++ * __raw_spin_unlock based on writing $1 to the low byte.
++ * This method works, despite all the confusion.
++ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
++ * (PPro errata 66, 92)
++ */
++
++#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
++
++#define __raw_spin_unlock_string \
++ "movb $1,%0" \
++ :"+m" (lock->slock) : : "memory"
++
++
++static inline void __raw_spin_unlock(raw_spinlock_t *lock)
++{
++ __asm__ __volatile__(
++ __raw_spin_unlock_string
++ );
++}
++
++#else
++
++#define __raw_spin_unlock_string \
++ "xchgb %b0, %1" \
++ :"=q" (oldval), "+m" (lock->slock) \
++ :"0" (oldval) : "memory"
++
++static inline void __raw_spin_unlock(raw_spinlock_t *lock)
++{
++ char oldval = 1;
++
++ __asm__ __volatile__(
++ __raw_spin_unlock_string
++ );
++}
++
++#endif
++
++#define __raw_spin_unlock_wait(lock) \
++ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
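
The trylock is an atomic byte exchange: swap 0 into slock and succeed if the old value was positive, while unlock simply stores 1 back. A user-space rendering of that byte lock with GCC atomics and two threads (a sketch of the idea, not the kernel's decb-based slow path):

    #include <pthread.h>
    #include <stdio.h>

    static volatile signed char slock = 1;   /* 1 = unlocked, <= 0 = locked */
    static long counter;

    static int trylock(void)
    {
            /* xchgb: atomically swap in 0; old value > 0 means we got it */
            return __atomic_exchange_n(&slock, 0, __ATOMIC_ACQUIRE) > 0;
    }

    static void unlock(void)
    {
            __atomic_store_n(&slock, 1, __ATOMIC_RELEASE);  /* movb $1,slock */
    }

    static void *worker(void *arg)
    {
            for (int i = 0; i < 100000; i++) {
                    while (!trylock())
                            ;               /* spin; cpu_relax() would go here */
                    counter++;
                    unlock();
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;
            pthread_create(&t1, NULL, worker, NULL);
            pthread_create(&t2, NULL, worker, NULL);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            printf("counter = %ld\n", counter);   /* 200000 */
            return 0;
    }
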
++
++/*
++ * Read-write spinlocks, allowing multiple readers
++ * but only one writer.
++ *
++ * NOTE! it is quite common to have readers in interrupts
++ * but no interrupt writers. For those circumstances we
++ * can "mix" irq-safe locks - any writer needs to get a
++ * irq-safe write-lock, but readers can get non-irqsafe
++ * read-locks.
++ *
++ * On x86, we implement read-write locks as a 32-bit counter
++ * with the high bit (sign) being the "contended" bit.
++ *
++ * The inline assembly is non-obvious. Think about it.
++ *
++ * Changed to use the same technique as rw semaphores. See
++ * semaphore.h for details. -ben
++ *
++ * the helpers are in arch/i386/kernel/semaphore.c
++ */
++
++/**
++ * read_can_lock - would read_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
++
++/**
++ * write_can_lock - would write_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
++
++static inline void __raw_read_lock(raw_rwlock_t *rw)
++{
++ __build_read_lock(rw, "__read_lock_failed");
++}
++
++static inline void __raw_write_lock(raw_rwlock_t *rw)
++{
++ __build_write_lock(rw, "__write_lock_failed");
++}
++
++static inline int __raw_read_trylock(raw_rwlock_t *lock)
++{
++ atomic_t *count = (atomic_t *)lock;
++ atomic_dec(count);
++ if (atomic_read(count) >= 0)
++ return 1;
++ atomic_inc(count);
++ return 0;
++}
++
++static inline int __raw_write_trylock(raw_rwlock_t *lock)
++{
++ atomic_t *count = (atomic_t *)lock;
++ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
++ return 1;
++ atomic_add(RW_LOCK_BIAS, count);
++ return 0;
++}
++
++static inline void __raw_read_unlock(raw_rwlock_t *rw)
++{
++ asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++}
++
++static inline void __raw_write_unlock(raw_rwlock_t *rw)
++{
++ asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
++ : "+m" (rw->lock) : : "memory");
++}
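
The whole rwlock is one signed counter starting at RW_LOCK_BIAS: a reader subtracts 1, a writer subtracts the full bias, and a result that went negative (or non-zero, for writers) means contention, so the operation is undone. The arithmetic in isolation with C11 atomics, as a sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    #define RW_LOCK_BIAS 0x01000000

    static atomic_int lock = RW_LOCK_BIAS;

    static int read_trylock(void)
    {
            if (atomic_fetch_sub(&lock, 1) - 1 >= 0)
                    return 1;
            atomic_fetch_add(&lock, 1);      /* undo: a writer holds it */
            return 0;
    }

    static int write_trylock(void)
    {
            if (atomic_fetch_sub(&lock, RW_LOCK_BIAS) - RW_LOCK_BIAS == 0)
                    return 1;
            atomic_fetch_add(&lock, RW_LOCK_BIAS);  /* undo: readers present */
            return 0;
    }

    int main(void)
    {
            printf("reader: %d\n", read_trylock());    /* 1 */
            printf("writer: %d\n", write_trylock());   /* 0: one reader active */
            atomic_fetch_add(&lock, 1);                /* read_unlock */
            printf("writer: %d\n", write_trylock());   /* 1 */
            return 0;
    }
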
++
++#endif /* __ASM_SPINLOCK_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/swiotlb.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/swiotlb.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/swiotlb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/swiotlb.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,43 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
++
++/* SWIOTLB interface */
++
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++ int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction);
++#endif
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++extern void swiotlb_init(void);
++
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
++#else
++#define swiotlb 0
++#endif
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/synch_bitops.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/synch_bitops.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/synch_bitops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/synch_bitops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,126 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
++
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define ADDR (*(volatile long *) addr)
++
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btsl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btrl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btcl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "lock btsl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "lock btrl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++
++ __asm__ __volatile__ (
++ "lock btcl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++struct __synch_xchg_dummy { unsigned long a[100]; };
++#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
++
++#define synch_cmpxchg(ptr, old, new) \
++((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
++ (unsigned long)(old), \
++ (unsigned long)(new), \
++ sizeof(*(ptr))))
++
++static inline unsigned long __synch_cmpxchg(volatile void *ptr,
++ unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#ifdef CONFIG_X86_64
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %k1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++ case 8:
++ __asm__ __volatile__("lock; cmpxchgq %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#else
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#endif
++ }
++ return old;
++}
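
cmpxchg is the building block for lock-free read-modify-write loops: snapshot the old value, compute the new one, and retry if another CPU raced in between. A minimal atomic increment built that way with the GCC __sync builtin (a sketch, not the guest/hypervisor use above):

    #include <stdio.h>

    static unsigned long val;

    static void atomic_inc_cmpxchg(unsigned long *p)
    {
            unsigned long old, new_;

            do {
                    old = *p;           /* snapshot */
                    new_ = old + 1;     /* compute */
                    /* lock cmpxchg: store new_ only if *p is still old */
            } while (__sync_val_compare_and_swap(p, old, new_) != old);
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    atomic_inc_cmpxchg(&val);
            printf("val = %lu\n", val);   /* 5 */
            return 0;
    }
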
++
++#define synch_test_bit test_bit
++
++#define synch_cmpxchg_subword synch_cmpxchg
++
++#endif /* __XEN_SYNCH_BITOPS_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/system.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/system.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/system.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/system.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,488 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/cpufeature.h>
++#include <linux/bitops.h> /* for LOCK_PREFIX */
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++
++#ifdef __KERNEL__
++
++struct task_struct; /* one of the stranger aspects of C forward declarations.. */
++extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
++
++/*
++ * Saving eflags is important. It switches not only IOPL between tasks,
++ * it also protects other tasks from NT leaking through sysenter etc.
++ */
++#define switch_to(prev,next,last) do { \
++ unsigned long esi,edi; \
++ asm volatile("pushfl\n\t" /* Save flags */ \
++ "pushl %%ebp\n\t" \
++ "movl %%esp,%0\n\t" /* save ESP */ \
++ "movl %5,%%esp\n\t" /* restore ESP */ \
++ "movl $1f,%1\n\t" /* save EIP */ \
++ "pushl %6\n\t" /* restore EIP */ \
++ "jmp __switch_to\n" \
++ "1:\t" \
++ "popl %%ebp\n\t" \
++ "popfl" \
++ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
++ "=a" (last),"=S" (esi),"=D" (edi) \
++ :"m" (next->thread.esp),"m" (next->thread.eip), \
++ "2" (prev), "d" (next)); \
++} while (0)
++
++#define _set_base(addr,base) do { unsigned long __pr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++ "rorl $16,%%edx\n\t" \
++ "movb %%dl,%2\n\t" \
++ "movb %%dh,%3" \
++ :"=&d" (__pr) \
++ :"m" (*((addr)+2)), \
++ "m" (*((addr)+4)), \
++ "m" (*((addr)+7)), \
++ "0" (base) \
++ ); } while(0)
++
++#define _set_limit(addr,limit) do { unsigned long __lr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++ "rorl $16,%%edx\n\t" \
++ "movb %2,%%dh\n\t" \
++ "andb $0xf0,%%dh\n\t" \
++ "orb %%dh,%%dl\n\t" \
++ "movb %%dl,%2" \
++ :"=&d" (__lr) \
++ :"m" (*(addr)), \
++ "m" (*((addr)+6)), \
++ "0" (limit) \
++ ); } while(0)
++
++#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
++#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong.
++ */
++#define loadsegment(seg,value) \
++ asm volatile("\n" \
++ "1:\t" \
++ "mov %0,%%" #seg "\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3:\t" \
++ "pushl $0\n\t" \
++ "popl %%" #seg "\n\t" \
++ "jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n\t" \
++ ".align 4\n\t" \
++ ".long 1b,3b\n" \
++ ".previous" \
++ : :"rm" (value))
++
++/*
++ * Save a segment register away
++ */
++#define savesegment(seg, value) \
++ asm volatile("mov %%" #seg ",%0":"=rm" (value))
++
++#define read_cr0() ({ \
++ unsigned int __dummy; \
++ __asm__ __volatile__( \
++ "movl %%cr0,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy; \
++})
++#define write_cr0(x) \
++ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
++
++#define read_cr2() (current_vcpu_info()->arch.cr2)
++#define write_cr2(x) \
++ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
++
++#define read_cr3() ({ \
++ unsigned int __dummy; \
++ __asm__ ( \
++ "movl %%cr3,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy = xen_cr3_to_pfn(__dummy); \
++ mfn_to_pfn(__dummy) << PAGE_SHIFT; \
++})
++#define write_cr3(x) ({ \
++ unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
++ __dummy = xen_pfn_to_cr3(__dummy); \
++ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
++})
++#define read_cr4() ({ \
++ unsigned int __dummy; \
++ __asm__( \
++ "movl %%cr4,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy; \
++})
++#define read_cr4_safe() ({ \
++ unsigned int __dummy; \
++ /* This could fault if %cr4 does not exist */ \
++ __asm__("1: movl %%cr4, %0 \n" \
++ "2: \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".long 1b,2b \n" \
++ ".previous \n" \
++ : "=r" (__dummy): "0" (0)); \
++ __dummy; \
++})
++
++#define write_cr4(x) \
++ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#endif /* __KERNEL__ */
++
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory")
++
++static inline unsigned long get_limit(unsigned long segment)
++{
++ unsigned long __limit;
++ __asm__("lsll %1,%0"
++ :"=r" (__limit):"r" (segment));
++ return __limit+1;
++}
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++struct __xchg_dummy { unsigned long a[100]; };
++#define __xg(x) ((struct __xchg_dummy *)(x))
++
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++/*
++ * The semantics of CMPXCHG8B are a bit strange; this is why
++ * there is a loop and the loading of %%eax and %%edx has to
++ * be inside. This inlines well in most cases; the cached
++ * cost is around ~38 cycles. (in the future we might want
++ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
++ * might have an implicit FPU-save as a cost, so it's not
++ * clear which path to go.)
++ *
++ * cmpxchg8b must be used with the lock prefix here to allow
++ * the instruction to be executed atomically, see page 3-102
++ * of the instruction set reference 24319102.pdf. We need
++ * the reader side to see the coherent 64bit value.
++ */
++static inline void __set_64bit (unsigned long long * ptr,
++ unsigned int low, unsigned int high)
++{
++ __asm__ __volatile__ (
++ "\n1:\t"
++ "movl (%0), %%eax\n\t"
++ "movl 4(%0), %%edx\n\t"
++ "lock cmpxchg8b (%0)\n\t"
++ "jnz 1b"
++ : /* no outputs */
++ : "D"(ptr),
++ "b"(low),
++ "c"(high)
++ : "ax","dx","memory");
++}
++
++static inline void __set_64bit_constant (unsigned long long *ptr,
++ unsigned long long value)
++{
++ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
++}
++#define ll_low(x) *(((unsigned int*)&(x))+0)
++#define ll_high(x) *(((unsigned int*)&(x))+1)
++
++static inline void __set_64bit_var (unsigned long long *ptr,
++ unsigned long long value)
++{
++ __set_64bit(ptr,ll_low(value), ll_high(value));
++}
++
++#define set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit_constant(ptr, value) : \
++ __set_64bit_var(ptr, value) )
++
++#define _set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
++ __set_64bit(ptr, ll_low(value), ll_high(value)) )
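++/*
++ * Illustrative sketch (hypothetical caller, for exposition only):
++ * publish a 64-bit value that concurrent readers must never observe
++ * half-written:
++ *
++ *	static unsigned long long shared_stamp;
++ *
++ *	set_64bit(&shared_stamp, 0x0123456789abcdefULL);
++ */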
++
++#endif
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
++ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
++ * strictly speaking the constraints are incomplete, since *ptr is also
++ * an output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("xchgb %b0,%1"
++ :"=q" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 2:
++ __asm__ __volatile__("xchgw %w0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 4:
++ __asm__ __volatile__("xchgl %0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ }
++ return x;
++}
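++/*
++ * Illustrative sketch (hypothetical caller): xchg() is commonly used to
++ * atomically take ownership of a pending-work flag, so only one CPU acts
++ * on it:
++ *
++ *	unsigned long pending = xchg(&work_pending, 0);
++ *	if (pending)
++ *		process_work();
++ */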
++
++/*
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
++ * store NEW in MEM. Return the initial value in MEM. Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#ifdef CONFIG_X86_CMPXCHG
++#define __HAVE_ARCH_CMPXCHG 1
++#define cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
++#endif
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
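++/*
++ * Illustrative sketch (hypothetical caller): the canonical lock-free
++ * retry loop built on cmpxchg(), here incrementing a shared counter:
++ *
++ *	unsigned long old, new;
++ *
++ *	do {
++ *		old = *counter;
++ *		new = old + 1;
++ *	} while (cmpxchg(counter, old, new) != old);
++ */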
++
++#ifndef CONFIG_X86_CMPXCHG
++/*
++ * Building a kernel capable of running on an 80386. It may be necessary
++ * to simulate cmpxchg on the 80386 CPU. For that purpose we define
++ * a function for each of the sizes we support.
++ */
++
++extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
++extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
++extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
++
++static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ switch (size) {
++ case 1:
++ return cmpxchg_386_u8(ptr, old, new);
++ case 2:
++ return cmpxchg_386_u16(ptr, old, new);
++ case 4:
++ return cmpxchg_386_u32(ptr, old, new);
++ }
++ return old;
++}
++
++#define cmpxchg(ptr,o,n) \
++({ \
++ __typeof__(*(ptr)) __ret; \
++ if (likely(boot_cpu_data.x86 > 3)) \
++ __ret = __cmpxchg((ptr), (unsigned long)(o), \
++ (unsigned long)(n), sizeof(*(ptr))); \
++ else \
++ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
++ (unsigned long)(n), sizeof(*(ptr))); \
++ __ret; \
++})
++#endif
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
++ unsigned long long new)
++{
++ unsigned long long prev;
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
++ : "=A"(prev)
++ : "b"((unsigned long)new),
++ "c"((unsigned long)(new >> 32)),
++ "m"(*__xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++}
++
++#define cmpxchg64(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
++ (unsigned long long)(n)))
++
++#endif
++
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ *
++ * For now, "wmb()" doesn't actually do anything, as all
++ * Intel CPUs follow what Intel calls a *Processor Order*,
++ * in which all writes are seen in program order even
++ * outside the CPU.
++ *
++ * I expect future Intel CPUs to have a weaker ordering,
++ * but I'd also expect them to finally get their act together
++ * and add some real memory barriers if so.
++ *
++ * Some non-Intel clones support out-of-order stores; wmb() ceases to be
++ * a no-op for these.
++ */
++
++
++/*
++ * Actually only lfence would be needed for mb(), because all stores done
++ * by the kernel should already be ordered. But keep a full barrier for now.
++ */
++
++#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
++#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
++
++/**
++ * read_barrier_depends - Flush all pending reads that subsequent reads
++ * depend on.
++ *
++ * No data-dependent reads from memory-like regions are ever reordered
++ * over this barrier. All reads preceding this primitive are guaranteed
++ * to access memory (but not necessarily other CPUs' caches) before any
++ * reads following this primitive that depend on the data returned by
++ * any of the preceding reads. This primitive is much lighter weight than
++ * rmb() on most CPUs, and is never heavier weight than rmb().
++ *
++ * These ordering constraints are respected by both the local CPU
++ * and the compiler.
++ *
++ * Ordering is not guaranteed by anything other than these primitives,
++ * not even by data dependencies. See the documentation for
++ * memory_barrier() for examples and URLs to more information.
++ *
++ * For example, the following code would force ordering (the initial
++ * value of "a" is zero, "b" is one, and "p" is "&a"):
++ *
++ * <programlisting>
++ * CPU 0 CPU 1
++ *
++ * b = 2;
++ * memory_barrier();
++ * p = &b; q = p;
++ * read_barrier_depends();
++ * d = *q;
++ * </programlisting>
++ *
++ * because the read of "*q" depends on the read of "p" and these
++ * two reads are separated by a read_barrier_depends(). However,
++ * the following code, with the same initial values for "a" and "b":
++ *
++ * <programlisting>
++ * CPU 0 CPU 1
++ *
++ * a = 2;
++ * memory_barrier();
++ * b = 3; y = b;
++ * read_barrier_depends();
++ * x = a;
++ * </programlisting>
++ *
++ * does not enforce ordering, since there is no data dependency between
++ * the read of "a" and the read of "b". Therefore, on some CPUs, such
++ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
++ * in cases like this where there are no data dependencies.
++ **/
++
++#define read_barrier_depends() do { } while(0)
++
++#ifdef CONFIG_X86_OOSTORE
++/* Actually there are no OOO-store-capable CPUs for now that do SSE,
++ but allow for the possibility already. */
++#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
++#else
++#define wmb() __asm__ __volatile__ ("": : :"memory")
++#endif
++
++#ifdef CONFIG_SMP
++#define smp_mb() mb()
++#define smp_rmb() rmb()
++#define smp_wmb() wmb()
++#define smp_read_barrier_depends() read_barrier_depends()
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++#else
++#define smp_mb() barrier()
++#define smp_rmb() barrier()
++#define smp_wmb() barrier()
++#define smp_read_barrier_depends() do { } while(0)
++#define set_mb(var, value) do { var = value; barrier(); } while (0)
++#endif
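++/*
++ * Illustrative sketch (hypothetical flag/data pair): smp_wmb() on the
++ * producer pairs with smp_rmb() on the consumer:
++ *
++ *	CPU 0				CPU 1
++ *
++ *	data = 42;			while (!flag)
++ *	smp_wmb();				cpu_relax();
++ *	flag = 1;			smp_rmb();
++ *					use(data);
++ */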
++
++#include <linux/irqflags.h>
++
++/*
++ * disable hlt during certain critical i/o operations
++ */
++#define HAVE_DISABLE_HLT
++void disable_hlt(void);
++void enable_hlt(void);
++
++extern int es7000_plat;
++void cpu_idle_wait(void);
++
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible:
++ */
++static inline void sched_cacheflush(void)
++{
++ wbinvd();
++}
++
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++
++void default_idle(void);
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/tlbflush.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/tlbflush.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/tlbflush.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/tlbflush.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,101 @@
++#ifndef _I386_TLBFLUSH_H
++#define _I386_TLBFLUSH_H
++
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++#define __flush_tlb_global() xen_tlb_flush()
++#define __flush_tlb_all() xen_tlb_flush()
++
++extern unsigned long pgkern_mask;
++
++#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
++
++#define __flush_tlb_single(addr) xen_invlpg(addr)
++
++#define __flush_tlb_one(addr) __flush_tlb_single(addr)
++
++/*
++ * TLB flushing:
++ *
++ * - flush_tlb() flushes the current mm struct's TLBs
++ * - flush_tlb_all() flushes all processes' TLBs
++ * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
++ * - flush_tlb_page(vma, vmaddr) flushes one page
++ * - flush_tlb_range(vma, start, end) flushes a range of pages
++ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * ...but the i386 has somewhat limited TLB flushing capabilities,
++ * and page-granular flushes are available only on i486 and up.
++ */
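++/*
++ * Illustrative sketch (hypothetical caller): after changing a single
++ * user PTE, invalidate just that mapping instead of the whole TLB:
++ *
++ *	set_pte(ptep, entry);
++ *	flush_tlb_page(vma, address);
++ */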
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ if (mm == current->active_mm)
++ __flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++ __flush_tlb()
++
++#define flush_tlb_all xen_tlb_flush_all
++#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
++#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
++#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
++
++#define flush_tlb() flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++ flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK 1
++#define TLBSTATE_LAZY 2
++
++struct tlb_state
++{
++ struct mm_struct *active_mm;
++ int state;
++ char __cacheline_padding[L1_CACHE_BYTES-8];
++};
++DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
++
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ /* i386 does not keep any page table caches in TLB */
++}
++
++#endif /* _I386_TLBFLUSH_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/vga.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/vga.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/vga.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/vga.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,20 @@
++/*
++ * Access to VGA videoram
++ *
++ * (c) 1998 Martin Mares <mj@ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ * On the PC, we can just recalculate addresses and then
++ * access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/asm/xenoprof.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/xenoprof.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/asm/xenoprof.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/asm/xenoprof.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,48 @@
++/******************************************************************************
++ * asm-i386/mach-xen/asm/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __ASM_XENOPROF_H__
++#define __ASM_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++struct super_block;
++struct dentry;
++int xenoprof_create_files(struct super_block * sb, struct dentry * root);
++#define HAVE_XENOPROF_CREATE_FILES
++
++struct xenoprof_init;
++void xenoprof_arch_init_counter(struct xenoprof_init *init);
++void xenoprof_arch_counter(void);
++void xenoprof_arch_start(void);
++void xenoprof_arch_stop(void);
++
++struct xenoprof_arch_shared_buffer {
++ /* nothing */
++};
++struct xenoprof_shared_buffer;
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_get_buffer;
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_passive;
++int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
++
++#endif /* CONFIG_XEN */
++#endif /* __ASM_XENOPROF_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/irq_vectors.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/irq_vectors.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/irq_vectors.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/irq_vectors.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,125 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ * FIRST_EXTERNAL_VECTOR:
++ * The first free place for external interrupts
++ *
++ * SYSCALL_VECTOR:
++ * The IRQ vector through which a syscall makes the
++ * user-to-kernel transition.
++ *
++ * TIMER_IRQ:
++ * The IRQ number the timer interrupt comes in at.
++ *
++ * NR_IRQS:
++ * The total number of interrupt vectors (including all the
++ * architecture specific interrupts) needed.
++ *
++ */
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define SYSCALL_VECTOR 0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare', they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++#define INVALIDATE_TLB_VECTOR 0xfd
++#define RESCHEDULE_VECTOR 0xfc
++#define CALL_FUNCTION_VECTOR 0xfb
++
++#define THERMAL_APIC_VECTOR 0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef
++
++/*
++ * 16 8259A IRQs, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR 0
++#define CALL_FUNCTION_VECTOR 1
++#define NR_IPIS 2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ 13
++
++#define FIRST_VM86_IRQ 3
++#define LAST_VM86_IRQ 15
++#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ * 1. A one-to-one mapping of real physical IRQs. This space is only used
++ * if we have physical device-access privilege. This region is at the
++ * start of the IRQ space so that existing device drivers do not need
++ * to be modified to translate physical IRQ numbers into our IRQ space.
++ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ * are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE 0
++#define NR_PIRQS 256
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
++
++#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS NR_IRQS
++
++#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
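++/*
++ * Worked example: with PIRQ_BASE 0 and NR_PIRQS 256, physical IRQ 9
++ * maps to Linux irq 9 (pirq_to_irq(9)), while the first dynamically
++ * bound event channel maps to irq 256 (dynirq_to_irq(0) == DYNIRQ_BASE).
++ */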
++
++#endif /* _ASM_IRQ_VECTORS_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/mach_traps.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/mach_traps.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/mach_traps.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/mach_traps.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
++ *
++ * Machine specific NMI handling for Xen
++ */
++#ifndef _MACH_TRAPS_H
++#define _MACH_TRAPS_H
++
++#include <linux/bitops.h>
++#include <xen/interface/nmi.h>
++
++static inline void clear_mem_error(unsigned char reason) {}
++static inline void clear_io_check_error(unsigned char reason) {}
++
++static inline unsigned char get_nmi_reason(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned char reason = 0;
++
++ /* construct a value which looks like it came from
++ * port 0x61.
++ */
++ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++ reason |= 0x40;
++ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++ reason |= 0x80;
++
++ return reason;
++}
++
++static inline void reassert_nmi(void) {}
++
++#endif /* !_MACH_TRAPS_H */
+diff -rpuN linux-2.6.18.8/include/asm-i386/mach-xen/setup_arch.h linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/setup_arch.h
+--- linux-2.6.18.8/include/asm-i386/mach-xen/setup_arch.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/mach-xen/setup_arch.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++void __init machine_specific_arch_setup(void);
+diff -rpuN linux-2.6.18.8/include/asm-i386/page.h linux-2.6.18-xen-3.3.0/include/asm-i386/page.h
+--- linux-2.6.18.8/include/asm-i386/page.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/page.h 2008-08-21 11:36:07.000000000 +0200
+@@ -122,7 +122,7 @@ extern int page_is_ram(unsigned long pag
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+-#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
+ #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+ #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+diff -rpuN linux-2.6.18.8/include/asm-i386/pgtable-2level-defs.h linux-2.6.18-xen-3.3.0/include/asm-i386/pgtable-2level-defs.h
+--- linux-2.6.18.8/include/asm-i386/pgtable-2level-defs.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/pgtable-2level-defs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -1,6 +1,8 @@
+ #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+ #define _I386_PGTABLE_2LEVEL_DEFS_H
+
++#define HAVE_SHARED_KERNEL_PMD 0
++
+ /*
+ * traditional i386 two-level paging structure:
+ */
+diff -rpuN linux-2.6.18.8/include/asm-i386/pgtable-3level-defs.h linux-2.6.18-xen-3.3.0/include/asm-i386/pgtable-3level-defs.h
+--- linux-2.6.18.8/include/asm-i386/pgtable-3level-defs.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-i386/pgtable-3level-defs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -1,6 +1,8 @@
+ #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
+ #define _I386_PGTABLE_3LEVEL_DEFS_H
+
++#define HAVE_SHARED_KERNEL_PMD 1
++
+ /*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/agp.h linux-2.6.18-xen-3.3.0/include/asm-ia64/agp.h
+--- linux-2.6.18.8/include/asm-ia64/agp.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/agp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -19,13 +19,44 @@
+ #define flush_agp_cache() mb()
+
+ /* Convert a physical address to an address suitable for the GART. */
++#ifndef CONFIG_XEN
+ #define phys_to_gart(x) (x)
+ #define gart_to_phys(x) (x)
++#else
++#define phys_to_gart(x) phys_to_machine_for_dma(x)
++#define gart_to_phys(x) machine_to_phys_for_dma(x)
++#endif
+
+ /* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#ifndef CONFIG_XEN
+ #define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+ #define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
++#else
++#include <asm/hypervisor.h>
++static inline char*
++alloc_gatt_pages(unsigned int order)
++{
++ unsigned long error;
++ unsigned long ret = __get_free_pages(GFP_KERNEL, (order));
++ if (ret == 0) {
++ goto out;
++ }
++ error = xen_create_contiguous_region(ret, order, 0);
++ if (error) {
++ free_pages(ret, order);
++ ret = 0;
++ }
++out:
++ return (char*)ret;
++}
++static inline void
++free_gatt_pages(void* table, unsigned int order)
++{
++ xen_destroy_contiguous_region((unsigned long)table, order);
++ free_pages((unsigned long)table, order);
++}
++#endif /* CONFIG_XEN */
+
+ #endif /* _ASM_IA64_AGP_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/gcc_intrin.h linux-2.6.18-xen-3.3.0/include/asm-ia64/gcc_intrin.h
+--- linux-2.6.18.8/include/asm-ia64/gcc_intrin.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/gcc_intrin.h 2008-08-21 11:36:07.000000000 +0200
+@@ -26,7 +26,7 @@ extern void ia64_bad_param_for_getreg (v
+
+ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+
+-#define ia64_setreg(regnum, val) \
++#define __ia64_setreg(regnum, val) \
+ ({ \
+ switch (regnum) { \
+ case _IA64_REG_PSR_L: \
+@@ -55,7 +55,7 @@ register unsigned long ia64_r13 asm ("r1
+ } \
+ })
+
+-#define ia64_getreg(regnum) \
++#define __ia64_getreg(regnum) \
+ ({ \
+ __u64 ia64_intri_res; \
+ \
+@@ -92,7 +92,7 @@ register unsigned long ia64_r13 asm ("r1
+
+ #define ia64_hint_pause 0
+
+-#define ia64_hint(mode) \
++#define __ia64_hint(mode) \
+ ({ \
+ switch (mode) { \
+ case ia64_hint_pause: \
+@@ -374,7 +374,7 @@ register unsigned long ia64_r13 asm ("r1
+
+ #define ia64_invala() asm volatile ("invala" ::: "memory")
+
+-#define ia64_thash(addr) \
++#define __ia64_thash(addr) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+@@ -394,18 +394,18 @@ register unsigned long ia64_r13 asm ("r1
+
+ #define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
+
+-#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
++#define __ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+-#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
++#define __ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
++#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
++#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+-#define ia64_tpa(addr) \
++#define __ia64_tpa(addr) \
+ ({ \
+ __u64 ia64_pa; \
+ asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
+@@ -415,22 +415,22 @@ register unsigned long ia64_r13 asm ("r1
+ #define __ia64_set_dbr(index, val) \
+ asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+-#define ia64_set_ibr(index, val) \
++#define __ia64_set_ibr(index, val) \
+ asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+-#define ia64_set_pkr(index, val) \
++#define __ia64_set_pkr(index, val) \
+ asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+-#define ia64_set_pmc(index, val) \
++#define __ia64_set_pmc(index, val) \
+ asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+-#define ia64_set_pmd(index, val) \
++#define __ia64_set_pmd(index, val) \
+ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+-#define ia64_set_rr(index, val) \
++#define __ia64_set_rr(index, val) \
+ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+-#define ia64_get_cpuid(index) \
++#define __ia64_get_cpuid(index) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
+@@ -444,21 +444,21 @@ register unsigned long ia64_r13 asm ("r1
+ ia64_intri_res; \
+ })
+
+-#define ia64_get_ibr(index) \
++#define __ia64_get_ibr(index) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+ })
+
+-#define ia64_get_pkr(index) \
++#define __ia64_get_pkr(index) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+ })
+
+-#define ia64_get_pmc(index) \
++#define __ia64_get_pmc(index) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+@@ -466,48 +466,48 @@ register unsigned long ia64_r13 asm ("r1
+ })
+
+
+-#define ia64_get_pmd(index) \
++#define __ia64_get_pmd(index) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+ })
+
+-#define ia64_get_rr(index) \
++#define __ia64_get_rr(index) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
+ ia64_intri_res; \
+ })
+
+-#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
++#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+ #define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+
+-#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
+-#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
++#define __ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
++#define __ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
+ #define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
+ #define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+-#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
++#define __ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
+
+-#define ia64_ptcga(addr, size) \
++#define __ia64_ptcga(addr, size) \
+ do { \
+ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ ia64_dv_serialize_data(); \
+ } while (0)
+
+-#define ia64_ptcl(addr, size) \
++#define __ia64_ptcl(addr, size) \
+ do { \
+ asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ ia64_dv_serialize_data(); \
+ } while (0)
+
+-#define ia64_ptri(addr, size) \
++#define __ia64_ptri(addr, size) \
+ asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+-#define ia64_ptrd(addr, size) \
++#define __ia64_ptrd(addr, size) \
+ asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+ /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+@@ -589,7 +589,7 @@ do { \
+ } \
+ })
+
+-#define ia64_intrin_local_irq_restore(x) \
++#define __ia64_intrin_local_irq_restore(x) \
+ do { \
+ asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
+ "(p6) ssm psr.i;" \
+@@ -598,4 +598,6 @@ do { \
+ :: "r"((x)) : "p6", "p7", "memory"); \
+ } while (0)
+
++#define __ia64_get_psr_i() (__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
++
+ #endif /* _ASM_IA64_GCC_INTRIN_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/gnttab_dma.h linux-2.6.18-xen-3.3.0/include/asm-ia64/gnttab_dma.h
+--- linux-2.6.18.8/include/asm-ia64/gnttab_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/gnttab_dma.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _ASM_IA64_GNTTAB_DMA_H
++#define _ASM_IA64_GNTTAB_DMA_H
++
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++ return 0;
++}
++
++/* The caller must get the DMA address after calling this function. */
++static inline void gnttab_dma_use_page(struct page *page)
++{
++ __gnttab_dma_map_page(page);
++}
++
++static inline dma_addr_t gnttab_dma_map_page(struct page *page)
++{
++ gnttab_dma_use_page(page);
++ return page_to_bus(page);
++}
++
++static inline dma_addr_t gnttab_dma_map_virt(void *ptr)
++{
++ return gnttab_dma_map_page(virt_to_page(ptr)) + offset_in_page(ptr);
++}
++
++static inline void gnttab_dma_unmap_page(dma_addr_t dma_address)
++{
++ __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(dma_address)));
++}
++
++#endif /* _ASM_IA64_GNTTAB_DMA_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/hw_irq.h linux-2.6.18-xen-3.3.0/include/asm-ia64/hw_irq.h
+--- linux-2.6.18.8/include/asm-ia64/hw_irq.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/hw_irq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -15,7 +15,11 @@
+ #include <asm/ptrace.h>
+ #include <asm/smp.h>
+
++#ifndef CONFIG_XEN
+ typedef u8 ia64_vector;
++#else
++typedef u16 ia64_vector;
++#endif
+
+ /*
+ * 0 special
+@@ -99,6 +103,12 @@ extern void register_percpu_irq (ia64_ve
+
+ static inline void ia64_resend_irq(unsigned int vector)
+ {
++#ifdef CONFIG_XEN
++ extern int resend_irq_on_evtchn(unsigned int i);
++ if (is_running_on_xen())
++ resend_irq_on_evtchn(vector);
++ else
++#endif /* CONFIG_XEN */
+ platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+ }
+
+diff -rpuN linux-2.6.18.8/include/asm-ia64/hypercall.h linux-2.6.18-xen-3.3.0/include/asm-ia64/hypercall.h
+--- linux-2.6.18.8/include/asm-ia64/hypercall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/hypercall.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,458 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#include <asm/xen/xcom_hcall.h>
++struct xencomm_handle;
++extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
++ unsigned long a3, unsigned long a4,
++ unsigned long a5, unsigned long cmd);
++
++/*
++ * Assembler stubs for hyper-calls.
++ */
++
++#define _hypercall0(type, name) \
++({ \
++ long __res; \
++ __res=__hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name); \
++ (type)__res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ long __res; \
++ __res = __hypercall((unsigned long)a1, \
++ 0, 0, 0, 0, __HYPERVISOR_##name); \
++ (type)__res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ long __res; \
++ __res = __hypercall((unsigned long)a1, \
++ (unsigned long)a2, \
++ 0, 0, 0, __HYPERVISOR_##name); \
++ (type)__res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ long __res; \
++ __res = __hypercall((unsigned long)a1, \
++ (unsigned long)a2, \
++ (unsigned long)a3, \
++ 0, 0, __HYPERVISOR_##name); \
++ (type)__res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ long __res; \
++ __res = __hypercall((unsigned long)a1, \
++ (unsigned long)a2, \
++ (unsigned long)a3, \
++ (unsigned long)a4, \
++ 0, __HYPERVISOR_##name); \
++ (type)__res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ long __res; \
++ __res = __hypercall((unsigned long)a1, \
++ (unsigned long)a2, \
++ (unsigned long)a3, \
++ (unsigned long)a4, \
++ (unsigned long)a5, \
++ __HYPERVISOR_##name); \
++ (type)__res; \
++})
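++/*
++ * Worked example: a two-argument wrapper such as
++ * xencomm_arch_hypercall_sched_op(cmd, arg) below expands to
++ *
++ *	__hypercall((unsigned long)cmd, (unsigned long)arg,
++ *		    0, 0, 0, __HYPERVISOR_sched_op);
++ *
++ * with the result cast back to the requested return type.
++ */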
++
++
++static inline int
++xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long
++HYPERVISOR_set_timer_op(u64 timeout)
++{
++ unsigned long timeout_hi = (unsigned long)(timeout >> 32);
++ unsigned long timeout_lo = (unsigned long)timeout;
++ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++}
++
++static inline int
++xencomm_arch_hypercall_platform_op(struct xencomm_handle *op)
++{
++ return _hypercall1(int, platform_op, op);
++}
++
++static inline int
++xencomm_arch_hypercall_sysctl(struct xencomm_handle *op)
++{
++ return _hypercall1(int, sysctl, op);
++}
++
++static inline int
++xencomm_arch_hypercall_domctl(struct xencomm_handle *op)
++{
++ return _hypercall1(int, domctl, op);
++}
++
++static inline int
++xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
++ int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int
++xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int
++xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, event_channel_op, cmd, arg);
++}
++
++static inline int
++xencomm_arch_hypercall_xsm_op(struct xencomm_handle *arg)
++{
++ return _hypercall1(int, xsm_op, arg);
++}
++
++static inline int
++xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int
++xencomm_arch_hypercall_console_io(int cmd, int count,
++ struct xencomm_handle *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int
++xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, physdev_op, cmd, arg);
++}
++
++static inline int
++xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
++ struct xencomm_handle *uop,
++ unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
++
++extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
++
++static inline int
++xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline unsigned long
++xencomm_arch_hypercall_hvm_op(int cmd, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, cmd, arg);
++}
++
++static inline long
++xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
++{
++ return _hypercall3(long, vcpu_op, cmd, cpu, arg);
++}
++
++static inline int
++HYPERVISOR_physdev_op(int cmd, void *arg)
++{
++ switch (cmd) {
++ case PHYSDEVOP_eoi:
++ return _hypercall1(int, ia64_fast_eoi,
++ ((struct physdev_eoi *)arg)->irq);
++ default:
++ return xencomm_hypercall_physdev_op(cmd, arg);
++ }
++}
++
++static inline int
++xencomm_arch_hypercall_xenoprof_op(int op, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline long
++xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
++{
++ return _hypercall1(long, opt_feature, arg);
++}
++
++extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
++static inline void exit_idle(void) {}
++#define do_IRQ(irq, regs) ({ \
++ irq_enter(); \
++ __do_IRQ((irq), (regs)); \
++ irq_exit(); \
++})
++
++#include <linux/err.h>
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static inline unsigned long
++__HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
++{
++ return _hypercall3(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_ioremap, ioaddr, size);
++}
++
++static inline unsigned long
++HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
++{
++ unsigned long ret = ioaddr;
++ if (is_running_on_xen()) {
++ ret = __HYPERVISOR_ioremap(ioaddr, size);
++ if (unlikely(ret == -ENOSYS))
++ panic("hypercall %s failed with %ld. "
++ "Please check Xen and Linux config mismatch\n",
++ __func__, -ret);
++ else if (unlikely(IS_ERR_VALUE(ret)))
++ ret = ioaddr;
++ }
++ return ret;
++}
++
++static inline unsigned long
++__HYPERVISOR_phystomach(unsigned long gpfn)
++{
++ return _hypercall2(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_phystomach, gpfn);
++}
++
++static inline unsigned long
++HYPERVISOR_phystomach(unsigned long gpfn)
++{
++ unsigned long ret = gpfn;
++ if (is_running_on_xen()) {
++ ret = __HYPERVISOR_phystomach(gpfn);
++ }
++ return ret;
++}
++
++static inline unsigned long
++__HYPERVISOR_machtophys(unsigned long mfn)
++{
++ return _hypercall2(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_machtophys, mfn);
++}
++
++static inline unsigned long
++HYPERVISOR_machtophys(unsigned long mfn)
++{
++ unsigned long ret = mfn;
++ if (is_running_on_xen()) {
++ ret = __HYPERVISOR_machtophys(mfn);
++ }
++ return ret;
++}
++
++static inline unsigned long
++__HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
++{
++ return _hypercall3(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_zap_physmap, gpfn, extent_order);
++}
++
++static inline unsigned long
++HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
++{
++ unsigned long ret = 0;
++ if (is_running_on_xen()) {
++ ret = __HYPERVISOR_zap_physmap(gpfn, extent_order);
++ }
++ return ret;
++}
++
++static inline unsigned long
++__HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
++ unsigned long flags, domid_t domid)
++{
++ return _hypercall5(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_add_physmap, gpfn, mfn, flags, domid);
++}
++
++static inline unsigned long
++HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
++ unsigned long flags, domid_t domid)
++{
++ unsigned long ret = 0;
++ BUG_ON(!is_running_on_xen());//XXX
++ if (is_running_on_xen()) {
++ ret = __HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
++ }
++ return ret;
++}
++
++static inline unsigned long
++__HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
++ unsigned long flags, domid_t domid)
++{
++ return _hypercall5(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_add_physmap_with_gmfn,
++ gpfn, gmfn, flags, domid);
++}
++
++static inline unsigned long
++HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
++ unsigned long flags, domid_t domid)
++{
++ unsigned long ret = 0;
++ BUG_ON(!is_running_on_xen());//XXX
++ if (is_running_on_xen()) {
++ ret = __HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn,
++ flags, domid);
++ }
++ return ret;
++}
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++static inline unsigned long
++HYPERVISOR_expose_p2m(unsigned long conv_start_gpfn,
++ unsigned long assign_start_gpfn,
++ unsigned long expose_size, unsigned long granule_pfn)
++{
++ return _hypercall5(unsigned long, ia64_dom0vp_op,
++ IA64_DOM0VP_expose_p2m, conv_start_gpfn,
++ assign_start_gpfn, expose_size, granule_pfn);
++}
++
++static inline int
++xencomm_arch_expose_foreign_p2m(unsigned long gpfn,
++ domid_t domid, struct xencomm_handle *arg,
++ unsigned long flags)
++{
++ return _hypercall5(int, ia64_dom0vp_op,
++ IA64_DOM0VP_expose_foreign_p2m,
++ gpfn, domid, arg, flags);
++}
++
++static inline int
++HYPERVISOR_unexpose_foreign_p2m(unsigned long gpfn, domid_t domid)
++{
++ return _hypercall3(int, ia64_dom0vp_op,
++ IA64_DOM0VP_unexpose_foreign_p2m, gpfn, domid);
++}
++#endif
++
++static inline int
++xencomm_arch_hypercall_perfmon_op(unsigned long cmd,
++ struct xencomm_handle *arg,
++ unsigned long count)
++{
++ return _hypercall4(int, ia64_dom0vp_op,
++ IA64_DOM0VP_perfmon, cmd, arg, count);
++}
++
++static inline int
++xencomm_arch_hypercall_fpswa_revision(struct xencomm_handle *arg)
++{
++ return _hypercall2(int, ia64_dom0vp_op,
++ IA64_DOM0VP_fpswa_revision, arg);
++}
++
++static inline int
++xencomm_arch_hypercall_ia64_debug_op(unsigned long cmd,
++ unsigned long domain,
++ struct xencomm_handle *arg)
++{
++ return _hypercall3(int, ia64_debug_op, cmd, domain, arg);
++}
++
++static inline int
++HYPERVISOR_add_io_space(unsigned long phys_base,
++ unsigned long sparse,
++ unsigned long space_number)
++{
++ return _hypercall4(int, ia64_dom0vp_op, IA64_DOM0VP_add_io_space,
++ phys_base, sparse, space_number);
++}
++
++static inline int
++xencomm_arch_hypercall_kexec_op(int cmd, struct xencomm_handle *arg)
++{
++ return _hypercall2(int, kexec_op, cmd, arg);
++}
++
++// for balloon driver
++#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
++
++/* Use xencomm to do hypercalls. */
++#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
++#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
++#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
++#define HYPERVISOR_multicall xencomm_hypercall_multicall
++#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
++#define HYPERVISOR_console_io xencomm_hypercall_console_io
++#define HYPERVISOR_hvm_op xencomm_hypercall_hvm_op
++#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
++#define HYPERVISOR_xenoprof_op xencomm_hypercall_xenoprof_op
++#define HYPERVISOR_perfmon_op xencomm_hypercall_perfmon_op
++#define HYPERVISOR_fpswa_revision xencomm_hypercall_fpswa_revision
++#define HYPERVISOR_suspend xencomm_hypercall_suspend
++#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
++#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
++#define HYPERVISOR_kexec_op xencomm_hypercall_kexec_op
++
++/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
++#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({BUG();0;})
++
++#endif /* __HYPERCALL_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/hypervisor.h linux-2.6.18-xen-3.3.0/include/asm-ia64/hypervisor.h
+--- linux-2.6.18.8/include/asm-ia64/hypervisor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/hypervisor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * hypervisor.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#ifdef CONFIG_XEN
++/* running_on_xen is set before executing any C code by early_xen_setup */
++extern const int running_on_xen;
++#define is_running_on_xen() (running_on_xen)
++#else /* CONFIG_XEN */
++# ifdef CONFIG_VMX_GUEST
++# define is_running_on_xen() (1)
++# else /* CONFIG_VMX_GUEST */
++# define is_running_on_xen() (0)
++# define HYPERVISOR_ioremap(offset, size) (offset)
++# endif /* CONFIG_VMX_GUEST */
++#endif /* CONFIG_XEN */
++
++#if defined(CONFIG_XEN) || defined(CONFIG_VMX_GUEST)
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/sched.h>
++#include <xen/hypercall.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++
++extern shared_info_t *HYPERVISOR_shared_info;
++extern start_info_t *xen_start_info;
++
++void force_evtchn_callback(void);
++
++/* Turn jiffies into Xen system time. XXX Implement me. */
++#define jiffies_to_st(j) 0
++
++static inline int
++HYPERVISOR_yield(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_block(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_shutdown(
++ unsigned int reason)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = reason
++ };
++
++ int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_poll(
++ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++ struct sched_poll sched_poll = {
++ .nr_ports = nr_ports,
++ .timeout = jiffies_to_st(timeout)
++ };
++
++ int rc;
++
++ set_xen_guest_handle(sched_poll.ports, ports);
++ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++
++ return rc;
++}
++
++#ifndef CONFIG_VMX_GUEST
++/* for drivers/xen/privcmd/privcmd.c */
++#define machine_to_phys_mapping 0
++struct vm_area_struct;
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++struct file;
++int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
++int privcmd_mmap(struct file * file, struct vm_area_struct * vma);
++#define HAVE_ARCH_PRIVCMD_MMAP
++
++/* for drivers/xen/balloon/balloon.c */
++#ifdef CONFIG_XEN_SCRUB_PAGES
++#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++#define pte_mfn(_x) pte_pfn(_x)
++#define phys_to_machine_mapping_valid(_x) (1)
++
++void xen_contiguous_bitmap_init(unsigned long end_pfn);
++int __xen_create_contiguous_region(unsigned long vstart, unsigned int order, unsigned int address_bits);
++static inline int
++xen_create_contiguous_region(unsigned long vstart,
++ unsigned int order, unsigned int address_bits)
++{
++ int ret = 0;
++ if (is_running_on_xen()) {
++ ret = __xen_create_contiguous_region(vstart, order,
++ address_bits);
++ }
++ return ret;
++}
++
++void __xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
++static inline void
++xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ if (is_running_on_xen())
++ __xen_destroy_contiguous_region(vstart, order);
++}
++
++struct page;
++
++int xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
++ unsigned int address_bits);
++
++/* For drivers/xen/core/machine_reboot.c */
++#define HAVE_XEN_POST_SUSPEND
++void xen_post_suspend(int suspend_cancelled);
++
++/* For setup_arch() in arch/ia64/kernel/setup.c */
++void xen_ia64_enable_opt_feature(void);
++#endif /* !CONFIG_VMX_GUEST */
++
++#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
++#define pfn_pte_ma(_x,_y) __pte_ma(0) /* unmodified use */
++
++/* for netfront.c, netback.c */
++#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
++
++static inline void
++MULTI_update_va_mapping(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping;
++ mcl->result = 0;
++}
++
++static inline void
++MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
++ void *uop, unsigned int count)
++{
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = cmd;
++ mcl->args[1] = (unsigned long)uop;
++ mcl->args[2] = count;
++}
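++/*
++ * Illustrative sketch (hypothetical caller): batching two grant-table
++ * operations into a single multicall to save hypercall overhead:
++ *
++ *	multicall_entry_t mcl[2];
++ *
++ *	MULTI_grant_table_op(&mcl[0], GNTTABOP_map_grant_ref, map_ops, 1);
++ *	MULTI_grant_table_op(&mcl[1], GNTTABOP_unmap_grant_ref, unmap_ops, 1);
++ *	(void)HYPERVISOR_multicall(mcl, 2);
++ */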
++
++/*
++ * for blktap.c
++ * int create_lookup_pte_addr(struct mm_struct *mm,
++ * unsigned long address,
++ * uint64_t *ptep);
++ */
++#define create_lookup_pte_addr(mm, address, ptep) \
++ ({ \
++ printk(KERN_EMERG \
++ "%s:%d " \
++ "create_lookup_pte_addr() isn't supported.\n", \
++ __func__, __LINE__); \
++ BUG(); \
++ (-ENOSYS); \
++ })
++
++/* for debug */
++asmlinkage int xprintk(const char *fmt, ...);
++#define xprintd(fmt, ...) xprintk("%s:%d " fmt, __func__, __LINE__, \
++ ##__VA_ARGS__)
++
++#endif /* CONFIG_XEN || CONFIG_VMX_GUEST */
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() \
++ (is_running_on_xen() ? xen_start_info->flags & SIF_INITDOMAIN : 0)
++#else
++#define is_initial_xendomain() 0
++#endif
++
++#endif /* __HYPERVISOR_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/intel_intrin.h linux-2.6.18-xen-3.3.0/include/asm-ia64/intel_intrin.h
+--- linux-2.6.18.8/include/asm-ia64/intel_intrin.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/intel_intrin.h 2008-08-21 11:36:07.000000000 +0200
+@@ -16,8 +16,8 @@
+ * intrinsic
+ */
+
+-#define ia64_getreg __getReg
+-#define ia64_setreg __setReg
++#define __ia64_getreg __getReg
++#define __ia64_setreg __setReg
+
+ #define ia64_hint __hint
+ #define ia64_hint_pause __hint_pause
+@@ -33,16 +33,16 @@
+ #define ia64_getf_exp __getf_exp
+ #define ia64_shrp _m64_shrp
+
+-#define ia64_tpa __tpa
++#define __ia64_tpa __tpa
+ #define ia64_invala __invala
+ #define ia64_invala_gr __invala_gr
+ #define ia64_invala_fr __invala_fr
+ #define ia64_nop __nop
+ #define ia64_sum __sum
+-#define ia64_ssm __ssm
++#define __ia64_ssm __ssm
+ #define ia64_rum __rum
+-#define ia64_rsm __rsm
+-#define ia64_fc __fc
++#define __ia64_rsm __rsm
++#define __ia64_fc __fc
+
+ #define ia64_ldfs __ldfs
+ #define ia64_ldfd __ldfd
+@@ -80,24 +80,24 @@
+
+ #define __ia64_set_dbr(index, val) \
+ __setIndReg(_IA64_REG_INDR_DBR, index, val)
+-#define ia64_set_ibr(index, val) \
++#define __ia64_set_ibr(index, val) \
+ __setIndReg(_IA64_REG_INDR_IBR, index, val)
+-#define ia64_set_pkr(index, val) \
++#define __ia64_set_pkr(index, val) \
+ __setIndReg(_IA64_REG_INDR_PKR, index, val)
+-#define ia64_set_pmc(index, val) \
++#define __ia64_set_pmc(index, val) \
+ __setIndReg(_IA64_REG_INDR_PMC, index, val)
+-#define ia64_set_pmd(index, val) \
++#define __ia64_set_pmd(index, val) \
+ __setIndReg(_IA64_REG_INDR_PMD, index, val)
+-#define ia64_set_rr(index, val) \
++#define __ia64_set_rr(index, val) \
+ __setIndReg(_IA64_REG_INDR_RR, index, val)
+
+-#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
++#define __ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
+ #define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
+-#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
+-#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
+-#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
+-#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
+-#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
++#define __ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
++#define __ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
++#define __ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
++#define __ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
++#define __ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
+
+ #define ia64_srlz_d __dsrlz
+ #define ia64_srlz_i __isrlz
+@@ -116,18 +116,18 @@
+ #define ia64_ld8_acq __ld8_acq
+
+ #define ia64_sync_i __synci
+-#define ia64_thash __thash
+-#define ia64_ttag __ttag
+-#define ia64_itcd __itcd
+-#define ia64_itci __itci
+-#define ia64_itrd __itrd
+-#define ia64_itri __itri
+-#define ia64_ptce __ptce
+-#define ia64_ptcl __ptcl
+-#define ia64_ptcg __ptcg
+-#define ia64_ptcga __ptcga
+-#define ia64_ptri __ptri
+-#define ia64_ptrd __ptrd
++#define __ia64_thash __thash
++#define __ia64_ttag __ttag
++#define __ia64_itcd __itcd
++#define __ia64_itci __itci
++#define __ia64_itrd __itrd
++#define __ia64_itri __itri
++#define __ia64_ptce __ptce
++#define __ia64_ptcl __ptcl
++#define __ia64_ptcg __ptcg
++#define __ia64_ptcga __ptcga
++#define __ia64_ptri __ptri
++#define __ia64_ptrd __ptrd
+ #define ia64_dep_mi _m64_dep_mi
+
+ /* Values for lfhint in __lfetch and __lfetch_fault */
+@@ -142,16 +142,18 @@
+ #define ia64_lfetch_fault __lfetch_fault
+ #define ia64_lfetch_fault_excl __lfetch_fault_excl
+
+-#define ia64_intrin_local_irq_restore(x) \
++#define __ia64_intrin_local_irq_restore(x) \
+ do { \
+ if ((x) != 0) { \
+- ia64_ssm(IA64_PSR_I); \
++ __ia64_ssm(IA64_PSR_I); \
+ ia64_srlz_d(); \
+ } else { \
+- ia64_rsm(IA64_PSR_I); \
++ __ia64_rsm(IA64_PSR_I); \
+ } \
+ } while (0)
+
++#define __ia64_get_psr_i() (__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
++
+ #define __builtin_trap() __break(0);
+
+ #endif /* _ASM_IA64_INTEL_INTRIN_H */
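
[The renames above all follow one pattern: each privilege-sensitive intrinsic moves to a reserved __ia64_* name, and the public ia64_* name is later rebound, either straight back to the native version or to a Xen-aware wrapper (see the new asm-ia64/privop.h further down in this patch). A minimal user-space sketch of that two-level indirection, with stand-in names rather than the kernel's, might look like:

    /* Illustrative sketch only; all names here are stand-ins. */
    #include <stdio.h>

    static unsigned long native_getreg(void) { return 42; }

    /* Level 1: the raw operation keeps a reserved double-underscore name. */
    #define __demo_getreg() native_getreg()

    /* Level 2: the public name. A paravirtualized build would instead bind
     * demo_getreg to a wrapper that dispatches on the hypervisor at run time. */
    #define demo_getreg __demo_getreg

    int main(void)
    {
        printf("reg = %lu\n", demo_getreg());
        return 0;
    }

The point of the split is that existing callers of ia64_getreg() need no changes whichever binding is in effect.]
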
+diff -rpuN linux-2.6.18.8/include/asm-ia64/intrinsics.h linux-2.6.18-xen-3.3.0/include/asm-ia64/intrinsics.h
+--- linux-2.6.18.8/include/asm-ia64/intrinsics.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/intrinsics.h 2008-08-21 11:36:07.000000000 +0200
+@@ -18,6 +18,15 @@
+ # include <asm/gcc_intrin.h>
+ #endif
+
++#define __ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \
++do { \
++ __ia64_set_rr(0x0000000000000000UL, (val0)); \
++ __ia64_set_rr(0x2000000000000000UL, (val1)); \
++ __ia64_set_rr(0x4000000000000000UL, (val2)); \
++ __ia64_set_rr(0x6000000000000000UL, (val3)); \
++ __ia64_set_rr(0x8000000000000000UL, (val4)); \
++} while (0)
++
+ /*
+ * Force an unresolved reference if someone tries to use
+ * ia64_fetch_and_add() with a bad value.
+@@ -177,4 +186,5 @@ extern long ia64_cmpxchg_called_with_bad
+ #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+ #endif
++#include <asm/privop.h>
+ #endif /* _ASM_IA64_INTRINSICS_H */
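
[The new __ia64_set_rr0_to_rr4() above packages the five region-register writes done on every context switch into one operation. Natively it is just five __ia64_set_rr() calls, but the packaging lets the Xen version (in asm-ia64/xen/privop.h later in this patch) replace them with a single HYPERPRIVOP_SET_RR0_TO_RR4 break: one trap instead of five. The magic constants are simply the bases of regions 0-4, since an ia64 region number lives in bits 63:61 of the virtual address; a quick user-space check (assuming 64-bit unsigned long):

    /* Region n starts at (n << 61); regions 0-4 give the constants above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int n;

        for (n = 0; n <= 4; n++)
            printf("rr%u base = 0x%016lx\n", n, (unsigned long)n << 61);
        return 0;
    }
]
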
+diff -rpuN linux-2.6.18.8/include/asm-ia64/io.h linux-2.6.18-xen-3.3.0/include/asm-ia64/io.h
+--- linux-2.6.18.8/include/asm-ia64/io.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/io.h 2008-08-21 11:36:07.000000000 +0200
+@@ -96,9 +96,39 @@ extern int valid_mmap_phys_addr_range (u
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
++#ifndef CONFIG_XEN
+ #define bus_to_virt phys_to_virt
+ #define virt_to_bus virt_to_phys
+ #define page_to_bus page_to_phys
++#else
++#define bus_to_virt(bus) \
++ phys_to_virt(machine_to_phys_for_dma(bus))
++#define virt_to_bus(virt) \
++ phys_to_machine_for_dma(virt_to_phys(virt))
++#define page_to_bus(page) \
++ phys_to_machine_for_dma(page_to_pseudophys(page))
++
++#define page_to_pseudophys(page) \
++ ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++
++/*
++ * Drivers that use page_to_phys() for bus addresses are broken.
++ * This includes:
++ * drivers/ide/cris/ide-cris.c
++ * drivers/scsi/dec_esp.c
++ */
++#define page_to_phys(page) (page_to_pseudophys(page))
++#define bvec_to_bus(bv) (page_to_bus((bv)->bv_page) + \
++ (unsigned long) (bv)->bv_offset)
++#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
++ (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
++ (unsigned long) (bv)->bv_offset)
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
++ (((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \
++ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++ bvec_to_pseudophys((vec2))))
++#endif /* CONFIG_XEN */
+
+ # endif /* KERNEL */
+
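
[The doubled test in the Xen BIOVEC_PHYS_MERGEABLE above is the crux of this hunk: pages adjacent in the guest's pseudo-physical space need not be adjacent in machine (bus) space, and a bio segment merged for DMA must be contiguous in both views. A user-space sketch of the predicate, with an invented guest-to-machine frame mapping (pfn_to_mfn_demo is made up), shows a merge being refused:

    /* Illustrative only: pfn_to_mfn_demo() is an invented p2m mapping. */
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long pfn_to_mfn_demo(unsigned long pfn)
    {
        /* Pretend the hypervisor handed out frames in reverse order. */
        return 0x1000 - pfn;
    }

    static bool bvec_mergeable_demo(unsigned long pfn1, unsigned long pfn2)
    {
        bool pseudo_contig  = (pfn2 == pfn1 + 1);
        bool machine_contig =
            (pfn_to_mfn_demo(pfn2) == pfn_to_mfn_demo(pfn1) + 1);

        return pseudo_contig && machine_contig;  /* both, as in the macro */
    }

    int main(void)
    {
        /* Adjacent pseudo-physically, discontiguous in machine space. */
        printf("mergeable: %d\n", bvec_mergeable_demo(5, 6));
        return 0;
    }
]
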
+diff -rpuN linux-2.6.18.8/include/asm-ia64/iosapic.h linux-2.6.18-xen-3.3.0/include/asm-ia64/iosapic.h
+--- linux-2.6.18.8/include/asm-ia64/iosapic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/iosapic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -53,6 +53,7 @@
+
+ #define NR_IOSAPICS 256
+
++#ifndef CONFIG_XEN
+ static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
+ {
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+@@ -64,6 +65,7 @@ static inline void iosapic_write(char __
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ writel(val, iosapic + IOSAPIC_WINDOW);
+ }
++#endif
+
+ static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
+ {
+diff -rpuN linux-2.6.18.8/include/asm-ia64/irq.h linux-2.6.18-xen-3.3.0/include/asm-ia64/irq.h
+--- linux-2.6.18.8/include/asm-ia64/irq.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/irq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -11,8 +11,41 @@
+ * 02/29/00 D.Mosberger moved most things into hw_irq.h
+ */
+
++#ifndef CONFIG_XEN
+ #define NR_IRQS 256
+ #define NR_IRQ_VECTORS NR_IRQS
++#else
++/*
++ * The flat IRQ space is divided into two regions:
++ * 1. A one-to-one mapping of real physical IRQs. This space is only used
++ * if we have physical device-access privilege. This region is at the
++ * start of the IRQ space so that existing device drivers do not need
++ * to be modified to translate physical IRQ numbers into our IRQ space.
++ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ * are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE 0
++#define NR_PIRQS 256
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS (CONFIG_NR_CPUS * 8)
++
++#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS NR_IRQS
++
++#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
++
++#define RESCHEDULE_VECTOR 0
++#define IPI_VECTOR 1
++#define CMCP_VECTOR 2
++#define CPEP_VECTOR 3
++#define NR_IPIS 4
++#endif /* CONFIG_XEN */
+
+ static __inline__ int
+ irq_canonicalize (int irq)
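
[Concretely, with the layout above a driver keeps using IRQs 0-255 untouched for physical interrupts, while event channels are bound into the dynamic range that follows. A small sketch recomputing the arithmetic for a hypothetical 4-CPU configuration (NR_CPUS_DEMO stands in for CONFIG_NR_CPUS):

    /* Illustrative recomputation of the Xen IRQ layout defined above. */
    #include <assert.h>
    #include <stdio.h>

    #define PIRQ_BASE    0
    #define NR_PIRQS     256
    #define DYNIRQ_BASE  (PIRQ_BASE + NR_PIRQS)
    #define NR_CPUS_DEMO 4                 /* stands in for CONFIG_NR_CPUS */
    #define NR_DYNIRQS   (NR_CPUS_DEMO * 8)

    #define pirq_to_irq(x)   ((x) + PIRQ_BASE)
    #define dynirq_to_irq(x) ((x) + DYNIRQ_BASE)

    int main(void)
    {
        assert(pirq_to_irq(10) == 10);    /* physical IRQs map one-to-one */
        assert(dynirq_to_irq(0) == 256);  /* dynamic IRQs follow them */
        printf("NR_IRQS = %d\n", NR_PIRQS + NR_DYNIRQS);
        return 0;
    }
]
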
+diff -rpuN linux-2.6.18.8/include/asm-ia64/kexec.h linux-2.6.18-xen-3.3.0/include/asm-ia64/kexec.h
+--- linux-2.6.18.8/include/asm-ia64/kexec.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/kexec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++#ifndef _ASM_IA64_KEXEC_H
++#define _ASM_IA64_KEXEC_H
++
++
++/* Maximum physical address we can use pages from */
++#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
++/* Maximum address we can reach in physical address mode */
++#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
++/* Maximum address we can use for the control code buffer */
++#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
++
++#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096)
++
++/* The native architecture */
++#define KEXEC_ARCH KEXEC_ARCH_IA_64
++
++#define MAX_NOTE_BYTES 1024
++
++#define kexec_flush_icache_page(page) do { \
++ unsigned long page_addr = (unsigned long)page_address(page); \
++ flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
++ } while(0)
++
++extern struct kimage *ia64_kimage;
++DECLARE_PER_CPU(u64, ia64_mca_pal_base);
++extern const unsigned int relocate_new_kernel_size;
++extern void relocate_new_kernel(unsigned long, unsigned long,
++ struct ia64_boot_param *, unsigned long);
++static inline void
++crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
++{
++}
++extern struct resource efi_memmap_res;
++extern struct resource boot_param_res;
++extern void kdump_smp_send_stop(void);
++extern void kdump_smp_send_init(void);
++extern void kexec_disable_iosapic(void);
++extern void crash_save_this_cpu(void);
++struct rsvd_region;
++extern unsigned long kdump_find_rsvd_region(unsigned long size,
++ struct rsvd_region *rsvd_regions, int n);
++extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
++extern int kdump_status[];
++extern atomic_t kdump_cpu_freezed;
++extern atomic_t kdump_in_progress;
++
++/* Kexec needs to know about the actual physical address.
++ * But in Xen, on some architectures, a physical address is a
++ * pseudo-physical address. */
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) pfn_to_mfn_for_dma(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn_for_dma(pfn))
++#define kexec_virt_to_phys(addr) phys_to_machine_for_dma(__pa(addr))
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys_for_dma(addr))
++#endif
++
++#endif /* _ASM_IA64_KEXEC_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/machvec.h linux-2.6.18-xen-3.3.0/include/asm-ia64/machvec.h
+--- linux-2.6.18.8/include/asm-ia64/machvec.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/machvec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -35,6 +35,7 @@ typedef int ia64_mv_pci_legacy_read_t (s
+ typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+ u8 size);
+ typedef void ia64_mv_migrate_t(struct task_struct * task);
++typedef void ia64_mv_kernel_launch_event_t(void);
+
+ /* DMA-mapping interface: */
+ typedef void ia64_mv_dma_init (void);
+@@ -108,6 +109,8 @@ extern void machvec_tlb_migrate_finish (
+ # include <asm/machvec_hpzx1_swiotlb.h>
+ # elif defined (CONFIG_IA64_SGI_SN2)
+ # include <asm/machvec_sn2.h>
++# elif defined (CONFIG_IA64_XEN)
++# include <asm/machvec_xen.h>
+ # elif defined (CONFIG_IA64_GENERIC)
+
+ # ifdef MACHVEC_PLATFORM_HEADER
+@@ -205,6 +208,7 @@ struct ia64_machine_vector {
+ ia64_mv_readq_relaxed_t *readq_relaxed;
+ ia64_mv_migrate_t *migrate;
+ ia64_mv_msi_init_t *msi_init;
++ ia64_mv_kernel_launch_event_t *kernel_launch_event;
+ } __attribute__((__aligned__(16))); /* align attrib? see above comment */
+
+ #define MACHVEC_INIT(name) \
+@@ -303,6 +307,9 @@ extern ia64_mv_dma_supported swiotlb_dm
+ #ifndef platform_tlb_migrate_finish
+ # define platform_tlb_migrate_finish machvec_noop_mm
+ #endif
++#ifndef platform_kernel_launch_event
++# define platform_kernel_launch_event machvec_noop
++#endif
+ #ifndef platform_dma_init
+ # define platform_dma_init swiotlb_init
+ #endif
+diff -rpuN linux-2.6.18.8/include/asm-ia64/machvec_sn2.h linux-2.6.18-xen-3.3.0/include/asm-ia64/machvec_sn2.h
+--- linux-2.6.18.8/include/asm-ia64/machvec_sn2.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/machvec_sn2.h 2008-08-21 11:36:07.000000000 +0200
+@@ -67,6 +67,7 @@ extern ia64_mv_dma_sync_sg_for_device sn
+ extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
+ extern ia64_mv_dma_supported sn_dma_supported;
+ extern ia64_mv_migrate_t sn_migrate;
++extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
+ extern ia64_mv_msi_init_t sn_msi_init;
+
+
+@@ -119,6 +120,7 @@ extern ia64_mv_msi_init_t sn_msi_init;
+ #define platform_dma_mapping_error sn_dma_mapping_error
+ #define platform_dma_supported sn_dma_supported
+ #define platform_migrate sn_migrate
++#define platform_kernel_launch_event sn_kernel_launch_event
+ #ifdef CONFIG_PCI_MSI
+ #define platform_msi_init sn_msi_init
+ #else
+diff -rpuN linux-2.6.18.8/include/asm-ia64/machvec_xen.h linux-2.6.18-xen-3.3.0/include/asm-ia64/machvec_xen.h
+--- linux-2.6.18.8/include/asm-ia64/machvec_xen.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/machvec_xen.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,43 @@
++#ifndef _ASM_IA64_MACHVEC_XEN_h
++#define _ASM_IA64_MACHVEC_XEN_h
++
++extern ia64_mv_setup_t xen_setup;
++extern ia64_mv_cpu_init_t xen_cpu_init;
++extern ia64_mv_irq_init_t xen_irq_init;
++extern ia64_mv_send_ipi_t xen_platform_send_ipi;
++extern ia64_mv_dma_alloc_coherent xen_alloc_coherent;
++extern ia64_mv_dma_free_coherent xen_free_coherent;
++extern ia64_mv_dma_map_single xen_map_single;
++extern ia64_mv_dma_unmap_single xen_unmap_single;
++extern ia64_mv_dma_map_sg xen_map_sg;
++extern ia64_mv_dma_unmap_sg xen_unmap_sg;
++extern ia64_mv_dma_supported xen_dma_supported;
++extern ia64_mv_dma_mapping_error xen_dma_mapping_error;
++
++/*
++ * This stuff has dual use!
++ *
++ * For a generic kernel, the macros are used to initialize the
++ * platform's machvec structure. When compiling a non-generic kernel,
++ * the macros are used directly.
++ */
++#define platform_name "xen"
++#define platform_setup xen_setup
++#define platform_cpu_init xen_cpu_init
++#define platform_irq_init xen_irq_init
++#define platform_send_ipi xen_platform_send_ipi
++#define platform_dma_init machvec_noop
++#define platform_dma_alloc_coherent xen_alloc_coherent
++#define platform_dma_free_coherent xen_free_coherent
++#define platform_dma_map_single xen_map_single
++#define platform_dma_unmap_single xen_unmap_single
++#define platform_dma_map_sg xen_map_sg
++#define platform_dma_unmap_sg xen_unmap_sg
++#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
++#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
++#define platform_dma_sync_single_for_device machvec_dma_sync_single
++#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
++#define platform_dma_supported xen_dma_supported
++#define platform_dma_mapping_error xen_dma_mapping_error
++
++#endif /* _ASM_IA64_MACHVEC_XEN_h */
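
[The "dual use" comment above describes the standard ia64 machine-vector trick: in a generic kernel these #defines populate a struct ia64_machine_vector and calls dispatch through its function pointers, while a Xen-only kernel resolves platform_setup and friends directly to xen_setup etc. at compile time. A reduced user-space sketch of the two modes, with stand-in names rather than the kernel's types:

    /* Illustrative sketch of the machvec pattern; not the kernel's types. */
    #include <stdio.h>

    static void xen_setup_demo(void) { puts("xen setup"); }

    /* Non-generic kernel: the macro binds the platform op directly. */
    #define platform_setup_demo xen_setup_demo

    #ifdef GENERIC_KERNEL_DEMO
    /* Generic kernel: the same macro name fills a vector instead. */
    struct machvec_demo { void (*setup)(void); };
    static const struct machvec_demo mv = { .setup = platform_setup_demo };
    #define do_setup() (mv.setup())
    #else
    #define do_setup() platform_setup_demo()
    #endif

    int main(void)
    {
        do_setup();
        return 0;
    }
]
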
+diff -rpuN linux-2.6.18.8/include/asm-ia64/maddr.h linux-2.6.18-xen-3.3.0/include/asm-ia64/maddr.h
+--- linux-2.6.18.8/include/asm-ia64/maddr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/maddr.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,116 @@
++#ifndef _ASM_IA64_MADDR_H
++#define _ASM_IA64_MADDR_H
++
++#include <linux/kernel.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++#ifdef CONFIG_XEN
++
++#define INVALID_P2M_ENTRY (~0UL)
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++extern int p2m_initialized;
++extern unsigned long p2m_min_low_pfn;
++extern unsigned long p2m_max_low_pfn;
++extern unsigned long p2m_convert_min_pfn;
++extern unsigned long p2m_convert_max_pfn;
++extern volatile const pte_t* p2m_pte;
++unsigned long p2m_phystomach(unsigned long gpfn);
++#else
++#define p2m_initialized (0)
++#define p2m_phystomach(gpfn) INVALID_MFN
++#endif
++
++/* XXX xen page size != page size */
++static inline unsigned long
++pfn_to_mfn_for_dma(unsigned long pfn)
++{
++ unsigned long mfn;
++ if (p2m_initialized)
++ return p2m_phystomach(pfn);
++ mfn = HYPERVISOR_phystomach(pfn);
++ BUG_ON(mfn == 0); /* XXX */
++ BUG_ON(mfn == INVALID_P2M_ENTRY); /* XXX */
++ BUG_ON(mfn == INVALID_MFN);
++ return mfn;
++}
++
++static inline unsigned long
++phys_to_machine_for_dma(unsigned long phys)
++{
++ unsigned long machine =
++ pfn_to_mfn_for_dma(phys >> PAGE_SHIFT) << PAGE_SHIFT;
++ machine |= (phys & ~PAGE_MASK);
++ return machine;
++}
++
++static inline unsigned long
++mfn_to_pfn_for_dma(unsigned long mfn)
++{
++ unsigned long pfn;
++ pfn = HYPERVISOR_machtophys(mfn);
++ BUG_ON(pfn == 0);
++ /* BUG_ON(pfn == INVALID_M2P_ENTRY); */
++ return pfn;
++}
++
++static inline unsigned long
++machine_to_phys_for_dma(unsigned long machine)
++{
++ unsigned long phys =
++ mfn_to_pfn_for_dma(machine >> PAGE_SHIFT) << PAGE_SHIFT;
++ phys |= (machine & ~PAGE_MASK);
++ return phys;
++}
++
++#ifdef CONFIG_SPARSEMEM
++/*
++ * When CONFIG_SPARSEMEM=y, pfn_valid() is defined in
++ * linux/include/linux/mmzone.h. Hoever #include <linux/mmzone.h>
++ * causes the header inclusion hell.
++ */
++static inline int pfn_valid(unsigned long pfn);
++#endif
++
++static inline unsigned long
++mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn_for_dma(mfn);
++ if (!pfn_valid(pfn))
++ return INVALID_P2M_ENTRY;
++ return pfn;
++}
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn_for_dma(pfn) (pfn)
++#define mfn_to_pfn_for_dma(mfn) (mfn)
++#define phys_to_machine_for_dma(phys) (phys)
++#define machine_to_phys_for_dma(machine) (machine)
++#define mfn_to_local_pfn(mfn) (mfn)
++
++#endif /* !CONFIG_XEN */
++
++#define mfn_to_pfn(mfn) (mfn)
++#define pfn_to_mfn(pfn) (pfn)
++
++#define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
++#define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
++#define virt_to_machine(virt) __pa(virt) /* for tpmfront.c */
++
++#define set_phys_to_machine(pfn, mfn) do { } while (0)
++
++typedef unsigned long maddr_t; /* to compile netback, netfront */
++#ifndef _ASM_IA64_SN_TYPES_H /* paddr_t is defined in asm-ia64/sn/types.h */
++typedef unsigned long paddr_t;
++#endif
++
++#ifdef CONFIG_XEN
++int range_straddles_page_boundary(paddr_t p, size_t size);
++#else
++#define range_straddles_page_boundary(addr, size) (0)
++#endif
++
++#endif /* _ASM_IA64_MADDR_H */
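
[Note how phys_to_machine_for_dma() and machine_to_phys_for_dma() above translate only the frame number and then OR the page offset back in, so sub-page offsets survive the p2m/m2p hop. A user-space sketch of the same round trip, with an invented frame mapping (the +0x100 offset and 16 KB page size are made up for the demo):

    /* Illustrative only: the frame mapping and page size are invented. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT_DEMO 14UL
    #define PAGE_SIZE_DEMO  (1UL << PAGE_SHIFT_DEMO)
    #define PAGE_MASK_DEMO  (~(PAGE_SIZE_DEMO - 1))

    static unsigned long pfn_to_mfn_demo(unsigned long pfn) { return pfn + 0x100; }
    static unsigned long mfn_to_pfn_demo(unsigned long mfn) { return mfn - 0x100; }

    static unsigned long phys_to_machine_demo(unsigned long phys)
    {
        unsigned long machine =
            pfn_to_mfn_demo(phys >> PAGE_SHIFT_DEMO) << PAGE_SHIFT_DEMO;

        machine |= (phys & ~PAGE_MASK_DEMO);   /* keep the in-page offset */
        return machine;
    }

    int main(void)
    {
        unsigned long phys = (5UL << PAGE_SHIFT_DEMO) | 0x123;
        unsigned long mach = phys_to_machine_demo(phys);

        assert((mach & ~PAGE_MASK_DEMO) == 0x123);  /* offset preserved */
        assert(mfn_to_pfn_demo(mach >> PAGE_SHIFT_DEMO) == 5);
        printf("0x%lx -> 0x%lx\n", phys, mach);
        return 0;
    }
]
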
+diff -rpuN linux-2.6.18.8/include/asm-ia64/meminit.h linux-2.6.18-xen-3.3.0/include/asm-ia64/meminit.h
+--- linux-2.6.18.8/include/asm-ia64/meminit.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/meminit.h 2008-08-21 11:36:07.000000000 +0200
+@@ -15,11 +15,17 @@
+ * - initrd (optional)
+ * - command line string
+ * - kernel code & data
++ * - crash dumping code reserved region
+ * - Kernel memory map built from EFI memory map
++ * - xen start info
+ *
+ * More could be added if necessary
+ */
+-#define IA64_MAX_RSVD_REGIONS 6
++#ifndef CONFIG_XEN
++#define IA64_MAX_RSVD_REGIONS 7
++#else
++#define IA64_MAX_RSVD_REGIONS 8
++#endif
+
+ struct rsvd_region {
+ unsigned long start; /* virtual address of beginning of element */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/mmu_context.h linux-2.6.18-xen-3.3.0/include/asm-ia64/mmu_context.h
+--- linux-2.6.18.8/include/asm-ia64/mmu_context.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/mmu_context.h 2008-08-21 11:36:07.000000000 +0200
+@@ -151,11 +151,7 @@ reload_context (nv_mm_context_t context)
+ # endif
+ #endif
+
+- ia64_set_rr(0x0000000000000000UL, rr0);
+- ia64_set_rr(0x2000000000000000UL, rr1);
+- ia64_set_rr(0x4000000000000000UL, rr2);
+- ia64_set_rr(0x6000000000000000UL, rr3);
+- ia64_set_rr(0x8000000000000000UL, rr4);
++ ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+ }
+
+diff -rpuN linux-2.6.18.8/include/asm-ia64/page.h linux-2.6.18-xen-3.3.0/include/asm-ia64/page.h
+--- linux-2.6.18.8/include/asm-ia64/page.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/page.h 2008-08-21 11:36:07.000000000 +0200
+@@ -119,6 +119,7 @@ extern struct page *vmem_map;
+ #endif
+
+ #ifdef CONFIG_FLATMEM
++extern unsigned long max_mapnr;
+ # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+ #elif defined(CONFIG_DISCONTIGMEM)
+ extern unsigned long min_low_pfn;
+@@ -126,7 +127,9 @@ extern unsigned long max_low_pfn;
+ # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+ #endif
+
++#ifndef CONFIG_XEN
+ #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
++#endif
+ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+@@ -227,5 +230,17 @@ get_order (unsigned long size)
+ (((current->personality & READ_IMPLIES_EXEC) != 0) \
+ ? VM_EXEC : 0))
+
++#ifndef __ASSEMBLY__
++
++#include <linux/kernel.h>
++#include <asm/hypervisor.h> /* to compile ioremap.c */
++
++#ifdef CONFIG_XEN
++
++#include <asm/maddr.h>
++
++#endif /* CONFIG_XEN */
++#endif /* __ASSEMBLY__ */
++
+ # endif /* __KERNEL__ */
+ #endif /* _ASM_IA64_PAGE_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/pgalloc.h linux-2.6.18-xen-3.3.0/include/asm-ia64/pgalloc.h
+--- linux-2.6.18.8/include/asm-ia64/pgalloc.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/pgalloc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -125,7 +125,11 @@ static inline void pmd_free(pmd_t * pmd)
+ static inline void
+ pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
+ {
++#ifndef CONFIG_XEN
+ pmd_val(*pmd_entry) = page_to_phys(pte);
++#else
++ pmd_val(*pmd_entry) = page_to_pseudophys(pte);
++#endif
+ }
+
+ static inline void
+diff -rpuN linux-2.6.18.8/include/asm-ia64/privop.h linux-2.6.18-xen-3.3.0/include/asm-ia64/privop.h
+--- linux-2.6.18.8/include/asm-ia64/privop.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/privop.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,63 @@
++#ifndef _ASM_IA64_PRIVOP_H
++#define _ASM_IA64_PRIVOP_H
++
++#ifndef _ASM_IA64_INTRINSICS_H
++#error "don't include privop.h directly. instead include intrinsics.h"
++#endif
++/*
++ * Copyright (C) 2005 Hewlett-Packard Co
++ * Dan Magenheimer <dan.magenheimer@hp.com>
++ *
++ */
++
++#ifdef CONFIG_XEN
++#include <asm/xen/privop.h>
++#endif
++
++#ifndef __ASSEMBLY__
++
++#ifndef IA64_PARAVIRTUALIZED
++
++#define ia64_getreg __ia64_getreg
++#define ia64_setreg __ia64_setreg
++#define ia64_hint __ia64_hint
++#define ia64_thash __ia64_thash
++#define ia64_itci __ia64_itci
++#define ia64_itcd __ia64_itcd
++#define ia64_itri __ia64_itri
++#define ia64_itrd __ia64_itrd
++#define ia64_tpa __ia64_tpa
++#define ia64_set_ibr __ia64_set_ibr
++#define ia64_set_pkr __ia64_set_pkr
++#define ia64_set_pmc __ia64_set_pmc
++#define ia64_set_pmd __ia64_set_pmd
++#define ia64_set_rr __ia64_set_rr
++#define ia64_get_cpuid __ia64_get_cpuid
++#define ia64_get_ibr __ia64_get_ibr
++#define ia64_get_pkr __ia64_get_pkr
++#define ia64_get_pmc __ia64_get_pmc
++#define ia64_get_pmd __ia64_get_pmd
++#define ia64_get_rr __ia64_get_rr
++#define ia64_fc __ia64_fc
++#define ia64_ssm __ia64_ssm
++#define ia64_rsm __ia64_rsm
++#define ia64_ptce __ia64_ptce
++#define ia64_ptcga __ia64_ptcga
++#define ia64_ptcl __ia64_ptcl
++#define ia64_ptri __ia64_ptri
++#define ia64_ptrd __ia64_ptrd
++#define ia64_get_psr_i __ia64_get_psr_i
++#define ia64_intrin_local_irq_restore __ia64_intrin_local_irq_restore
++#define ia64_leave_kernel __ia64_leave_kernel
++#define ia64_leave_syscall __ia64_leave_syscall
++#define ia64_trace_syscall __ia64_trace_syscall
++#define ia64_ret_from_clone __ia64_ret_from_clone
++#define ia64_switch_to __ia64_switch_to
++#define ia64_pal_call_static __ia64_pal_call_static
++#define ia64_set_rr0_to_rr4 __ia64_set_rr0_to_rr4
++
++#endif /* !IA64_PARAVIRTUALIZED */
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* _ASM_IA64_PRIVOP_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/sal.h linux-2.6.18-xen-3.3.0/include/asm-ia64/sal.h
+--- linux-2.6.18.8/include/asm-ia64/sal.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/sal.h 2008-08-21 11:36:07.000000000 +0200
+@@ -42,6 +42,9 @@
+ #include <asm/pal.h>
+ #include <asm/system.h>
+ #include <asm/fpu.h>
++#ifdef CONFIG_XEN
++#include <asm/xen/xencomm.h>
++#endif
+
+ extern spinlock_t sal_lock;
+
+@@ -686,10 +689,43 @@ ia64_sal_clear_state_info (u64 sal_info_
+ /* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs, CMCs, or CPEs.
+ */
++#ifdef CONFIG_XEN
++static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type);
++typedef struct ia64_mca_xencomm_t {
++ void *record;
++ struct xencomm_handle *handle;
++ struct list_head list;
++} ia64_mca_xencomm_t;
++extern struct list_head ia64_mca_xencomm_list;
++extern spinlock_t ia64_mca_xencomm_lock;
++#endif
++
+ static inline u64
+ ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+ {
+ struct ia64_sal_retval isrv;
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ ia64_mca_xencomm_t *entry;
++ struct xencomm_handle *desc = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ia64_mca_xencomm_lock, flags);
++ list_for_each_entry(entry, &ia64_mca_xencomm_list, list) {
++ if (entry->record == sal_info) {
++ desc = entry->handle;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags);
++
++ if (desc == NULL)
++ return 0;
++
++ SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
++ desc, 0, 0, 0, 0);
++ } else
++#endif
+ SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+ sal_info, 0, 0, 0, 0);
+ if (isrv.status)
+diff -rpuN linux-2.6.18.8/include/asm-ia64/sn/sn_sal.h linux-2.6.18-xen-3.3.0/include/asm-ia64/sn/sn_sal.h
+--- linux-2.6.18.8/include/asm-ia64/sn/sn_sal.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/sn/sn_sal.h 2008-08-21 11:36:07.000000000 +0200
+@@ -87,6 +87,8 @@
+ #define SN_SAL_INJECT_ERROR 0x02000067
+ #define SN_SAL_SET_CPU_NUMBER 0x02000068
+
++#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
++
+ /*
+ * Service-specific constants
+ */
+@@ -1154,4 +1156,11 @@ ia64_sn_set_cpu_number(int cpu)
+ SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+ }
++static inline int
++ia64_sn_kernel_launch_event(void)
++{
++ struct ia64_sal_retval rv;
++ SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
++ return rv.status;
++}
+ #endif /* _ASM_IA64_SN_SN_SAL_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/sn/types.h linux-2.6.18-xen-3.3.0/include/asm-ia64/sn/types.h
+--- linux-2.6.18.8/include/asm-ia64/sn/types.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/sn/types.h 2008-08-21 11:36:07.000000000 +0200
+@@ -20,7 +20,9 @@ typedef unsigned char slotid_t; /* slot
+ typedef unsigned char slabid_t; /* slab (asic) within slot */
+ typedef u64 nic_t;
+ typedef unsigned long iopaddr_t;
++#ifndef _ASM_IA64_MADDR_H /* paddr_t is defined in asm-ia64/maddr.h */
+ typedef unsigned long paddr_t;
++#endif
+ typedef short cnodeid_t;
+
+ #endif /* _ASM_IA64_SN_TYPES_H */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/swiotlb.h linux-2.6.18-xen-3.3.0/include/asm-ia64/swiotlb.h
+--- linux-2.6.18.8/include/asm-ia64/swiotlb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/swiotlb.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,41 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
++
++/* SWIOTLB interface */
++
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++ int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction);
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++extern void swiotlb_init(void);
++
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
++#else
++#define swiotlb 0
++#endif
++
++#endif
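
[These declarations mirror the generic streaming-DMA API: map a kernel buffer, hand the returned bus address to the device, then unmap. A hedged sketch of that call order against the functions declared above, assuming kernel context and a valid struct device (demo_dma_write is a made-up driver helper):

    /* Hedged driver-side sketch; kernel context assumed. */
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <asm/swiotlb.h>

    static int demo_dma_write(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t bus = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (swiotlb_dma_mapping_error(bus))
            return -ENOMEM;

        /* ... program the device with 'bus', wait for completion ... */

        swiotlb_unmap_single(dev, bus, len, DMA_TO_DEVICE);
        return 0;
    }
]
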
+diff -rpuN linux-2.6.18.8/include/asm-ia64/synch_bitops.h linux-2.6.18-xen-3.3.0/include/asm-ia64/synch_bitops.h
+--- linux-2.6.18.8/include/asm-ia64/synch_bitops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/synch_bitops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,61 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
++
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
++
++#define ADDR (*(volatile long *) addr)
++
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++ set_bit(nr, addr);
++}
++
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++ clear_bit(nr, addr);
++}
++
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++ change_bit(nr, addr);
++}
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++ return test_and_set_bit(nr, addr);
++}
++
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++ return test_and_clear_bit(nr, addr);
++}
++
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++ return test_and_change_bit(nr, addr);
++}
++
++static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
++{
++ return test_bit(nr, addr);
++}
++
++static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
++{
++ return test_bit(nr, addr);
++}
++
++#define synch_cmpxchg ia64_cmpxchg4_acq
++
++#define synch_test_bit(nr,addr) \
++(__builtin_constant_p(nr) ? \
++ synch_const_test_bit((nr),(addr)) : \
++ synch_var_test_bit((nr),(addr)))
++
++#define synch_cmpxchg_subword synch_cmpxchg
++
++#endif /* __XEN_SYNCH_BITOPS_H__ */
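
[On ia64 the "synch" bitops can simply alias the regular ones because ia64's atomic bitops are always SMP-safe, unlike i386, where the Xen variants needed mandatory lock prefixes. A hedged sketch of the consumer pattern these exist for, draining shared-memory event bits set by the hypervisor or another domain (kernel context assumed; handle_port_demo is a made-up handler, and 'pending' would live in a page shared with Xen):

    /* Hedged sketch of the intended usage; names are stand-ins. */
    #include <asm/synch_bitops.h>

    static void handle_port_demo(int port);

    static void drain_pending_demo(volatile unsigned long *pending, int nbits)
    {
        int bit;

        for (bit = 0; bit < nbits; bit++) {
            /* Atomically claim the bit so the producer may re-set it. */
            if (synch_test_and_clear_bit(bit, pending))
                handle_port_demo(bit);
        }
    }
]
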
+diff -rpuN linux-2.6.18.8/include/asm-ia64/system.h linux-2.6.18-xen-3.3.0/include/asm-ia64/system.h
+--- linux-2.6.18.8/include/asm-ia64/system.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/system.h 2008-08-21 11:36:07.000000000 +0200
+@@ -123,7 +123,7 @@ extern struct ia64_boot_param {
+ #define __local_irq_save(x) \
+ do { \
+ ia64_stop(); \
+- (x) = ia64_getreg(_IA64_REG_PSR); \
++ (x) = ia64_get_psr_i(); \
+ ia64_stop(); \
+ ia64_rsm(IA64_PSR_I); \
+ } while (0)
+@@ -171,7 +171,7 @@ do { \
+ #endif /* !CONFIG_IA64_DEBUG_IRQ */
+
+ #define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+-#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
++#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_get_psr_i(); })
+
+ #define irqs_disabled() \
+ ({ \
+diff -rpuN linux-2.6.18.8/include/asm-ia64/xen/privop.h linux-2.6.18-xen-3.3.0/include/asm-ia64/xen/privop.h
+--- linux-2.6.18.8/include/asm-ia64/xen/privop.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/xen/privop.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,550 @@
++#ifndef _ASM_IA64_XEN_PRIVOP_H
++#define _ASM_IA64_XEN_PRIVOP_H
++
++/*
++ * Copyright (C) 2005 Hewlett-Packard Co
++ * Dan Magenheimer <dan.magenheimer@hp.com>
++ *
++ * Paravirtualizations of privileged operations for Xen/ia64
++ *
++ */
++
++#ifndef __ASSEMBLY__
++#include <linux/types.h> /* arch-ia64.h requires uint64_t */
++#include <linux/stringify.h>
++#endif
++#include <xen/interface/arch-ia64.h>
++
++#define IA64_PARAVIRTUALIZED
++
++/* At 1 MB, before per-cpu space but still addressable using addl instead
++ of movl. */
++#define XSI_BASE 0xfffffffffff00000
++
++/* Address of mapped regs. */
++#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
++
++#ifdef __ASSEMBLY__
++#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
++#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
++#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
++#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
++#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
++#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
++#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
++#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
++#define XEN_HYPER_GET_TPR break HYPERPRIVOP_GET_TPR
++#define XEN_HYPER_SET_TPR break HYPERPRIVOP_SET_TPR
++#define XEN_HYPER_EOI break HYPERPRIVOP_EOI
++#define XEN_HYPER_SET_ITM break HYPERPRIVOP_SET_ITM
++#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
++#define XEN_HYPER_PTC_GA break HYPERPRIVOP_PTC_GA
++#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
++#define XEN_HYPER_GET_RR break HYPERPRIVOP_GET_RR
++#define XEN_HYPER_SET_RR break HYPERPRIVOP_SET_RR
++#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
++#define XEN_HYPER_FC break HYPERPRIVOP_FC
++#define XEN_HYPER_GET_CPUID break HYPERPRIVOP_GET_CPUID
++#define XEN_HYPER_GET_PMD break HYPERPRIVOP_GET_PMD
++#define XEN_HYPER_GET_EFLAG break HYPERPRIVOP_GET_EFLAG
++#define XEN_HYPER_SET_EFLAG break HYPERPRIVOP_SET_EFLAG
++#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
++
++#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
++#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
++#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
++#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
++#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
++#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
++#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
++#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
++#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
++#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
++#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
++#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
++#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
++#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
++#endif
++
++#ifndef __ASSEMBLY__
++#define XEN_HYPER_SSM_I asm("break %0" : : "i" (HYPERPRIVOP_SSM_I): "memory")
++
++/************************************************/
++/* Instructions paravirtualized for correctness */
++/************************************************/
++
++/* "fc" and "thash" are privilege-sensitive instructions, meaning they
++ * may have different semantics depending on whether they are executed
++ * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
++ * be allowed to execute directly, lest incorrect semantics result. */
++#ifdef ASM_SUPPORTED
++static inline void
++xen_fc(unsigned long addr)
++{
++ register __u64 __addr asm ("r8") = addr;
++ asm volatile ("break %0":: "i"(HYPERPRIVOP_FC), "r"(__addr): "memory");
++}
++
++static inline unsigned long
++xen_thash(unsigned long addr)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ register __u64 __addr asm ("r8") = addr;
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res):
++ "i"(HYPERPRIVOP_THASH), "0"(__addr));
++ return ia64_intri_res;
++}
++#else
++extern void xen_fc(unsigned long addr);
++extern unsigned long xen_thash(unsigned long addr);
++#endif
++
++#define ia64_fc(addr) \
++do { \
++ if (is_running_on_xen()) \
++ xen_fc((unsigned long)(addr)); \
++ else \
++ __ia64_fc(addr); \
++} while (0)
++
++#define ia64_thash(addr) \
++({ \
++ unsigned long ia64_intri_res; \
++ if (is_running_on_xen()) \
++ ia64_intri_res = \
++ xen_thash((unsigned long)(addr)); \
++ else \
++ ia64_intri_res = __ia64_thash(addr); \
++ ia64_intri_res; \
++})
++
++/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
++ * is not currently used (though it may be in a long-format VHPT system!)
++ * and the semantics of cover only change if psr.ic is off, which is very
++ * rare (and currently non-existent outside of assembly code). */
++
++/* There are also privilege-sensitive registers. These registers are
++ * readable at any privilege level but only writable at PL0. */
++#ifdef ASM_SUPPORTED
++static inline unsigned long
++xen_get_cpuid(int index)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ register __u64 __index asm ("r8") = index;
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res):
++ "i"(HYPERPRIVOP_GET_CPUID), "0"(__index));
++ return ia64_intri_res;
++}
++
++static inline unsigned long
++xen_get_pmd(int index)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ register __u64 __index asm ("r8") = index;
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res):
++ "i"(HYPERPRIVOP_GET_PMD), "0O"(__index));
++ return ia64_intri_res;
++}
++#else
++extern unsigned long xen_get_cpuid(int index);
++extern unsigned long xen_get_pmd(int index);
++#endif
++
++#define ia64_get_cpuid(i) \
++({ \
++ unsigned long ia64_intri_res; \
++ if (is_running_on_xen()) \
++ ia64_intri_res = xen_get_cpuid(i); \
++ else \
++ ia64_intri_res = __ia64_get_cpuid(i); \
++ ia64_intri_res; \
++})
++
++#define ia64_get_pmd(i) \
++({ \
++ unsigned long ia64_intri_res; \
++ if (is_running_on_xen()) \
++ ia64_intri_res = xen_get_pmd(i); \
++ else \
++ ia64_intri_res = __ia64_get_pmd(i); \
++ ia64_intri_res; \
++})
++
++#ifdef ASM_SUPPORTED
++static inline unsigned long
++xen_get_eflag(void)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_EFLAG));
++ return ia64_intri_res;
++}
++
++static inline void
++xen_set_eflag(unsigned long val)
++{
++ register __u64 __val asm ("r8") = val;
++ asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_EFLAG), "r"(__val): "memory");
++}
++#else
++extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
++extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
++#endif
++
++/************************************************/
++/* Instructions paravirtualized for performance */
++/************************************************/
++
++/* Xen uses memory-mapped virtual privileged registers for access to many
++ * performance-sensitive privileged registers. Some, like the processor
++ * status register (psr), are broken up into multiple memory locations.
++ * Others, like "pend", are abstractions based on privileged registers.
++ * "Pend" is guaranteed to be set if reading cr.ivr would return a
++ * (non-spurious) interrupt. */
++#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
++#define XSI_PSR_I \
++ (*XEN_MAPPEDREGS->interrupt_mask_addr)
++#define xen_get_virtual_psr_i() \
++ (!XSI_PSR_I)
++#define xen_set_virtual_psr_i(_val) \
++ ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
++#define xen_set_virtual_psr_ic(_val) \
++ ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
++#define xen_get_virtual_pend() \
++ (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
++
++/* Hyperprivops are "break" instructions with a well-defined API.
++ * In particular, the virtual psr.ic bit must be off; in this way
++ * it is guaranteed never to conflict with a Linux break instruction.
++ * Normally, this is done in a Xen stub, but this one is frequent enough
++ * that we inline it. */
++#define xen_hyper_ssm_i() \
++({ \
++ XEN_HYPER_SSM_I; \
++})
++
++/* Turning off interrupts can be paravirtualized simply by writing
++ * to a memory-mapped virtual psr.i bit (implemented as an 8-bit bool). */
++#define xen_rsm_i() \
++{ \
++ xen_set_virtual_psr_i(0); \
++ barrier(); \
++}
++
++/* Turning on interrupts is a bit more complicated: write to the
++ * memory-mapped virtual psr.i bit first (to avoid a race condition),
++ * then if any interrupts were pending, we have to execute a hyperprivop
++ * to ensure the pending interrupt gets delivered; else we're done! */
++#define xen_ssm_i() \
++({ \
++ int old = xen_get_virtual_psr_i(); \
++ xen_set_virtual_psr_i(1); \
++ barrier(); \
++ if (!old && xen_get_virtual_pend()) \
++ xen_hyper_ssm_i(); \
++})
++
++#define xen_ia64_intrin_local_irq_restore(x) \
++{ \
++ if (is_running_on_xen()) { \
++ if ((x) & IA64_PSR_I) { xen_ssm_i(); } \
++ else { xen_rsm_i(); } \
++ } \
++ else __ia64_intrin_local_irq_restore((x)); \
++}
++
++#define xen_get_psr_i() \
++( \
++ (is_running_on_xen()) ? \
++ (xen_get_virtual_psr_i() ? IA64_PSR_I : 0) \
++ : __ia64_get_psr_i() \
++)
++
++#define xen_ia64_ssm(mask) \
++{ \
++ if ((mask)==IA64_PSR_I) { \
++ if (is_running_on_xen()) { xen_ssm_i(); } \
++ else { __ia64_ssm(mask); } \
++ } \
++ else { __ia64_ssm(mask); } \
++}
++
++#define xen_ia64_rsm(mask) \
++{ \
++ if ((mask)==IA64_PSR_I) { \
++ if (is_running_on_xen()) { xen_rsm_i(); } \
++ else { __ia64_rsm(mask); } \
++ } \
++ else { __ia64_rsm(mask); } \
++}
++
++
++/* Although all privileged operations can be left to trap and will
++ * be properly handled by Xen, some are frequent enough that we use
++ * hyperprivops for performance. */
++
++#ifndef ASM_SUPPORTED
++extern unsigned long xen_get_psr(void);
++extern unsigned long xen_get_ivr(void);
++extern unsigned long xen_get_tpr(void);
++extern void xen_set_itm(unsigned long);
++extern void xen_set_tpr(unsigned long);
++extern void xen_eoi(unsigned long);
++extern void xen_set_rr(unsigned long index, unsigned long val);
++extern unsigned long xen_get_rr(unsigned long index);
++extern void xen_set_kr(unsigned long index, unsigned long val);
++extern void xen_ptcga(unsigned long addr, unsigned long size);
++#else
++static inline unsigned long
++xen_get_psr(void)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_PSR));
++ return ia64_intri_res;
++}
++
++static inline unsigned long
++xen_get_ivr(void)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_IVR));
++ return ia64_intri_res;
++}
++
++static inline unsigned long
++xen_get_tpr(void)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res): "i"(HYPERPRIVOP_GET_TPR));
++ return ia64_intri_res;
++}
++
++static inline void
++xen_set_tpr(unsigned long val)
++{
++ register __u64 __val asm ("r8") = val;
++ asm volatile ("break %0"::
++ "i"(HYPERPRIVOP_GET_TPR), "r"(__val): "memory");
++}
++
++static inline void
++xen_eoi(unsigned long val)
++{
++ register __u64 __val asm ("r8") = val;
++ asm volatile ("break %0"::
++ "i"(HYPERPRIVOP_EOI), "r"(__val): "memory");
++}
++
++static inline void
++xen_set_itm(unsigned long val)
++{
++ register __u64 __val asm ("r8") = val;
++ asm volatile ("break %0":: "i"(HYPERPRIVOP_SET_ITM), "r"(__val): "memory");
++}
++
++static inline void
++xen_ptcga(unsigned long addr, unsigned long size)
++{
++ register __u64 __addr asm ("r8") = addr;
++ register __u64 __size asm ("r9") = size;
++ asm volatile ("break %0"::
++ "i"(HYPERPRIVOP_PTC_GA), "r"(__addr), "r"(__size): "memory");
++}
++
++static inline unsigned long
++xen_get_rr(unsigned long index)
++{
++ register __u64 ia64_intri_res asm ("r8");
++ register __u64 __index asm ("r8") = index;
++ asm volatile ("break %1":
++ "=r"(ia64_intri_res):
++ "i"(HYPERPRIVOP_GET_RR), "0"(__index));
++ return ia64_intri_res;
++}
++
++static inline void
++xen_set_rr(unsigned long index, unsigned long val)
++{
++ register __u64 __index asm ("r8") = index;
++ register __u64 __val asm ("r9") = val;
++ asm volatile ("break %0"::
++ "i"(HYPERPRIVOP_SET_RR), "r"(__index), "r"(__val): "memory");
++}
++
++static inline void
++xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
++ unsigned long val2, unsigned long val3, unsigned long val4)
++{
++ register __u64 __val0 asm ("r8") = val0;
++ register __u64 __val1 asm ("r9") = val1;
++ register __u64 __val2 asm ("r10") = val2;
++ register __u64 __val3 asm ("r11") = val3;
++ register __u64 __val4 asm ("r14") = val4;
++ asm volatile ("break %0" ::
++ "i"(HYPERPRIVOP_SET_RR0_TO_RR4),
++ "r"(__val0), "r"(__val1),
++ "r"(__val2), "r"(__val3), "r"(__val4): "memory");
++}
++
++static inline void
++xen_set_kr(unsigned long index, unsigned long val)
++{
++ register __u64 __index asm ("r8") = index;
++ register __u64 __val asm ("r9") = val;
++ asm volatile ("break %0"::
++ "i"(HYPERPRIVOP_SET_KR), "r"(__index), "r"(__val): "memory");
++}
++#endif
++
++/* Note: It may look wrong to test for is_running_on_xen() in each case.
++ * However, regnum is always a constant so, as written, the compiler
++ * eliminates the switch statement, whereas is_running_on_xen() must be
++ * tested dynamically. */
++#define xen_ia64_getreg(regnum) \
++({ \
++ __u64 ia64_intri_res; \
++ \
++ switch(regnum) { \
++ case _IA64_REG_PSR: \
++ ia64_intri_res = (is_running_on_xen()) ? \
++ xen_get_psr() : \
++ __ia64_getreg(regnum); \
++ break; \
++ case _IA64_REG_CR_IVR: \
++ ia64_intri_res = (is_running_on_xen()) ? \
++ xen_get_ivr() : \
++ __ia64_getreg(regnum); \
++ break; \
++ case _IA64_REG_CR_TPR: \
++ ia64_intri_res = (is_running_on_xen()) ? \
++ xen_get_tpr() : \
++ __ia64_getreg(regnum); \
++ break; \
++ case _IA64_REG_AR_EFLAG: \
++ ia64_intri_res = (is_running_on_xen()) ? \
++ xen_get_eflag() : \
++ __ia64_getreg(regnum); \
++ break; \
++ default: \
++ ia64_intri_res = __ia64_getreg(regnum); \
++ break; \
++ } \
++ ia64_intri_res; \
++})
++
++#define xen_ia64_setreg(regnum,val) \
++({ \
++ switch(regnum) { \
++ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: \
++ (is_running_on_xen()) ? \
++ xen_set_kr((regnum-_IA64_REG_AR_KR0), val) : \
++ __ia64_setreg(regnum,val); \
++ break; \
++ case _IA64_REG_CR_ITM: \
++ (is_running_on_xen()) ? \
++ xen_set_itm(val) : \
++ __ia64_setreg(regnum,val); \
++ break; \
++ case _IA64_REG_CR_TPR: \
++ (is_running_on_xen()) ? \
++ xen_set_tpr(val) : \
++ __ia64_setreg(regnum,val); \
++ break; \
++ case _IA64_REG_CR_EOI: \
++ (is_running_on_xen()) ? \
++ xen_eoi(val) : \
++ __ia64_setreg(regnum,val); \
++ break; \
++ case _IA64_REG_AR_EFLAG: \
++ (is_running_on_xen()) ? \
++ xen_set_eflag(val) : \
++ __ia64_setreg(regnum,val); \
++ break; \
++ default: \
++ __ia64_setreg(regnum,val); \
++ break; \
++ } \
++})
++
++#define ia64_ptcga(addr, size) \
++do { \
++ if (is_running_on_xen()) \
++ xen_ptcga((addr), (size)); \
++ else \
++ __ia64_ptcga((addr), (size)); \
++} while (0)
++
++#define ia64_set_rr(index, val) \
++do { \
++ if (is_running_on_xen()) \
++ xen_set_rr((index), (val)); \
++ else \
++ __ia64_set_rr((index), (val)); \
++} while (0)
++
++#define ia64_get_rr(index) \
++({ \
++ __u64 ia64_intri_res; \
++ if (is_running_on_xen()) \
++ ia64_intri_res = xen_get_rr((index)); \
++ else \
++ ia64_intri_res = __ia64_get_rr((index)); \
++ ia64_intri_res; \
++})
++
++#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \
++do { \
++ if (is_running_on_xen()) \
++ xen_set_rr0_to_rr4((val0), (val1), (val2), \
++ (val3), (val4)); \
++ else \
++ __ia64_set_rr0_to_rr4((val0), (val1), (val2), \
++ (val3), (val4)); \
++} while (0)
++
++#define ia64_getreg xen_ia64_getreg
++#define ia64_setreg xen_ia64_setreg
++#define ia64_ssm xen_ia64_ssm
++#define ia64_rsm xen_ia64_rsm
++#define ia64_intrin_local_irq_restore xen_ia64_intrin_local_irq_restore
++#define ia64_get_psr_i xen_get_psr_i
++
++/* The remainder of these are not performance-sensitive, so it is
++ * OK not to paravirtualize them and just take a privop trap and emulate. */
++#define ia64_hint __ia64_hint
++#define ia64_set_pmd __ia64_set_pmd
++#define ia64_itci __ia64_itci
++#define ia64_itcd __ia64_itcd
++#define ia64_itri __ia64_itri
++#define ia64_itrd __ia64_itrd
++#define ia64_tpa __ia64_tpa
++#define ia64_set_ibr __ia64_set_ibr
++#define ia64_set_pkr __ia64_set_pkr
++#define ia64_set_pmc __ia64_set_pmc
++#define ia64_get_ibr __ia64_get_ibr
++#define ia64_get_pkr __ia64_get_pkr
++#define ia64_get_pmc __ia64_get_pmc
++#define ia64_ptce __ia64_ptce
++#define ia64_ptcl __ia64_ptcl
++#define ia64_ptri __ia64_ptri
++#define ia64_ptrd __ia64_ptrd
++
++#endif /* !__ASSEMBLY__ */
++
++/* These routines utilize privilege-sensitive or performance-sensitive
++ * privileged instructions, so the code must be replaced with
++ * paravirtualized versions. */
++#define ia64_leave_kernel xen_leave_kernel
++#define ia64_leave_syscall xen_leave_syscall
++#define ia64_trace_syscall xen_trace_syscall
++#define ia64_ret_from_clone xen_ret_from_clone
++#define ia64_switch_to xen_switch_to
++#define ia64_pal_call_static xen_pal_call_static
++
++#endif /* _ASM_IA64_XEN_PRIVOP_H */
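
[As the note above the getreg/setreg wrappers says, the scheme relies on regnum being a compile-time constant: the switch folds away, leaving only the dynamic is_running_on_xen() test for the one register class actually named. A reduced user-space sketch of the folding, with stand-in register numbers and accessors (the statement expression is a GCC extension, as in the kernel macro):

    /* Illustrative only; registers and accessors are stand-ins. */
    #include <stdio.h>

    #define REG_PSR_DEMO 0
    #define REG_TPR_DEMO 1

    static int on_xen_demo(void) { return 1; }      /* dynamic in reality */
    static unsigned long xen_get_tpr_demo(void) { return 0x10; }
    static unsigned long native_getreg_demo(int r) { return 0xdead0000UL + r; }

    /* With a constant regnum the switch folds to a single arm. */
    #define demo_getreg(regnum)                                        \
    ({                                                                 \
        unsigned long __res;                                           \
        switch (regnum) {                                              \
        case REG_TPR_DEMO:                                             \
            __res = on_xen_demo() ? xen_get_tpr_demo()                 \
                                  : native_getreg_demo(regnum);        \
            break;                                                     \
        default:                                                       \
            __res = native_getreg_demo(regnum);                        \
            break;                                                     \
        }                                                              \
        __res;                                                         \
    })

    int main(void)
    {
        printf("tpr=0x%lx psr=0x%lx\n",
               demo_getreg(REG_TPR_DEMO), demo_getreg(REG_PSR_DEMO));
        return 0;
    }
]
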
+diff -rpuN linux-2.6.18.8/include/asm-ia64/xen/xcom_hcall.h linux-2.6.18-xen-3.3.0/include/asm-ia64/xen/xcom_hcall.h
+--- linux-2.6.18.8/include/asm-ia64/xen/xcom_hcall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/xen/xcom_hcall.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,67 @@
++/*
++ * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _LINUX_XENCOMM_HCALL_H_
++#define _LINUX_XENCOMM_HCALL_H_
++
++/* These functions create an inline or mini descriptor for the parameters
++ and call the corresponding xencomm_arch_hypercall_X.
++ Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
++ they want to use their own wrapper. */
++extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
++
++extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
++
++extern int xencomm_hypercall_xen_version(int cmd, void *arg);
++
++extern int xencomm_hypercall_physdev_op(int cmd, void *op);
++
++extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
++ unsigned int count);
++
++extern int xencomm_hypercall_sched_op(int cmd, void *arg);
++
++extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
++
++extern int xencomm_hypercall_callback_op(int cmd, void *arg);
++
++extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
++
++extern unsigned long xencomm_hypercall_hvm_op(int cmd, void *arg);
++
++extern int xencomm_hypercall_suspend(unsigned long srec);
++
++extern int xencomm_hypercall_xenoprof_op(int op, void *arg);
++
++extern int xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg,
++ unsigned long count);
++
++extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
++
++extern long xencomm_hypercall_opt_feature(void *arg);
++
++/* For privcmd. Locally declare the argument type to avoid an include storm.
++ Type coherency will be checked within privcmd.c. */
++struct privcmd_hypercall;
++extern int privcmd_hypercall(struct privcmd_hypercall *hypercall);
++
++extern int xen_foreign_p2m_expose(struct privcmd_hypercall *hypercall);
++
++extern int xencomm_hypercall_kexec_op(int cmd, void *arg);
++
++#endif /* _LINUX_XENCOMM_HCALL_H_ */
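
[These wrappers exist because the ia64 hypervisor cannot follow guest virtual addresses in hypercall arguments: each buffer is first described by a xencomm descriptor, essentially a list of the buffer's physical page addresses, and the hypercall is handed the physical address of that descriptor. Hedged as a sketch from memory of the Xen interface headers rather than the exact definition, the shape of the descriptor is roughly:

    /* Sketch of the xencomm descriptor concept; layout is illustrative. */
    #include <stdint.h>

    struct xencomm_desc_demo {
        uint32_t magic;      /* identifies a xencomm descriptor */
        uint32_t nr_addrs;   /* number of entries in address[] */
        uint64_t address[];  /* physical address of each buffer page */
    };

    /*
     * "Inline" descriptors skip even this: when a buffer is physically
     * contiguous, its physical address is passed directly with a flag bit
     * set, avoiding the allocation. That is what "inline or mini
     * descriptor" in the comment above refers to.
     */
]
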
+diff -rpuN linux-2.6.18.8/include/asm-ia64/xen/xencomm.h linux-2.6.18-xen-3.3.0/include/asm-ia64/xen/xencomm.h
+--- linux-2.6.18.8/include/asm-ia64/xen/xencomm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/xen/xencomm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _ASM_IA64_XENCOMM_H_
++#define _ASM_IA64_XENCOMM_H_
++
++#define is_kernel_addr(x) \
++ ((PAGE_OFFSET <= (x) && \
++ (x) < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || \
++ (KERNEL_START <= (x) && \
++ (x) < KERNEL_START + KERNEL_TR_PAGE_SIZE))
++
++/* Must be called before any hypercall. */
++extern void xencomm_initialize (void);
++
++#include <xen/xencomm.h>
++
++#endif /* _ASM_IA64_XENCOMM_H_ */
+diff -rpuN linux-2.6.18.8/include/asm-ia64/xenoprof.h linux-2.6.18-xen-3.3.0/include/asm-ia64/xenoprof.h
+--- linux-2.6.18.8/include/asm-ia64/xenoprof.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-ia64/xenoprof.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,48 @@
++/******************************************************************************
++ * asm-ia64/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __ASM_XENOPROF_H__
++#define __ASM_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++#undef HAVE_XENOPROF_CREATE_FILES
++
++struct xenoprof_init;
++void xenoprof_arch_init_counter(struct xenoprof_init *init);
++void xenoprof_arch_counter(void);
++void xenoprof_arch_start(void);
++void xenoprof_arch_stop(void);
++
++struct xenoprof_arch_shared_buffer {
++ struct resource* res;
++};
++
++struct xenoprof_shared_buffer;
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer *sbuf);
++struct xenoprof_get_buffer;
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer *get_buffer,
++ struct xenoprof_shared_buffer *sbuf);
++struct xenoprof_passive;
++int xenoprof_arch_set_passive(struct xenoprof_passive *pdomain,
++ struct xenoprof_shared_buffer *sbuf);
++
++#endif /* CONFIG_XEN */
++#endif /* __ASM_XENOPROF_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/mpic.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/mpic.h
+--- linux-2.6.18.8/include/asm-powerpc/mpic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/mpic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -305,6 +305,8 @@ struct mpic
+ #define MPIC_SPV_EOI 0x00000020
+ /* No passthrough disable */
+ #define MPIC_NO_PTHROU_DIS 0x00000040
++/* Skip reset of IPI vectors during init */
++#define MPIC_SKIP_IPI_INIT 0x00000080
+
+ /* MPIC HW modification ID */
+ #define MPIC_REGSET_MASK 0xf0000000
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/udbg.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/udbg.h
+--- linux-2.6.18.8/include/asm-powerpc/udbg.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/udbg.h 2008-08-21 11:36:07.000000000 +0200
+@@ -42,6 +42,7 @@ extern void __init udbg_init_debug_lpar(
+ extern void __init udbg_init_pmac_realmode(void);
+ extern void __init udbg_init_maple_realmode(void);
+ extern void __init udbg_init_iseries(void);
++extern void __init udbg_init_xen(void);
+ extern void __init udbg_init_rtas_panel(void);
+ extern void __init udbg_init_rtas_console(void);
+
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/xen/asm/gnttab_dma.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/gnttab_dma.h
+--- linux-2.6.18.8/include/asm-powerpc/xen/asm/gnttab_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/gnttab_dma.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,29 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright 2007 IBM Corp.
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#ifndef _ASM_PPC_GNTTAB_DMA_H
++#define _ASM_PPC_GNTTAB_DMA_H
++
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++ return 0;
++}
++
++#endif /* _ASM_PPC_GNTTAB_DMA_H */
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/xen/asm/hypercall.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/hypercall.h
+--- linux-2.6.18.8/include/asm-powerpc/xen/asm/hypercall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/hypercall.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,90 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Copyright 2007 IBM Corp.
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ * Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <asm/hvcall.h>
++#include <asm/page.h>
++#include <xen/xencomm.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/sched.h>
++
++#define XEN_MARK(a) ((a) | (~0UL << 16))
++
++extern int HYPERVISOR_console_io(int cmd, int count, char *str);
++extern int HYPERVISOR_event_channel_op(int cmd, void *op);
++extern int HYPERVISOR_xen_version(int cmd, void *arg);
++extern int HYPERVISOR_physdev_op(int cmd, void *op);
++extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
++ unsigned int count);
++extern int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
++extern int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
++extern int HYPERVISOR_multicall(void *call_list, int nr_calls);
++
++extern int HYPERVISOR_sched_op(int cmd, void *arg);
++extern int HYPERVISOR_poll(
++ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout);
++
++static inline int HYPERVISOR_shutdown(unsigned int reason)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = reason
++ };
++
++ return HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
++}
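++
++/*
++ * Editor's note: a minimal usage sketch, not part of the original patch.
++ * SHUTDOWN_reboot comes from <xen/interface/sched.h> (included above);
++ * the wrapper name xen_reboot is hypothetical.
++ *
++ *	static void xen_reboot(void)
++ *	{
++ *		(void)HYPERVISOR_shutdown(SHUTDOWN_reboot);
++ *	}
++ */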
++
++static inline int HYPERVISOR_set_timer_op(unsigned long arg)
++{
++ return plpar_hcall_norets(XEN_MARK(__HYPERVISOR_set_timer_op), arg);
++}
++
++extern int HYPERVISOR_suspend(unsigned long srec);
++extern int HYPERVISOR_kexec_op(unsigned long op, void *args);
++static inline unsigned long HYPERVISOR_hvm_op(int op, void *arg) {
++ return -ENOSYS;
++}
++
++static inline int
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, int count, int *success_count, domid_t domid)
++{
++ return -ENOSYS;
++}
++
++struct privcmd_hypercall;
++extern int privcmd_hypercall(struct privcmd_hypercall *hypercall);
++
++#endif /* __HYPERCALL_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/xen/asm/hypervisor.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/hypervisor.h
+--- linux-2.6.18.8/include/asm-powerpc/xen/asm/hypervisor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/hypervisor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,276 @@
++/******************************************************************************
++ * hypervisor.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <xen/interface/xen.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#include <asm/irq.h>
++
++extern shared_info_t *HYPERVISOR_shared_info;
++
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() (xen_start_info && \
++ (xen_start_info->flags & SIF_INITDOMAIN))
++#else
++#define is_initial_xendomain() 0
++#endif
++
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
++
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle (void);
++
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
++
++/* arch/xen/i386/kernel/head.S */
++void lgdt_finish(void);
++
++/* arch/xen/i386/mm/hypervisor.c */
++/*
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
++ */
++
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
++
++#ifndef CONFIG_XEN_SHADOW_MODE
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
++void xen_pud_pin(unsigned long ptr); /* x86_64 only */
++void xen_pud_unpin(unsigned long ptr); /* x86_64 only */
++void xen_pmd_pin(unsigned long ptr); /* x86_64 only */
++void xen_pmd_unpin(unsigned long ptr); /* x86_64 only */
++void xen_pte_pin(unsigned long ptr);
++void xen_pte_unpin(unsigned long ptr);
++#else
++#define xen_l1_entry_update(_p, _v) set_pte((_p), (_v))
++#define xen_l2_entry_update(_p, _v) set_pgd((_p), (_v))
++#define xen_pgd_pin(_p) ((void)0)
++#define xen_pgd_unpin(_p) ((void)0)
++#define xen_pte_pin(_p) ((void)0)
++#define xen_pte_unpin(_p) ((void)0)
++#endif
++
++void xen_set_ldt(unsigned long ptr, unsigned long bytes);
++void xen_machphys_update(unsigned long mfn, unsigned long pfn);
++
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
++
++/* Returns zero on success else negative errno. */
++static inline int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ return 0;
++}
++static inline void xen_destroy_contiguous_region(
++ unsigned long vstart, unsigned int order)
++{
++ return;
++}
++
++#include <asm/hypercall.h>
++
++/* BEGIN: all of these need a new home */
++struct vm_area_struct;
++int direct_remap_pfn_range(struct vm_area_struct *vma, unsigned long address,
++ unsigned long mfn, unsigned long size,
++ pgprot_t prot, domid_t domid);
++#define pfn_to_mfn(x) (x)
++#define mfn_to_pfn(x) (x)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define phys_to_machine_mapping_valid(pfn) (1)
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define machine_to_virt(m) (__va(m))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++
++#define PIRQ_BASE 0
++#define NR_PIRQS 256
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
++
++#define NR_IPIS 4 /* PPC_MSG_DEBUGGER_BREAK + 1 */
++
++#if NR_IRQS < (NR_PIRQS + NR_DYNIRQS)
++#error too many Xen IRQs
++#endif
++
++#define NR_IRQ_VECTORS NR_IRQS
++
++#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
++
++
++/* END: all of these need a new home */
++
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
++
++extern int is_running_on_xen(void);
++
++static inline void
++MULTI_update_va_mapping(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping;
++ mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++ mcl->args[1] = new_val.pte;
++ mcl->args[2] = flags;
++#elif defined(CONFIG_X86_PAE)
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = new_val.pte_high;
++ mcl->args[3] = flags;
++#elif defined(CONFIG_PPC64)
++ mcl->args[1] = pte_val(new_val);
++ mcl->args[2] = 0;
++ mcl->args[3] = flags;
++#else
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = 0;
++ mcl->args[3] = flags;
++#endif
++}
++
++static inline void
++MULTI_update_va_mapping_otherdomain(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags, domid_t domid)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
++ mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++ mcl->args[1] = new_val.pte;
++ mcl->args[2] = flags;
++ mcl->args[3] = domid;
++#elif defined(CONFIG_X86_PAE)
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = new_val.pte_high;
++ mcl->args[3] = flags;
++ mcl->args[4] = domid;
++#elif defined(CONFIG_PPC64)
++ mcl->args[1] = pte_val(new_val);
++ mcl->args[2] = 0;
++ mcl->args[3] = flags;
++ mcl->args[4] = domid;
++#else
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = 0;
++ mcl->args[3] = flags;
++ mcl->args[4] = domid;
++#endif
++}
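++
++/*
++ * Editor's note: an illustrative sketch, not part of the original patch.
++ * The MULTI_* helpers above only fill in one multicall entry each; the
++ * batch is then submitted with a single HYPERVISOR_multicall(). "va" and
++ * "mfn" are hypothetical locals; pfn_pte_ma() is defined later in this
++ * header and UVMF_TLB_FLUSH comes from <xen/interface/xen.h>.
++ *
++ *	multicall_entry_t mcl[2];
++ *
++ *	MULTI_update_va_mapping(&mcl[0], va,
++ *				pfn_pte_ma(mfn, PAGE_KERNEL), 0);
++ *	MULTI_update_va_mapping(&mcl[1], va + PAGE_SIZE,
++ *				pfn_pte_ma(mfn + 1, PAGE_KERNEL),
++ *				UVMF_TLB_FLUSH);
++ *	if (HYPERVISOR_multicall(mcl, 2))
++ *		BUG();
++ */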
++
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME(m) (INVALID_P2M_ENTRY)
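++
++/*
++ * Editor's note (comment added; not part of the original patch): this port
++ * runs auto-translated, so pfn == mfn by construction; a request to install
++ * a real foreign translation is only reported below, never recorded.
++ */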
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ if (pfn != mfn && mfn != INVALID_P2M_ENTRY)
++ printk(KERN_EMERG "%s: pfn: 0x%lx mfn: 0x%lx\n",
++ __func__, pfn, mfn);
++
++ return;
++}
++#define pfn_pte_ma(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++
++typedef unsigned long maddr_t;
++typedef unsigned long paddr_t;
++
++#ifdef CONFIG_XEN_SCRUB_PAGES
++
++static inline void scrub_pages(void *p, unsigned n)
++{
++ unsigned i;
++
++ for (i = 0; i < n; i++) {
++ clear_page(p);
++ p += PAGE_SIZE;
++ }
++}
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
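++
++/*
++ * Editor's note: a usage sketch, not part of the original patch. Pages are
++ * typically scrubbed just before their frames are handed back to the
++ * hypervisor (e.g. via XENMEM_decrease_reservation), so stale data cannot
++ * leak to the frames' next owner. "order" is a hypothetical local.
++ *
++ *	char *v = (char *)__get_free_pages(GFP_KERNEL, order);
++ *	...
++ *	scrub_pages(v, 1U << order);
++ *	... return the underlying frames to Xen ...
++ */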
++
++/*
++ * for blktap.c
++ * int create_lookup_pte_addr(struct mm_struct *mm,
++ * unsigned long address,
++ * uint64_t *ptep);
++ */
++#define create_lookup_pte_addr(mm, address, ptep) \
++ ({ \
++ printk(KERN_EMERG \
++ "%s:%d " \
++ "create_lookup_pte_addr() isn't supported.\n", \
++ __func__, __LINE__); \
++ BUG(); \
++ (-ENOSYS); \
++ })
++
++#endif /* __HYPERVISOR_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/xen/asm/maddr.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/maddr.h
+--- linux-2.6.18.8/include/asm-powerpc/xen/asm/maddr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/maddr.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,7 @@
++#ifndef _POWERPC_MADDR_H
++#define _POWERPC_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-powerpc/xen/asm/synch_bitops.h linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/synch_bitops.h
+--- linux-2.6.18.8/include/asm-powerpc/xen/asm/synch_bitops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-powerpc/xen/asm/synch_bitops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,100 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright 2006 IBM Corp.
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#ifndef __SYNCH_BITOPS_H__
++#define __SYNCH_BITOPS_H__
++
++#include <linux/config.h>
++#include <xen/interface/xen.h>
++
++#ifdef CONFIG_SMP
++#include <asm/bitops.h>
++
++#define synch_change_bit(a,b) change_bit(a,b)
++#define synch_clear_bit(a,b) clear_bit(a,b)
++#define synch_const_test_bit(a,b) const_test_bit(a,b)
++#define synch_set_bit(a,b) set_bit(a,b)
++#define synch_test_and_set_bit(a,b) test_and_set_bit(a,b)
++#define synch_test_and_change_bit(a,b) test_and_change_bit(a,b)
++#define synch_test_and_clear_bit(a,b) test_and_clear_bit(a,b)
++#define synch_test_bit(a,b) test_bit(a,b)
++
++static __inline__ unsigned long
++__synch_cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new)
++{
++ int idx;
++ volatile unsigned int *xp = (unsigned int *)((ulong)p & ~(0x3UL));
++ union {
++ unsigned int word;
++ struct {
++ unsigned short s[2];
++ }s;
++ } xold, xnew;
++
++ /* we could start the reservation here and copy the u32
++ * assembler, but I don't think it will gain us a whole
++ * lot. */
++ xold.word = *xp;
++ xnew.word = xold.word;
++ idx = ((ulong)p >> 1) & 0x1;
++ xold.s.s[idx] = old;
++ xnew.s.s[idx] = new;
++
++ return __cmpxchg_u32(xp, xold.word, xnew.word);
++}
++
++/*
++ * This function doesn't exist, so you'll get a linker error
++ * if something tries to do an invalid xchg().
++ */
++extern void __synch_cmpxchg_called_with_bad_pointer(void);
++static __inline__ unsigned long
++__synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
++ unsigned int size)
++{
++ switch (size) {
++ case 2:
++ return __synch_cmpxchg_u16(ptr, old, new);
++ case 4:
++ return __cmpxchg_u32(ptr, old, new);
++#ifdef CONFIG_PPC64
++ case 8:
++ return __cmpxchg_u64(ptr, old, new);
++#endif
++ }
++ __synch_cmpxchg_called_with_bad_pointer();
++ return old;
++}
++
++#define synch_cmpxchg(ptr,o,n) \
++ ({ \
++ __typeof__(*(ptr)) _o_ = (o); \
++ __typeof__(*(ptr)) _n_ = (n); \
++ (__typeof__(*(ptr))) __synch_cmpxchg((ptr), (unsigned long)_o_, \
++ (unsigned long)_n_, sizeof(*(ptr))); \
++ })
++
++#define synch_cmpxchg_subword(ptr,o,n) __synch_cmpxchg_u16((ptr), (o), (n))
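++
++/*
++ * Editor's note: an illustrative sketch, not part of the original patch.
++ * synch_cmpxchg() dispatches on sizeof(*(ptr)); a classic compare-and-swap
++ * loop setting bit 0 of a 32-bit word shared with the hypervisor ("flags"
++ * is a hypothetical pointer) looks like:
++ *
++ *	unsigned int old, val;
++ *
++ *	do {
++ *		old = *flags;
++ *		val = old | 1;
++ *	} while (synch_cmpxchg(flags, old, val) != old);
++ */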
++
++#else
++#error "this only works for CONFIG_SMP"
++#endif
++
++#endif /* __SYNCH_BITOPS_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/acpi.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/acpi.h
+--- linux-2.6.18.8/include/asm-x86_64/acpi.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/acpi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -28,6 +28,9 @@
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_XEN
++#include <xen/interface/platform.h>
++#endif
+ #include <acpi/pdc_intel.h>
+
+ #define COMPILER_DEPENDENT_INT64 long long
+@@ -129,6 +132,27 @@ static inline void acpi_disable_pci(void
+ }
+ extern int acpi_irq_balance_set(char *str);
+
++#ifdef CONFIG_XEN
++static inline int acpi_notify_hypervisor_state(u8 sleep_state,
++ u32 pm1a_cnt_val,
++ u32 pm1b_cnt_val)
++{
++ struct xen_platform_op op = {
++ .cmd = XENPF_enter_acpi_sleep,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u = {
++ .enter_acpi_sleep = {
++ .pm1a_cnt_val = pm1a_cnt_val,
++ .pm1b_cnt_val = pm1b_cnt_val,
++ .sleep_state = sleep_state,
++ },
++ },
++ };
++
++ return HYPERVISOR_platform_op(&op);
++}
++#endif /* CONFIG_XEN */
++
+ #else /* !CONFIG_ACPI */
+
+ #define acpi_lapic 0
+@@ -152,7 +176,6 @@ extern unsigned long acpi_wakeup_address
+
+ /* early initialization routine */
+ extern void acpi_reserve_bootmem(void);
+-
+ #endif /*CONFIG_ACPI_SLEEP*/
+
+ #define boot_cpu_physical_apicid boot_cpu_id
+@@ -162,7 +185,9 @@ extern int acpi_pci_disabled;
+
+ extern u8 x86_acpiid_to_apicid[];
+
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT 1
++#endif
+
+ extern int acpi_skip_timer_override;
+
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/agp.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/agp.h
+--- linux-2.6.18.8/include/asm-x86_64/agp.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/agp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -10,8 +10,10 @@
+ * with different cachability attributes for the same page.
+ */
+
+-int map_page_into_agp(struct page *page);
+-int unmap_page_from_agp(struct page *page);
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
++#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+ #define flush_agp_mappings() global_flush_tlb()
+
+ /* Could use CLFLUSH here if the cpu supports it. But then it would
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/apic.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/apic.h
+--- linux-2.6.18.8/include/asm-x86_64/apic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/apic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -98,11 +98,13 @@ extern void setup_APIC_extened_lvt(unsig
+ extern int disable_timer_pin_1;
+
+
++#ifndef CONFIG_XEN
+ void smp_send_timer_broadcast_ipi(void);
+ void switch_APIC_timer_to_ipi(void *cpumask);
+ void switch_ipi_to_APIC_timer(void *cpumask);
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
++#endif
+
+ #endif /* CONFIG_X86_LOCAL_APIC */
+
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/io_apic.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/io_apic.h
+--- linux-2.6.18.8/include/asm-x86_64/io_apic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/io_apic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -12,7 +12,7 @@
+
+ #ifdef CONFIG_X86_IO_APIC
+
+-#ifdef CONFIG_PCI_MSI
++#if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN)
+ static inline int use_pci_vector(void) {return 1;}
+ static inline void disable_edge_ioapic_vector(unsigned int vector) { }
+ static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/kexec.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/kexec.h
+--- linux-2.6.18.8/include/asm-x86_64/kexec.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/kexec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -1,6 +1,27 @@
+ #ifndef _X86_64_KEXEC_H
+ #define _X86_64_KEXEC_H
+
++#define PA_CONTROL_PAGE 0
++#define VA_CONTROL_PAGE 1
++#define PA_PGD 2
++#define VA_PGD 3
++#define PA_PUD_0 4
++#define VA_PUD_0 5
++#define PA_PMD_0 6
++#define VA_PMD_0 7
++#define PA_PTE_0 8
++#define VA_PTE_0 9
++#define PA_PUD_1 10
++#define VA_PUD_1 11
++#define PA_PMD_1 12
++#define VA_PMD_1 13
++#define PA_PTE_1 14
++#define VA_PTE_1 15
++#define PA_TABLE_PAGE 16
++#define PAGES_NR 17
++
++#ifndef __ASSEMBLY__
++
+ #include <linux/string.h>
+
+ #include <asm/page.h>
+@@ -64,4 +85,25 @@ static inline void crash_setup_regs(stru
+ newregs->rip = (unsigned long)current_text_addr();
+ }
+ }
++
++NORET_TYPE void
++relocate_kernel(unsigned long indirection_page,
++ unsigned long page_list,
++ unsigned long start_address) ATTRIB_NORET;
++
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of
++ * the pseudo-physical address which would be given by the default macros.
++ */
++
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
++
++#endif /* __ASSEMBLY__ */
++
+ #endif /* _X86_64_KEXEC_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/agp.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/agp.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/agp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/agp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/cacheflush.h>
++#include <asm/system.h>
++
++/*
++ * Functions to keep the agpgart mappings coherent.
++ * The GART gives the CPU a physical alias of memory. The alias is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cachability attributes for the same page.
++ */
++
++#define map_page_into_agp(page) ( \
++ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++ change_page_attr(page, 1, PAGE_KERNEL))
++#define flush_agp_mappings() global_flush_tlb()
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++ need to be called for each cacheline of the whole page so it may not be
++ worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order) ({ \
++ char *_t; dma_addr_t _d; \
++ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
++ _t; })
++#define free_gatt_pages(table, order) \
++ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/arch_hooks.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/arch_hooks.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/arch_hooks.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/arch_hooks.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,27 @@
++#ifndef _ASM_ARCH_HOOKS_H
++#define _ASM_ARCH_HOOKS_H
++
++#include <linux/interrupt.h>
++
++/*
++ * linux/include/asm/arch_hooks.h
++ *
++ * define the architecture specific hooks
++ */
++
++/* these aren't arch hooks, they are generic routines
++ * that can be used by the hooks */
++extern void init_ISA_irqs(void);
++extern void apic_intr_init(void);
++extern void smp_intr_init(void);
++extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
++
++/* these are the defined hooks */
++extern void intr_init_hook(void);
++extern void pre_intr_init_hook(void);
++extern void pre_setup_arch_hook(void);
++extern void trap_init_hook(void);
++extern void time_init_hook(void);
++extern void mca_nmi_hook(void);
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/bootsetup.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/bootsetup.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/bootsetup.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/bootsetup.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,42 @@
++
++#ifndef _X86_64_BOOTSETUP_H
++#define _X86_64_BOOTSETUP_H 1
++
++#define BOOT_PARAM_SIZE 4096
++extern char x86_boot_params[BOOT_PARAM_SIZE];
++
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++#define PARAM ((unsigned char *)x86_boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
++
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO (*(struct edid_info *) (PARAM+0x440))
++
++#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
++#define COMMAND_LINE saved_command_line
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/desc.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/desc.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/desc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/desc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,265 @@
++/* Written 2000 by Andi Kleen */
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <linux/threads.h>
++#include <asm/ldt.h>
++
++#ifndef __ASSEMBLY__
++
++#include <linux/string.h>
++#include <linux/smp.h>
++
++#include <asm/segment.h>
++#include <asm/mmu.h>
++
++// 8 byte segment descriptor
++struct desc_struct {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
++ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
++} __attribute__((packed));
++
++struct n_desc_struct {
++ unsigned int a,b;
++};
++
++enum {
++ GATE_INTERRUPT = 0xE,
++ GATE_TRAP = 0xF,
++ GATE_CALL = 0xC,
++};
++
++// 16byte gate
++struct gate_struct {
++ u16 offset_low;
++ u16 segment;
++ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
++ u16 offset_middle;
++ u32 offset_high;
++ u32 zero1;
++} __attribute__((packed));
++
++#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
++#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
++#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
++
++enum {
++ DESC_TSS = 0x9,
++ DESC_LDT = 0x2,
++};
++
++// LDT or TSS descriptor in the GDT. 16 bytes.
++struct ldttss_desc {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
++ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
++ u32 base3;
++ u32 zero1;
++} __attribute__((packed));
++
++struct desc_ptr {
++ unsigned short size;
++ unsigned long address;
++} __attribute__((packed)) ;
++
++extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
++
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++
++#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
++
++static inline void clear_LDT(void)
++{
++ int cpu = get_cpu();
++
++ /*
++ * NB. We load the default_ldt for lcall7/27 handling on demand, as
++ * it slows down context switching. No one uses it anyway.
++ */
++ cpu = cpu; /* XXX avoid compiler warning */
++ xen_set_ldt(NULL, 0);
++ put_cpu();
++}
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++#ifndef CONFIG_X86_NO_IDT
++extern struct gate_struct idt_table[];
++#endif
++extern struct desc_ptr cpu_gdt_descr[];
++
++/* the cpu gdt accessor */
++#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
++
++static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
++{
++ struct gate_struct s;
++ s.offset_low = PTR_LOW(func);
++ s.segment = __KERNEL_CS;
++ s.ist = ist;
++ s.p = 1;
++ s.dpl = dpl;
++ s.zero0 = 0;
++ s.zero1 = 0;
++ s.type = type;
++ s.offset_middle = PTR_MIDDLE(func);
++ s.offset_high = PTR_HIGH(func);
++ /* does not need to be atomic because it is only done once at setup time */
++ memcpy(adr, &s, 16);
++}
++
++#ifndef CONFIG_X86_NO_IDT
++static inline void set_intr_gate(int nr, void *func)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
++}
++
++static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
++}
++
++static inline void set_system_gate(int nr, void *func)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
++}
++
++static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
++{
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
++}
++#endif
++
++static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
++ unsigned size)
++{
++ struct ldttss_desc d;
++ memset(&d,0,sizeof(d));
++ d.limit0 = size & 0xFFFF;
++ d.base0 = PTR_LOW(tss);
++ d.base1 = PTR_MIDDLE(tss) & 0xFF;
++ d.type = type;
++ d.p = 1;
++ d.limit1 = (size >> 16) & 0xF;
++ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
++ d.base3 = PTR_HIGH(tss);
++ memcpy(ptr, &d, 16);
++}
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void set_tss_desc(unsigned cpu, void *addr)
++{
++ /*
++ * sizeof(unsigned long) coming from an extra "long" at the end
++ * of the iobitmap. See tss_struct definition in processor.h
++ *
++ * The -1 is because seg base+limit should point to the address of
++ * the last valid byte
++ */
++ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
++ (unsigned long)addr, DESC_TSS,
++ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
++}
++#endif
++
++static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
++{
++ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
++ DESC_LDT, size * 8 - 1);
++}
++
++static inline void set_seg_base(unsigned cpu, int entry, void *base)
++{
++ struct desc_struct *d = &cpu_gdt(cpu)[entry];
++ u32 addr = (u32)(u64)base;
++ BUG_ON((u64)base >> 32);
++ d->base0 = addr & 0xffff;
++ d->base1 = (addr >> 16) & 0xff;
++ d->base2 = (addr >> 24) & 0xff;
++}
++
++#define LDT_entry_a(info) \
++ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++/* Don't allow setting of the lm bit. It is useless anyway because
++ 64bit system calls require __USER_CS. */
++#define LDT_entry_b(info) \
++ (((info)->base_addr & 0xff000000) | \
++ (((info)->base_addr & 0x00ff0000) >> 16) | \
++ ((info)->limit & 0xf0000) | \
++ (((info)->read_exec_only ^ 1) << 9) | \
++ ((info)->contents << 10) | \
++ (((info)->seg_not_present ^ 1) << 15) | \
++ ((info)->seg_32bit << 22) | \
++ ((info)->limit_in_pages << 23) | \
++ ((info)->useable << 20) | \
++ /* ((info)->lm << 21) | */ \
++ 0x7000)
++
++#define LDT_empty(info) (\
++ (info)->base_addr == 0 && \
++ (info)->limit == 0 && \
++ (info)->contents == 0 && \
++ (info)->read_exec_only == 1 && \
++ (info)->seg_32bit == 0 && \
++ (info)->limit_in_pages == 0 && \
++ (info)->seg_not_present == 1 && \
++ (info)->useable == 0 && \
++ (info)->lm == 0)
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#if 0
++ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
++ gdt[0] = t->tls_array[0];
++ gdt[1] = t->tls_array[1];
++ gdt[2] = t->tls_array[2];
++#endif
++#define C(i) \
++ if (HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), \
++ t->tls_array[i])) \
++ BUG();
++
++ C(0); C(1); C(2);
++#undef C
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
++{
++ void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count))
++ segments = NULL;
++
++ xen_set_ldt(segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ load_LDT_nolock(pc, cpu);
++ put_cpu();
++}
++
++extern struct desc_ptr idt_descr;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/dma-mapping.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/dma-mapping.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/dma-mapping.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/dma-mapping.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,207 @@
++#ifndef _X8664_DMA_MAPPING_H
++#define _X8664_DMA_MAPPING_H 1
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++
++#include <asm/scatterlist.h>
++#include <asm/io.h>
++#include <asm/swiotlb.h>
++
++struct dma_mapping_ops {
++ int (*mapping_error)(dma_addr_t dma_addr);
++ void* (*alloc_coherent)(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp);
++ void (*free_coherent)(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
++ size_t size, int direction);
++ /* like map_single, but doesn't check the device mask */
++ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
++ size_t size, int direction);
++ void (*unmap_single)(struct device *dev, dma_addr_t addr,
++ size_t size, int direction);
++ void (*sync_single_for_cpu)(struct device *hwdev,
++ dma_addr_t dma_handle, size_t size,
++ int direction);
++ void (*sync_single_for_device)(struct device *hwdev,
++ dma_addr_t dma_handle, size_t size,
++ int direction);
++ void (*sync_single_range_for_cpu)(struct device *hwdev,
++ dma_addr_t dma_handle, unsigned long offset,
++ size_t size, int direction);
++ void (*sync_single_range_for_device)(struct device *hwdev,
++ dma_addr_t dma_handle, unsigned long offset,
++ size_t size, int direction);
++ void (*sync_sg_for_cpu)(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int direction);
++ void (*sync_sg_for_device)(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int direction);
++ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++ void (*unmap_sg)(struct device *hwdev,
++ struct scatterlist *sg, int nents,
++ int direction);
++ int (*dma_supported)(struct device *hwdev, u64 mask);
++ int is_phys;
++};
++
++extern dma_addr_t bad_dma_address;
++extern struct dma_mapping_ops* dma_ops;
++extern int iommu_merge;
++
++static inline int valid_dma_direction(int dma_direction)
++{
++ return ((dma_direction == DMA_BIDIRECTIONAL) ||
++ (dma_direction == DMA_TO_DEVICE) ||
++ (dma_direction == DMA_FROM_DEVICE));
++}
++
++#if 0
++static inline int dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (dma_ops->mapping_error)
++ return dma_ops->mapping_error(dma_addr);
++
++ return (dma_addr == bad_dma_address);
++}
++
++extern void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp);
++extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle);
++
++static inline dma_addr_t
++dma_map_single(struct device *hwdev, void *ptr, size_t size,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ return dma_ops->map_single(hwdev, ptr, size, direction);
++}
++
++static inline void
++dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ dma_ops->unmap_single(dev, addr, size, direction);
++}
++
++#define dma_map_page(dev,page,offset,size,dir) \
++ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
++
++#define dma_unmap_page dma_unmap_single
++
++static inline void
++dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++ size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_for_cpu)
++ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
++ direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
++ size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_for_device)
++ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
++ direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_range_for_cpu) {
++ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
++ }
++
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_range_for_device)
++ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
++ offset, size, direction);
++
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_sg_for_cpu)
++ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_sg_for_device) {
++ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
++ }
++
++ flush_write_buffers();
++}
++
++static inline int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ return dma_ops->map_sg(hwdev, sg, nents, direction);
++}
++
++static inline void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ dma_ops->unmap_sg(hwdev, sg, nents, direction);
++}
++
++extern int dma_supported(struct device *hwdev, u64 mask);
++
++/* same for gart, swiotlb, and nommu */
++static inline int dma_get_cache_alignment(void)
++{
++ return boot_cpu_data.x86_clflush_size;
++}
++
++#define dma_is_consistent(h) 1
++
++extern int dma_set_mask(struct device *dev, u64 mask);
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
++{
++ flush_write_buffers();
++}
++
++extern struct device fallback_dev;
++extern int panic_on_overflow;
++#endif
++
++#endif /* _X8664_DMA_MAPPING_H */
++
++#include <asm-i386/mach-xen/asm/dma-mapping.h>
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/e820.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/e820.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/e820.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/e820.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,66 @@
++/*
++ * structures and definitions for the int 15, ax=e820 memory map
++ * scheme.
++ *
++ * In a nutshell, setup.S populates a scratch table in the
++ * empty_zero_block that contains a list of usable address/size
++ * duples. In setup.c, this information is transferred into the e820map,
++ * and in init.c/numa.c, that new information is used to mark pages
++ * reserved or not.
++ */
++#ifndef __E820_HEADER
++#define __E820_HEADER
++
++#include <linux/mmzone.h>
++
++#define E820MAP 0x2d0 /* our map */
++#define E820MAX 128 /* number of entries in E820MAP */
++#define E820NR 0x1e8 /* # entries in E820MAP */
++
++#define E820_RAM 1
++#define E820_RESERVED 2
++#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
++#define E820_NVS 4
++
++#define HIGH_MEMORY (1024*1024)
++
++#define LOWMEMSIZE() (0x9f000)
++
++#ifndef __ASSEMBLY__
++struct e820entry {
++ u64 addr; /* start of memory segment */
++ u64 size; /* size of memory segment */
++ u32 type; /* type of memory segment */
++} __attribute__((packed));
++
++struct e820map {
++ int nr_map;
++ struct e820entry map[E820MAX];
++};
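++
++/*
++ * Editor's note: an illustrative sketch, not part of the original patch,
++ * of walking the map in the style of e820_print_map(); "e820" is the
++ * global declared further down in this header.
++ *
++ *	int i;
++ *
++ *	for (i = 0; i < e820.nr_map; i++)
++ *		if (e820.map[i].type == E820_RAM)
++ *			printk("RAM: %016Lx-%016Lx\n", e820.map[i].addr,
++ *			       e820.map[i].addr + e820.map[i].size);
++ */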
++
++extern unsigned long find_e820_area(unsigned long start, unsigned long end,
++ unsigned size);
++extern void add_memory_region(unsigned long start, unsigned long size,
++ int type);
++extern void setup_memory_region(void);
++extern void contig_e820_setup(void);
++extern unsigned long e820_end_of_ram(void);
++extern void e820_reserve_resources(struct e820entry *e820, int nr_map);
++extern void e820_print_map(char *who);
++extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
++extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
++
++extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
++extern void e820_setup_gap(struct e820entry *e820, int nr_map);
++extern unsigned long e820_hole_size(unsigned long start_pfn,
++ unsigned long end_pfn);
++
++extern void __init parse_memopt(char *p, char **end);
++extern void __init parse_memmapopt(char *p, char **end);
++
++extern struct e820map e820;
++
++extern unsigned ebda_addr, ebda_size;
++#endif/*!__ASSEMBLY__*/
++
++#endif/*__E820_HEADER*/
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/fixmap.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/fixmap.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/fixmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/fixmap.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,112 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++#include <linux/kernel.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/vsyscall.h>
++#include <asm/vsyscall32.h>
++#include <asm/acpi.h>
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process.
++ *
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages. (or larger if used with an increment
++ * higher than 1) use fixmap_set(idx,phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++
++enum fixed_addresses {
++ VSYSCALL_LAST_PAGE,
++ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
++ VSYSCALL_HPET,
++ FIX_HPET_BASE,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ FIX_IO_APIC_BASE_0,
++ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_ACPI
++ FIX_ACPI_BEGIN,
++ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++ FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS 256
++ FIX_ISAMAP_END,
++ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++ __end_of_permanent_fixed_addresses,
++ /* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS 16
++ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++ __end_of_fixed_addresses
++};
++
++extern void __set_fixmap (enum fixed_addresses idx,
++ unsigned long phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++ __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
++#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
++
++/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
++#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
++#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
++
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++ /*
++ * this branch gets completely eliminated after inlining,
++ * except when someone tries to use fixaddr indices in an
++ * illegal way. (such as mixing up address types or using
++ * out-of-range indices).
++ *
++ * If it doesn't get removed, the linker will complain
++ * loudly with a reasonably clear error message.
++ */
++ if (idx >= __end_of_fixed_addresses)
++ __this_fixmap_does_not_exist();
++
++ return __fix_to_virt(idx);
++}
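++
++/*
++ * Editor's note: a usage sketch, not part of the original patch. The Xen
++ * shared info page, for instance, is given a fixed virtual address roughly
++ * like this during early setup:
++ *
++ *	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ *	HYPERVISOR_shared_info =
++ *		(shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ */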
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/gnttab_dma.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/gnttab_dma.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/gnttab_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/gnttab_dma.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1 @@
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/hw_irq.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/hw_irq.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/hw_irq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/hw_irq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,136 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
++
++/*
++ * linux/include/asm/hw_irq.h
++ *
++ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ * moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ * IRQ/IPI changes taken from work by Thomas Radke
++ * <tomsoft@informatik.tu-chemnitz.de>
++ *
++ * hacked by Andi Kleen for x86-64.
++ */
++
++#ifndef __ASSEMBLY__
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <linux/profile.h>
++#include <linux/smp.h>
++
++struct hw_interrupt_type;
++#endif
++
++#define NMI_VECTOR 0x02
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define IA32_SYSCALL_VECTOR 0x80
++
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare'; they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ */
++#ifndef CONFIG_XEN
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++#define RESCHEDULE_VECTOR 0xfd
++#define CALL_FUNCTION_VECTOR 0xfc
++/* fb free - please don't re-add KDB here because it's useless
++   (hint - think what an NMI bit does to a vector) */
++#define THERMAL_APIC_VECTOR 0xfa
++#define THRESHOLD_APIC_VECTOR 0xf9
++/* f8 free */
++#define INVALIDATE_TLB_VECTOR_END 0xf7
++#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
++
++#define NUM_INVALIDATE_TLB_VECTORS 8
++#endif
++
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
++
++
++#ifndef __ASSEMBLY__
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq) (irq_vector[irq])
++#define AUTO_ASSIGN -1
++
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
++
++extern void disable_8259A_irq(unsigned int irq);
++extern void enable_8259A_irq(unsigned int irq);
++extern int i8259A_irq_pending(unsigned int irq);
++extern void make_8259A_irq(unsigned int irq);
++extern void init_8259A(int aeoi);
++extern void FASTCALL(send_IPI_self(int vector));
++extern void init_VISWS_APIC_irqs(void);
++extern void setup_IO_APIC(void);
++extern void disable_IO_APIC(void);
++#define print_IO_APIC()
++extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++extern void send_IPI(int dest, int vector);
++extern void setup_ioapic_dest(void);
++
++extern unsigned long io_apic_irqs;
++
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
++
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#include <asm/ptrace.h>
++
++#define IRQ_NAME2(nr) nr##_interrupt(void)
++#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
++
++/*
++ * SMP has a few special interrupts for IPI messages
++ */
++
++#define BUILD_IRQ(nr) \
++asmlinkage void IRQ_NAME(nr); \
++__asm__( \
++"\n.p2align\n" \
++"IRQ" #nr "_interrupt:\n\t" \
++ "push $~(" #nr ") ; " \
++ "jmp common_interrupt");
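++
++/*
++ * Editor's note (not part of the original patch): BUILD_IRQ(0x20), for
++ * example, expands to the stub
++ *
++ *	asmlinkage void IRQ0x20_interrupt(void);
++ *	__asm__("\n.p2align\n"
++ *		"IRQ0x20_interrupt:\n\t"
++ *		"push $~(0x20) ; jmp common_interrupt");
++ *
++ * i.e. the negated vector number is pushed and control falls into the
++ * shared common_interrupt entry path.
++ */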
++
++#define platform_legacy_irq(irq) ((irq) < 16)
++
++#endif
++
++#endif /* _ASM_HW_IRQ_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/hypercall.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/hypercall.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/hypercall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/hypercall.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,415 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * 64-bit updates:
++ * Benjamin Liu <benjamin.liu@intel.com>
++ * Jun Nakajima <jun.nakajima@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <linux/string.h> /* memcpy() */
++#include <linux/stringify.h>
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name) \
++ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name) \
++ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
++ "add hypercall_stubs(%%rip),%%rax; " \
++ "call *%%rax"
++#endif
++
++#define _hypercall0(type, name) \
++({ \
++ type __res; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res) \
++ : \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ type __res; \
++ long __ign1; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1) \
++ : "1" ((long)(a1)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ type __res; \
++ long __ign1, __ign2; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
++ : "1" ((long)(a1)), "2" ((long)(a2)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ register long __arg4 asm("r10") = (long)(a4); \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3), "+r" (__arg4) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ register long __arg4 asm("r10") = (long)(a4); \
++ register long __arg5 asm("r8") = (long)(a5); \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
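++
++/*
++ * Editor's note (not part of the original patch): on x86-64 these macros
++ * marshal up to five arguments into %rdi, %rsi, %rdx, %r10 and %r8 and
++ * take the result back in %rax, matching Xen's 64-bit hypercall ABI. With
++ * CONFIG_XEN, _hypercall2(int, xen_version, cmd, arg) is roughly:
++ *
++ *	int __res;
++ *	long __ign1, __ign2;
++ *	asm volatile (
++ *		"call hypercall_page + (__HYPERVISOR_xen_version * 32)"
++ *		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)
++ *		: "1" ((long)cmd), "2" ((long)arg)
++ *		: "memory" );
++ *
++ * after which __res holds the hypercall's return value.
++ */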
++
++static inline int __must_check
++HYPERVISOR_set_trap_table(
++ const trap_info_t *table)
++{
++ return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int __must_check
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_mmuext_op(
++ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_set_gdt(
++ unsigned long *frame_list, unsigned int entries)
++{
++ return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int __must_check
++HYPERVISOR_stack_switch(
++ unsigned long ss, unsigned long esp)
++{
++ return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int __must_check
++HYPERVISOR_set_callbacks(
++ unsigned long event_address, unsigned long failsafe_address,
++ unsigned long syscall_address)
++{
++ return _hypercall3(int, set_callbacks,
++ event_address, failsafe_address, syscall_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++ int set)
++{
++ return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op_compat(
++ int cmd, unsigned long arg)
++{
++ return _hypercall2(int, sched_op_compat, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long __must_check
++HYPERVISOR_set_timer_op(
++ u64 timeout)
++{
++ return _hypercall1(long, set_timer_op, timeout);
++}
++
++static inline int __must_check
++HYPERVISOR_platform_op(
++ struct xen_platform_op *platform_op)
++{
++ platform_op->interface_version = XENPF_INTERFACE_VERSION;
++ return _hypercall1(int, platform_op, platform_op);
++}
++
++static inline int __must_check
++HYPERVISOR_set_debugreg(
++ unsigned int reg, unsigned long value)
++{
++ return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long __must_check
++HYPERVISOR_get_debugreg(
++ unsigned int reg)
++{
++ return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int __must_check
++HYPERVISOR_update_descriptor(
++ unsigned long ma, unsigned long word)
++{
++ return _hypercall2(int, update_descriptor, ma, word);
++}
++
++static inline int __must_check
++HYPERVISOR_memory_op(
++ unsigned int cmd, void *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_multicall(
++ multicall_entry_t *call_list, unsigned int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping(
++ unsigned long va, pte_t new_val, unsigned long flags)
++{
++ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
++}
++
++static inline int __must_check
++HYPERVISOR_event_channel_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct evtchn_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, event_channel_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
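++
++/*
++ * Illustrative use, not part of the original header (assumes the
++ * definitions from xen/interface/event_channel.h):
++ *
++ *	struct evtchn_send send = { .port = port };
++ *	(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
++ */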
++
++static inline int __must_check
++HYPERVISOR_acm_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, acm_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_xen_version(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_console_io(
++ int cmd, unsigned int count, char *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int __must_check
++HYPERVISOR_physdev_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct physdev_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, physdev_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int __must_check
++HYPERVISOR_grant_table_op(
++ unsigned int cmd, void *uop, unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping_otherdomain(
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++ return _hypercall4(int, update_va_mapping_otherdomain, va,
++ new_val.pte, flags, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_vm_assist(
++ unsigned int cmd, unsigned int type)
++{
++ return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int __must_check
++HYPERVISOR_vcpu_op(
++ int cmd, unsigned int vcpuid, void *extra_args)
++{
++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int __must_check
++HYPERVISOR_set_segment_base(
++ int reg, unsigned long value)
++{
++ return _hypercall2(int, set_segment_base, reg, value);
++}
++
++static inline int __must_check
++HYPERVISOR_suspend(
++ unsigned long srec)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend
++ };
++
++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++ &sched_shutdown, srec);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++ SHUTDOWN_suspend, srec);
++#endif
++
++ return rc;
++}
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++static inline int
++HYPERVISOR_nmi_op(
++ unsigned long op, void *arg)
++{
++ return _hypercall2(int, nmi_op, op, arg);
++}
++#endif
++
++#ifndef CONFIG_XEN
++static inline unsigned long __must_check
++HYPERVISOR_hvm_op(
++ int op, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, op, arg);
++}
++#endif
++
++static inline int __must_check
++HYPERVISOR_callback_op(
++ int cmd, const void *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_xenoprof_op(
++ int op, void *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_kexec_op(
++ unsigned long op, void *args)
++{
++ return _hypercall2(int, kexec_op, op, args);
++}
++
++#endif /* __HYPERCALL_H__ */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/hypervisor.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/hypervisor.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/hypervisor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/hypervisor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/mach-xen/asm/hypervisor.h>
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/io.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/io.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/io.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/io.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,329 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <asm/fixmap.h>
++
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) trying to avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
++
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ * Linus
++ */
++
++ /*
++ * Bit simplified and optimized by Jan Hubicka
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++ *
++ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++ * isa_read[wl] and isa_write[wl] fixed
++ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
++ */
++
++#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
++
++#ifdef REALLY_SLOW_IO
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++#else
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
++#endif
++
++/*
++ * Talk about misusing macros..
++ */
++#define __OUT1(s,x) \
++static inline void out##s(unsigned x value, unsigned short port) {
++
++#define __OUT2(s,s1,s2) \
++__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
++
++#define __OUT(s,s1,x) \
++__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
++__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
++
++#define __IN1(s) \
++static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
++
++#define __IN2(s,s1,s2) \
++__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
++
++#define __IN(s,s1,i...) \
++__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++
++#define __INS(s) \
++static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; ins" #s \
++: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define __OUTS(s) \
++static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; outs" #s \
++: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define RETURN_TYPE unsigned char
++__IN(b,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned short
++__IN(w,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned int
++__IN(l,"")
++#undef RETURN_TYPE
++
++__OUT(b,"b",char)
++__OUT(w,"w",short)
++__OUT(l,,int)
++
++__INS(b)
++__INS(w)
++__INS(l)
++
++__OUTS(b)
++__OUTS(w)
++__OUTS(l)
++
++#define IO_SPACE_LIMIT 0xffff
++
++#if defined(__KERNEL__) && __x86_64__
++
++#include <linux/vmalloc.h>
++
++#ifndef __i386__
++/*
++ * Change virtual addresses to physical addresses and vice versa.
++ * These are pretty trivial
++ */
++static inline unsigned long virt_to_phys(volatile void * address)
++{
++ return __pa(address);
++}
++
++static inline void * phys_to_virt(unsigned long address)
++{
++ return __va(address);
++}
++
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++#endif
++
++/*
++ * Change "struct page" to physical address.
++ */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
++#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
++ (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
++ (unsigned long) (bv)->bv_offset)
++
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
++ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++ bvec_to_pseudophys((vec2))))
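++
++/*
++ * Note: the double check above is deliberate. Under Xen, two bio_vecs
++ * must be contiguous in both machine and pseudophysical space before
++ * they may be merged; machine contiguity alone implies nothing about
++ * pseudophysical adjacency.
++ */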
++
++#include <asm-generic/iomap.h>
++
++extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++
++static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
++{
++ return __ioremap(offset, size, 0);
++}
++
++extern void *bt_ioremap(unsigned long addr, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++#define early_ioremap bt_ioremap
++#define early_iounmap bt_iounmap
++
++/*
++ * This one maps high address device memory and turns off caching for that area.
++ * It's useful if some control registers are in such an area and write combining
++ * or read caching is not desirable:
++ */
++extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
++
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++
++#define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); })
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline __u8 __readb(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u8 *)addr;
++}
++static inline __u16 __readw(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u16 *)addr;
++}
++static __always_inline __u32 __readl(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u32 *)addr;
++}
++static inline __u64 __readq(const volatile void __iomem *addr)
++{
++ return *(__force volatile __u64 *)addr;
++}
++#define readb(x) __readb(x)
++#define readw(x) __readw(x)
++#define readl(x) __readl(x)
++#define readq(x) __readq(x)
++#define readb_relaxed(a) readb(a)
++#define readw_relaxed(a) readw(a)
++#define readl_relaxed(a) readl(a)
++#define readq_relaxed(a) readq(a)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++#define __raw_readq readq
++
++#define mmiowb()
++
++static inline void __writel(__u32 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u32 *)addr = b;
++}
++static inline void __writeq(__u64 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u64 *)addr = b;
++}
++static inline void __writeb(__u8 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u8 *)addr = b;
++}
++static inline void __writew(__u16 b, volatile void __iomem *addr)
++{
++ *(__force volatile __u16 *)addr = b;
++}
++#define writeq(val,addr) __writeq((val),(addr))
++#define writel(val,addr) __writel((val),(addr))
++#define writew(val,addr) __writew((val),(addr))
++#define writeb(val,addr) __writeb((val),(addr))
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++#define __raw_writeq writeq
++
++void __memcpy_fromio(void*,unsigned long,unsigned);
++void __memcpy_toio(unsigned long,const void*,unsigned);
++
++static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
++{
++ __memcpy_fromio(to,(unsigned long)from,len);
++}
++static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
++{
++ __memcpy_toio((unsigned long)to,from,len);
++}
++
++void memset_io(volatile void __iomem *a, int b, size_t c);
++
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite close):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++
++/*
++ * Again, x86-64 does not require memory-mapped IO-specific functions.
++ */
++
++#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
++
++/**
++ * check_signature - find BIOS signatures
++ * @io_addr: mmio address to check
++ * @signature: signature block
++ * @length: length of signature
++ *
++ * Perform a signature comparison with the mmio address io_addr. This
++ * address should have been obtained by ioremap.
++ * Returns 1 on a match.
++ */
++
++static inline int check_signature(void __iomem *io_addr,
++ const unsigned char *signature, int length)
++{
++ int retval = 0;
++ do {
++ if (readb(io_addr) != *signature)
++ goto out;
++ io_addr++;
++ signature++;
++ length--;
++ } while (length);
++ retval = 1;
++out:
++ return retval;
++}
++
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size) do { } while (0)
++#define dma_cache_wback(_start,_size) do { } while (0)
++#define dma_cache_wback_inv(_start,_size) do { } while (0)
++
++#define flush_write_buffers()
++
++extern int iommu_bio_merge;
++#define BIO_VMERGE_BOUNDARY iommu_bio_merge
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p) __va(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p) p
++
++#endif /* __KERNEL__ */
++
++#define ARCH_HAS_DEV_MEM
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/irqflags.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/irqflags.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/irqflags.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/irqflags.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,139 @@
++/*
++ * include/asm-x86_64/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
++
++#ifndef __ASSEMBLY__
++/*
++ * Interrupt control:
++ */
++
++/*
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). The Alpha architecture,
++ * for example, also includes these barriers.
++ */
++
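++/*
++ * Under Xen the virtual 'interrupt flag' is the vcpu's event-channel
++ * upcall mask: a non-zero mask plays the role of a cleared EFLAGS.IF.
++ */
++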
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
++
++#ifdef CONFIG_X86_VSMP
++
++/*
++ * Interrupt control for the VSMP architecture:
++ */
++
++static inline void raw_local_irq_disable(void)
++{
++ unsigned long flags = __raw_local_save_flags();
++
++ raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
++}
++
++static inline void raw_local_irq_enable(void)
++{
++ unsigned long flags = __raw_local_save_flags();
++
++ raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
++}
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return !(flags & (1<<9)) || (flags & (1 << 18));
++}
++
++#else /* CONFIG_X86_VSMP */
++
++#define raw_local_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define raw_local_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++} while (0)
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#endif
++
++/*
++ * For spinlocks, etc.:
++ */
++
++#define __raw_local_irq_save() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_local_irq_disable(); \
++ \
++ flags; \
++})
++
++#define raw_local_irq_save(flags) \
++ do { (flags) = __raw_local_irq_save(); } while (0)
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
++
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
++
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++void halt(void);
++
++#else /* __ASSEMBLY__: */
++# ifdef CONFIG_TRACE_IRQFLAGS
++# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
++# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
++# else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++# endif
++#endif
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/irq.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/irq.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/irq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/irq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,38 @@
++#ifndef _ASM_IRQ_H
++#define _ASM_IRQ_H
++
++/*
++ * linux/include/asm/irq.h
++ *
++ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ * IRQ/IPI changes taken from work by Thomas Radke
++ * <tomsoft@informatik.tu-chemnitz.de>
++ */
++
++#include <linux/sched.h>
++/* include comes from machine specific directory */
++#include "irq_vectors.h"
++#include <asm/thread_info.h>
++
++static __inline__ int irq_canonicalize(int irq)
++{
++ return ((irq == 2) ? 9 : irq);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
++#endif
++
++#define KDB_VECTOR 0xf9
++
++# define irq_ctx_init(cpu) do { } while (0)
++
++#ifdef CONFIG_HOTPLUG_CPU
++#include <linux/cpumask.h>
++extern void fixup_irqs(cpumask_t map);
++#endif
++
++#define __ARCH_HAS_DO_SOFTIRQ 1
++
++#endif /* _ASM_IRQ_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/maddr.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/maddr.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/maddr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/maddr.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,161 @@
++#ifndef _X86_64_MADDR_H
++#define _X86_64_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME_BIT (1UL<<63)
++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++
++/* Definitions for machine and pseudophysical addresses. */
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++
++#ifdef CONFIG_XEN
++
++extern unsigned long *phys_to_machine_mapping;
++
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int machine_to_phys_order;
++
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return pfn;
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
++
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 1;
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
++
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++ unsigned long pfn;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return mfn;
++
++ if (unlikely((mfn >> machine_to_phys_order) != 0))
++ return end_pfn;
++
++ /* The array access can fail (e.g., device space beyond end of RAM). */
++ asm (
++ "1: movq %1,%0\n"
++ "2:\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movq %2,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 8\n"
++ " .quad 1b,3b\n"
++ ".previous"
++ : "=r" (pfn)
++ : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
++
++ return pfn;
++}
++
++/*
++ * We detect special mappings in one of two ways:
++ * 1. If the MFN is an I/O page then Xen will set the m2p entry
++ * to be outside our maximum possible pseudophys range.
++ * 2. If the MFN belongs to a different domain then we will certainly
++ * not have MFN in our p2m table. Conversely, if the page is ours,
++ * then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range PFN.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ * require. In all the cases we care about, the FOREIGN_FRAME bit is
++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn(mfn);
++ if ((pfn < end_pfn)
++ && !xen_feature(XENFEAT_auto_translated_physmap)
++ && (phys_to_machine_mapping[pfn] != mfn))
++ return end_pfn; /* force !pfn_valid() */
++ return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return;
++ }
++ phys_to_machine_mapping[pfn] = mfn;
++}
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++ return phys;
++}
++
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++ maddr_t machine;
++ machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++ paddr_t phys;
++ phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++ return phys;
++}
++
++#define __pte_ma(x) ((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
++
++#endif /* !CONFIG_XEN */
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
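++
++/*
++ * Illustrative use, not part of the original header (assumes the
++ * grant-table helpers shipped with these patches):
++ *
++ *	gnttab_grant_foreign_access(domid, virt_to_mfn(addr), 0);
++ */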
++
++#endif /* _X86_64_MADDR_H */
++
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/mmu_context.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/mmu_context.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/mmu_context.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/mmu_context.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,136 @@
++#ifndef __X86_64_MMU_CONTEXT_H
++#define __X86_64_MMU_CONTEXT_H
++
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/page.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++
++/*
++ * possibly do the LDT unload here?
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ if (read_pda(mmu_state) == TLBSTATE_OK)
++ write_pda(mmu_state, TLBSTATE_LAZY);
++#endif
++}
++
++#define prepare_arch_switch(next) __prepare_arch_switch()
++
++static inline void __prepare_arch_switch(void)
++{
++ /*
++ * Save away %es, %ds, %fs and %gs. Must happen before reload
++ * of cr3/ldt (i.e., not in __switch_to).
++ */
++ __asm__ __volatile__ (
++ "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
++ : "=m" (current->thread.es),
++ "=m" (current->thread.ds),
++ "=m" (current->thread.fsindex),
++ "=m" (current->thread.gsindex) );
++
++ if (current->thread.ds)
++ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
++
++ if (current->thread.es)
++ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
++
++ if (current->thread.fsindex) {
++ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
++ current->thread.fs = 0;
++ }
++
++ if (current->thread.gsindex) {
++ load_gs_index(0);
++ current->thread.gs = 0;
++ }
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void load_cr3(pgd_t *pgd)
++{
++ asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
++ "memory");
++}
++
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ unsigned cpu = smp_processor_id();
++ struct mmuext_op _op[3], *op = _op;
++
++ if (likely(prev != next)) {
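++	/*
++	 * The pagetable switch is expressed as a small batch of mmuext
++	 * ops (new base pointer, new user base pointer and, if needed,
++	 * a new LDT) so that the single hypercall below suffices.
++	 */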
++ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++ !next->context.pinned);
++
++ /* stop flush ipis for the previous mm */
++ cpu_clear(cpu, prev->cpu_vm_mask);
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ write_pda(mmu_state, TLBSTATE_OK);
++ write_pda(active_mm, next);
++#endif
++ cpu_set(cpu, next->cpu_vm_mask);
++
++ /* load_cr3(next->pgd) */
++ op->cmd = MMUEXT_NEW_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ op++;
++
++ /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
++ op->cmd = MMUEXT_NEW_USER_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
++ op++;
++
++ if (unlikely(next->context.ldt != prev->context.ldt)) {
++ /* load_LDT_nolock(&next->context, cpu) */
++ op->cmd = MMUEXT_SET_LDT;
++ op->arg1.linear_addr = (unsigned long)next->context.ldt;
++ op->arg2.nr_ents = next->context.size;
++ op++;
++ }
++
++ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++ }
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ else {
++ write_pda(mmu_state, TLBSTATE_OK);
++ if (read_pda(active_mm) != next)
++ out_of_line_bug();
++ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++ /* We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload CR3
++ * to make sure to use no freed page tables.
++ */
++ load_cr3(next->pgd);
++ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
++ load_LDT_nolock(&next->context, cpu);
++ }
++ }
++#endif
++}
++
++#define deactivate_mm(tsk,mm) do { \
++ load_gs_index(0); \
++ asm volatile("movl %0,%%fs"::"r"(0)); \
++} while(0)
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++ if (!next->context.pinned)
++ mm_pin(next);
++ switch_mm(prev, next, NULL);
++}
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/mmu.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/mmu.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/mmu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/mmu.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,38 @@
++#ifndef __x86_64_MMU_H
++#define __x86_64_MMU_H
++
++#include <linux/spinlock.h>
++#include <asm/semaphore.h>
++
++/*
++ * The x86_64 doesn't have an mmu context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct {
++ void *ldt;
++ rwlock_t ldtlock;
++ int size;
++ struct semaphore sem;
++#ifdef CONFIG_XEN
++ unsigned pinned:1;
++ unsigned has_foreign_mappings:1;
++ struct list_head unpinned;
++#endif
++} mm_context_t;
++
++#ifdef CONFIG_XEN
++extern struct list_head mm_unpinned;
++extern spinlock_t mm_unpinned_lock;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++/* kernel/fork.c:dup_mmap hook */
++extern void _arch_dup_mmap(struct mm_struct *mm);
++#define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm))
++#endif
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/msr.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/msr.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/msr.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/msr.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,399 @@
++#ifndef X86_64_MSR_H
++#define X86_64_MSR_H 1
++
++#ifndef __ASSEMBLY__
++/*
++ * Access to machine-specific registers (available on 586 and better only)
++ * Note: the rd* operations modify the parameters directly (without using
++ * pointer indirection); this allows gcc to optimize better
++ */
++
++#define rdmsr(msr,val1,val2) \
++ __asm__ __volatile__("rdmsr" \
++ : "=a" (val1), "=d" (val2) \
++ : "c" (msr))
++
++
++#define rdmsrl(msr,val) do { unsigned long a__,b__; \
++ __asm__ __volatile__("rdmsr" \
++ : "=a" (a__), "=d" (b__) \
++ : "c" (msr)); \
++ val = a__ | (b__<<32); \
++} while(0)
++
++#define wrmsr(msr,val1,val2) \
++ __asm__ __volatile__("wrmsr" \
++ : /* no outputs */ \
++ : "c" (msr), "a" (val1), "d" (val2))
++
++#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
++
++/* wrmsr with exception handling */
++#define wrmsr_safe(msr,a,b) ({ int ret__; \
++ asm volatile("2: wrmsr ; xorl %0,%0\n" \
++ "1:\n\t" \
++ ".section .fixup,\"ax\"\n\t" \
++ "3: movl %4,%0 ; jmp 1b\n\t" \
++ ".previous\n\t" \
++ ".section __ex_table,\"a\"\n" \
++ " .align 8\n\t" \
++ " .quad 2b,3b\n\t" \
++ ".previous" \
++ : "=a" (ret__) \
++ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
++ ret__; })
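++
++/*
++ * Illustrative use, not part of the original header: the return value
++ * lets callers survive a #GP fault on unknown MSRs, e.g.
++ *
++ *	if (wrmsr_safe(MSR_IA32_DEBUGCTLMSR, low, high))
++ *		printk("MSR write faulted\n");
++ */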
++
++#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
++
++#define rdmsr_safe(msr,a,b) \
++ ({ int ret__; \
++ asm volatile ("1: rdmsr\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3: movl %4,%0\n" \
++ " jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n" \
++ " .align 8\n" \
++ " .quad 1b,3b\n" \
++ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
++ :"c"(msr), "i"(-EIO), "0"(0)); \
++ ret__; })
++
++#define rdtsc(low,high) \
++ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
++
++#define rdtscl(low) \
++ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
++
++#define rdtscll(val) do { \
++ unsigned int __a,__d; \
++ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
++ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
++} while(0)
++
++#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
++
++#define rdpmc(counter,low,high) \
++ __asm__ __volatile__("rdpmc" \
++ : "=a" (low), "=d" (high) \
++ : "c" (counter))
++
++static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
++ unsigned int *ecx, unsigned int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++ int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c" (count));
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++ unsigned int eax;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax)
++ : "0" (op)
++ : "bx", "cx", "dx");
++ return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++ unsigned int eax, ebx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=b" (ebx)
++ : "0" (op)
++ : "cx", "dx" );
++ return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++ unsigned int eax, ecx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=c" (ecx)
++ : "0" (op)
++ : "bx", "dx" );
++ return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++ unsigned int eax, edx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=d" (edx)
++ : "0" (op)
++ : "bx", "cx");
++ return edx;
++}
++
++#define MSR_IA32_UCODE_WRITE 0x79
++#define MSR_IA32_UCODE_REV 0x8b
++
++
++#endif
++
++/* AMD/K8 specific MSRs */
++#define MSR_EFER 0xc0000080 /* extended feature register */
++#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
++#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
++#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
++#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
++#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
++#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
++#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
++/* EFER bits: */
++#define _EFER_SCE 0 /* SYSCALL/SYSRET */
++#define _EFER_LME 8 /* Long mode enable */
++#define _EFER_LMA 10 /* Long mode active (read-only) */
++#define _EFER_NX 11 /* No execute enable */
++
++#define EFER_SCE (1<<_EFER_SCE)
++#define EFER_LME (1<<_EFER_LME)
++#define EFER_LMA (1<<_EFER_LMA)
++#define EFER_NX (1<<_EFER_NX)
++
++/* Intel MSRs. Some also available on other CPUs */
++#define MSR_IA32_TSC 0x10
++#define MSR_IA32_PLATFORM_ID 0x17
++
++#define MSR_IA32_PERFCTR0 0xc1
++#define MSR_IA32_PERFCTR1 0xc2
++
++#define MSR_MTRRcap 0x0fe
++#define MSR_IA32_BBL_CR_CTL 0x119
++
++#define MSR_IA32_SYSENTER_CS 0x174
++#define MSR_IA32_SYSENTER_ESP 0x175
++#define MSR_IA32_SYSENTER_EIP 0x176
++
++#define MSR_IA32_MCG_CAP 0x179
++#define MSR_IA32_MCG_STATUS 0x17a
++#define MSR_IA32_MCG_CTL 0x17b
++
++#define MSR_IA32_EVNTSEL0 0x186
++#define MSR_IA32_EVNTSEL1 0x187
++
++#define MSR_IA32_DEBUGCTLMSR 0x1d9
++#define MSR_IA32_LASTBRANCHFROMIP 0x1db
++#define MSR_IA32_LASTBRANCHTOIP 0x1dc
++#define MSR_IA32_LASTINTFROMIP 0x1dd
++#define MSR_IA32_LASTINTTOIP 0x1de
++
++#define MSR_MTRRfix64K_00000 0x250
++#define MSR_MTRRfix16K_80000 0x258
++#define MSR_MTRRfix16K_A0000 0x259
++#define MSR_MTRRfix4K_C0000 0x268
++#define MSR_MTRRfix4K_C8000 0x269
++#define MSR_MTRRfix4K_D0000 0x26a
++#define MSR_MTRRfix4K_D8000 0x26b
++#define MSR_MTRRfix4K_E0000 0x26c
++#define MSR_MTRRfix4K_E8000 0x26d
++#define MSR_MTRRfix4K_F0000 0x26e
++#define MSR_MTRRfix4K_F8000 0x26f
++#define MSR_MTRRdefType 0x2ff
++
++#define MSR_IA32_MC0_CTL 0x400
++#define MSR_IA32_MC0_STATUS 0x401
++#define MSR_IA32_MC0_ADDR 0x402
++#define MSR_IA32_MC0_MISC 0x403
++
++#define MSR_P6_PERFCTR0 0xc1
++#define MSR_P6_PERFCTR1 0xc2
++#define MSR_P6_EVNTSEL0 0x186
++#define MSR_P6_EVNTSEL1 0x187
++
++/* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */
++#define MSR_K7_EVNTSEL0 0xC0010000
++#define MSR_K7_PERFCTR0 0xC0010004
++#define MSR_K7_EVNTSEL1 0xC0010001
++#define MSR_K7_PERFCTR1 0xC0010005
++#define MSR_K7_EVNTSEL2 0xC0010002
++#define MSR_K7_PERFCTR2 0xC0010006
++#define MSR_K7_EVNTSEL3 0xC0010003
++#define MSR_K7_PERFCTR3 0xC0010007
++#define MSR_K8_TOP_MEM1 0xC001001A
++#define MSR_K8_TOP_MEM2 0xC001001D
++#define MSR_K8_SYSCFG 0xC0010010
++#define MSR_K8_HWCR 0xC0010015
++
++/* K6 MSRs */
++#define MSR_K6_EFER 0xC0000080
++#define MSR_K6_STAR 0xC0000081
++#define MSR_K6_WHCR 0xC0000082
++#define MSR_K6_UWCCR 0xC0000085
++#define MSR_K6_PSOR 0xC0000087
++#define MSR_K6_PFIR 0xC0000088
++
++/* Centaur-Hauls/IDT defined MSRs. */
++#define MSR_IDT_FCR1 0x107
++#define MSR_IDT_FCR2 0x108
++#define MSR_IDT_FCR3 0x109
++#define MSR_IDT_FCR4 0x10a
++
++#define MSR_IDT_MCR0 0x110
++#define MSR_IDT_MCR1 0x111
++#define MSR_IDT_MCR2 0x112
++#define MSR_IDT_MCR3 0x113
++#define MSR_IDT_MCR4 0x114
++#define MSR_IDT_MCR5 0x115
++#define MSR_IDT_MCR6 0x116
++#define MSR_IDT_MCR7 0x117
++#define MSR_IDT_MCR_CTRL 0x120
++
++/* VIA Cyrix defined MSRs*/
++#define MSR_VIA_FCR 0x1107
++#define MSR_VIA_LONGHAUL 0x110a
++#define MSR_VIA_RNG 0x110b
++#define MSR_VIA_BCR2 0x1147
++
++/* Intel defined MSRs. */
++#define MSR_IA32_P5_MC_ADDR 0
++#define MSR_IA32_P5_MC_TYPE 1
++#define MSR_IA32_PLATFORM_ID 0x17
++#define MSR_IA32_EBL_CR_POWERON 0x2a
++
++#define MSR_IA32_APICBASE 0x1b
++#define MSR_IA32_APICBASE_BSP (1<<8)
++#define MSR_IA32_APICBASE_ENABLE (1<<11)
++#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
++
++/* P4/Xeon+ specific */
++#define MSR_IA32_MCG_EAX 0x180
++#define MSR_IA32_MCG_EBX 0x181
++#define MSR_IA32_MCG_ECX 0x182
++#define MSR_IA32_MCG_EDX 0x183
++#define MSR_IA32_MCG_ESI 0x184
++#define MSR_IA32_MCG_EDI 0x185
++#define MSR_IA32_MCG_EBP 0x186
++#define MSR_IA32_MCG_ESP 0x187
++#define MSR_IA32_MCG_EFLAGS 0x188
++#define MSR_IA32_MCG_EIP 0x189
++#define MSR_IA32_MCG_RESERVED 0x18A
++
++#define MSR_P6_EVNTSEL0 0x186
++#define MSR_P6_EVNTSEL1 0x187
++
++#define MSR_IA32_PERF_STATUS 0x198
++#define MSR_IA32_PERF_CTL 0x199
++
++#define MSR_IA32_THERM_CONTROL 0x19a
++#define MSR_IA32_THERM_INTERRUPT 0x19b
++#define MSR_IA32_THERM_STATUS 0x19c
++#define MSR_IA32_MISC_ENABLE 0x1a0
++
++#define MSR_IA32_DEBUGCTLMSR 0x1d9
++#define MSR_IA32_LASTBRANCHFROMIP 0x1db
++#define MSR_IA32_LASTBRANCHTOIP 0x1dc
++#define MSR_IA32_LASTINTFROMIP 0x1dd
++#define MSR_IA32_LASTINTTOIP 0x1de
++
++#define MSR_IA32_MC0_CTL 0x400
++#define MSR_IA32_MC0_STATUS 0x401
++#define MSR_IA32_MC0_ADDR 0x402
++#define MSR_IA32_MC0_MISC 0x403
++
++/* Pentium IV performance counter MSRs */
++#define MSR_P4_BPU_PERFCTR0 0x300
++#define MSR_P4_BPU_PERFCTR1 0x301
++#define MSR_P4_BPU_PERFCTR2 0x302
++#define MSR_P4_BPU_PERFCTR3 0x303
++#define MSR_P4_MS_PERFCTR0 0x304
++#define MSR_P4_MS_PERFCTR1 0x305
++#define MSR_P4_MS_PERFCTR2 0x306
++#define MSR_P4_MS_PERFCTR3 0x307
++#define MSR_P4_FLAME_PERFCTR0 0x308
++#define MSR_P4_FLAME_PERFCTR1 0x309
++#define MSR_P4_FLAME_PERFCTR2 0x30a
++#define MSR_P4_FLAME_PERFCTR3 0x30b
++#define MSR_P4_IQ_PERFCTR0 0x30c
++#define MSR_P4_IQ_PERFCTR1 0x30d
++#define MSR_P4_IQ_PERFCTR2 0x30e
++#define MSR_P4_IQ_PERFCTR3 0x30f
++#define MSR_P4_IQ_PERFCTR4 0x310
++#define MSR_P4_IQ_PERFCTR5 0x311
++#define MSR_P4_BPU_CCCR0 0x360
++#define MSR_P4_BPU_CCCR1 0x361
++#define MSR_P4_BPU_CCCR2 0x362
++#define MSR_P4_BPU_CCCR3 0x363
++#define MSR_P4_MS_CCCR0 0x364
++#define MSR_P4_MS_CCCR1 0x365
++#define MSR_P4_MS_CCCR2 0x366
++#define MSR_P4_MS_CCCR3 0x367
++#define MSR_P4_FLAME_CCCR0 0x368
++#define MSR_P4_FLAME_CCCR1 0x369
++#define MSR_P4_FLAME_CCCR2 0x36a
++#define MSR_P4_FLAME_CCCR3 0x36b
++#define MSR_P4_IQ_CCCR0 0x36c
++#define MSR_P4_IQ_CCCR1 0x36d
++#define MSR_P4_IQ_CCCR2 0x36e
++#define MSR_P4_IQ_CCCR3 0x36f
++#define MSR_P4_IQ_CCCR4 0x370
++#define MSR_P4_IQ_CCCR5 0x371
++#define MSR_P4_ALF_ESCR0 0x3ca
++#define MSR_P4_ALF_ESCR1 0x3cb
++#define MSR_P4_BPU_ESCR0 0x3b2
++#define MSR_P4_BPU_ESCR1 0x3b3
++#define MSR_P4_BSU_ESCR0 0x3a0
++#define MSR_P4_BSU_ESCR1 0x3a1
++#define MSR_P4_CRU_ESCR0 0x3b8
++#define MSR_P4_CRU_ESCR1 0x3b9
++#define MSR_P4_CRU_ESCR2 0x3cc
++#define MSR_P4_CRU_ESCR3 0x3cd
++#define MSR_P4_CRU_ESCR4 0x3e0
++#define MSR_P4_CRU_ESCR5 0x3e1
++#define MSR_P4_DAC_ESCR0 0x3a8
++#define MSR_P4_DAC_ESCR1 0x3a9
++#define MSR_P4_FIRM_ESCR0 0x3a4
++#define MSR_P4_FIRM_ESCR1 0x3a5
++#define MSR_P4_FLAME_ESCR0 0x3a6
++#define MSR_P4_FLAME_ESCR1 0x3a7
++#define MSR_P4_FSB_ESCR0 0x3a2
++#define MSR_P4_FSB_ESCR1 0x3a3
++#define MSR_P4_IQ_ESCR0 0x3ba
++#define MSR_P4_IQ_ESCR1 0x3bb
++#define MSR_P4_IS_ESCR0 0x3b4
++#define MSR_P4_IS_ESCR1 0x3b5
++#define MSR_P4_ITLB_ESCR0 0x3b6
++#define MSR_P4_ITLB_ESCR1 0x3b7
++#define MSR_P4_IX_ESCR0 0x3c8
++#define MSR_P4_IX_ESCR1 0x3c9
++#define MSR_P4_MOB_ESCR0 0x3aa
++#define MSR_P4_MOB_ESCR1 0x3ab
++#define MSR_P4_MS_ESCR0 0x3c0
++#define MSR_P4_MS_ESCR1 0x3c1
++#define MSR_P4_PMH_ESCR0 0x3ac
++#define MSR_P4_PMH_ESCR1 0x3ad
++#define MSR_P4_RAT_ESCR0 0x3bc
++#define MSR_P4_RAT_ESCR1 0x3bd
++#define MSR_P4_SAAT_ESCR0 0x3ae
++#define MSR_P4_SAAT_ESCR1 0x3af
++#define MSR_P4_SSU_ESCR0 0x3be
++#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
++#define MSR_P4_TBPU_ESCR0 0x3c2
++#define MSR_P4_TBPU_ESCR1 0x3c3
++#define MSR_P4_TC_ESCR0 0x3c4
++#define MSR_P4_TC_ESCR1 0x3c5
++#define MSR_P4_U2L_ESCR0 0x3b0
++#define MSR_P4_U2L_ESCR1 0x3b1
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/nmi.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/nmi.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/nmi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/nmi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,93 @@
++/*
++ * linux/include/asm-i386/nmi.h
++ */
++#ifndef ASM_NMI_H
++#define ASM_NMI_H
++
++#include <linux/pm.h>
++#include <asm/io.h>
++
++#include <xen/interface/nmi.h>
++
++struct pt_regs;
++
++typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
++
++/**
++ * set_nmi_callback
++ *
++ * Set a handler for an NMI. Only one handler may be
++ * set. Return 1 if the NMI was handled.
++ */
++void set_nmi_callback(nmi_callback_t callback);
++
++/**
++ * unset_nmi_callback
++ *
++ * Remove the handler previously set.
++ */
++void unset_nmi_callback(void);
++
++#ifdef CONFIG_PM
++
++/** Replace the PM callback routine for NMI. */
++struct pm_dev * set_nmi_pm_callback(pm_callback callback);
++
++/** Unset the PM callback routine back to the default. */
++void unset_nmi_pm_callback(struct pm_dev * dev);
++
++#else
++
++static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
++{
++	return NULL;
++}
++
++static inline void unset_nmi_pm_callback(struct pm_dev * dev)
++{
++}
++
++#endif /* CONFIG_PM */
++
++extern void default_do_nmi(struct pt_regs *);
++extern void die_nmi(char *str, struct pt_regs *regs);
++
++static inline unsigned char get_nmi_reason(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned char reason = 0;
++
++ /* construct a value which looks like it came from
++ * port 0x61.
++ */
++ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++ reason |= 0x40;
++ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++ reason |= 0x80;
++
++ return reason;
++}
++
++extern int panic_on_timeout;
++extern int unknown_nmi_panic;
++
++extern int check_nmi_watchdog(void);
++
++extern void setup_apic_nmi_watchdog (void);
++extern int reserve_lapic_nmi(void);
++extern void release_lapic_nmi(void);
++extern void disable_timer_nmi_watchdog(void);
++extern void enable_timer_nmi_watchdog(void);
++extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
++
++extern void nmi_watchdog_default(void);
++extern int setup_nmi_watchdog(char *);
++
++extern unsigned int nmi_watchdog;
++#define NMI_DEFAULT -1
++#define NMI_NONE 0
++#define NMI_IO_APIC 1
++#define NMI_LOCAL_APIC 2
++#define NMI_INVALID 3
++
++#endif /* ASM_NMI_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/page.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/page.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/page.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/page.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,212 @@
++#ifndef _X86_64_PAGE_H
++#define _X86_64_PAGE_H
++
++/* #include <linux/string.h> */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <asm/bug.h>
++#endif
++#include <xen/interface/xen.h>
++
++/*
++ * Need to repeat this here in order to not include pgtable.h (which in turn
++ * depends on definitions made here), but to be able to use the symbolic
++ * below. The preprocessor will warn if the two definitions aren't identical.
++ */
++#define _PAGE_PRESENT 0x001
++#define _PAGE_IO 0x200
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT 12
++#ifdef __ASSEMBLY__
++#define PAGE_SIZE (0x1 << PAGE_SHIFT)
++#else
++#define PAGE_SIZE (1UL << PAGE_SHIFT)
++#endif
++#define PAGE_MASK (~(PAGE_SIZE-1))
++
++/* See Documentation/x86_64/mm.txt for a description of the memory map. */
++#define __PHYSICAL_MASK_SHIFT 46
++#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
++#define __VIRTUAL_MASK_SHIFT 48
++#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
++
++#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
++
++#define THREAD_ORDER 1
++#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
++#define CURRENT_MASK (~(THREAD_SIZE-1))
++
++#define EXCEPTION_STACK_ORDER 0
++#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
++
++#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
++#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
++
++#define IRQSTACK_ORDER 2
++#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
++
++#define STACKFAULT_STACK 1
++#define DOUBLEFAULT_STACK 2
++#define NMI_STACK 3
++#define DEBUG_STACK 4
++#define MCE_STACK 5
++#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#define HPAGE_SHIFT PMD_SHIFT
++#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK (~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++
++extern unsigned long end_pfn;
++
++#include <asm/maddr.h>
++
++void clear_page(void *);
++void copy_page(void *, void *);
++
++#define clear_user_page(page, vaddr, pg) clear_page(page)
++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/*
++ * These are used to make use of C type-checking..
++ */
++typedef struct { unsigned long pte; } pte_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
++#define PTE_MASK PHYSICAL_PAGE_MASK
++
++typedef struct { unsigned long pgprot; } pgprot_t;
++
++#define __pte_val(x) ((x).pte)
++#define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO)) \
++ == _PAGE_PRESENT ? \
++ pte_machine_to_phys(__pte_val(x)) : \
++ __pte_val(x))
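++
++/*
++ * Note: a pte holds a machine address; pte_val() above hands callers
++ * the pseudophysical view, except for I/O mappings (_PAGE_IO), whose
++ * machine frames have no pseudophysical counterpart.
++ */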
++
++#define __pmd_val(x) ((x).pmd)
++static inline unsigned long pmd_val(pmd_t x)
++{
++ unsigned long ret = __pmd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++ return ret;
++}
++
++#define __pud_val(x) ((x).pud)
++static inline unsigned long pud_val(pud_t x)
++{
++ unsigned long ret = __pud_val(x);
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long pgd_val(pgd_t x)
++{
++ unsigned long ret = __pgd_val(x);
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++
++#define pgprot_val(x) ((x).pgprot)
++
++static inline pte_t __pte(unsigned long x)
++{
++ if ((x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)
++ x = pte_phys_to_machine(x);
++ return ((pte_t) { (x) });
++}
++
++static inline pmd_t __pmd(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pmd_t) { (x) });
++}
++
++static inline pud_t __pud(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pud_t) { (x) });
++}
++
++static inline pgd_t __pgd(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pgd_t) { (x) });
++}
++
++#define __pgprot(x) ((pgprot_t) { (x) } )
++
++#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
++#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map 0xffffffff80000000UL
++#define __PAGE_OFFSET 0xffff880000000000UL
++
++#else
++#define __PHYSICAL_START CONFIG_PHYSICAL_START
++#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map 0xffffffff80000000
++#define __PAGE_OFFSET 0xffff880000000000
++#endif /* !__ASSEMBLY__ */
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET 0
++#endif
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++#define KERNEL_TEXT_SIZE (40UL*1024*1024)
++#define KERNEL_TEXT_START 0xffffffff80000000UL
++
++#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
++
++/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
++ Otherwise you risk miscompilation. */
++#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++ This seems to be the official gcc blessed way to do such arithmetic. */
++#define __pa_symbol(x) \
++ ({unsigned long v; \
++ asm("" : "=r" (v) : "0" (x)); \
++ __pa(v); })
++
++#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define __boot_va(x) __va(x)
++#define __boot_pa(x) __pa(x)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn) ((pfn) < end_pfn)
++#endif
++
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
++
++#define VM_DATA_DEFAULT_FLAGS \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#define __HAVE_ARCH_GATE_AREA 1
++
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
++
++#endif /* __KERNEL__ */
++
++#endif /* _X86_64_PAGE_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/pci.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/pci.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/pci.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/pci.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,168 @@
++#ifndef __x8664_PCI_H
++#define __x8664_PCI_H
++
++#include <asm/io.h>
++
++#ifdef __KERNEL__
++
++#include <linux/mm.h> /* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++ already-configured bus numbers - to be used for buggy BIOSes
++ or architectures with incomplete PCI setup by the loader */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses() 0
++#endif
++
++#include <asm/hypervisor.h>
++#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain())
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO 0x1000
++#define PCIBIOS_MIN_MEM (pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO 0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
++extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/page.h>
++
++extern void pci_iommu_alloc(void);
++extern int iommu_setup(char *opt);
++
++/* The PCI address space does equal the physical memory
++ * address space. The networking and block device layers use
++ * this boolean for bounce buffer decisions
++ *
++ * On AMD64 it mostly equals, but we set it to zero if a hardware
++ * IOMMU (gart) or a software IOMMU (swiotlb) is available.
++ */
++#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
++
++#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
++
++/*
++ * x86-64 always supports DAC, but sometimes it is useful to force
++ * devices through the IOMMU to get automatic sg list merging.
++ * Optional right now.
++ */
++extern int iommu_sac_force;
++#define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#elif defined(CONFIG_SWIOTLB)
++
++#define pci_dac_dma_supported(pci_dev, mask) 1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#else
++/* No IOMMU */
++
++#define pci_dac_dma_supported(pci_dev, mask) 1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME) (0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME) (0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
++
++#endif
++
++#include <asm-generic/pci-dma-compat.h>
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++ return ((dma64_addr_t) page_to_phys(page) +
++ (dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return virt_to_page(__va(dma_addr));
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++ flush_write_buffers();
++}
++
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++ enum pci_dma_burst_strategy *strat,
++ unsigned long *strategy_parameter)
++{
++ *strat = PCI_DMA_BURST_INFINITY;
++ *strategy_parameter = ~0UL;
++}
++#endif
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state, int write_combine);
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#endif /* __KERNEL__ */
++
++/* generic pci stuff */
++#ifdef CONFIG_PCI
++#include <asm-generic/pci.h>
++#endif
++
++#endif /* __x8664_PCI_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/pgalloc.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/pgalloc.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/pgalloc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/pgalloc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,204 @@
++#ifndef _X86_64_PGALLOC_H
++#define _X86_64_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <asm/pda.h>
++#include <linux/threads.h>
++#include <linux/mm.h>
++#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
++
++#include <xen/features.h>
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
++{
++ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
++}
++
++static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
++ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
++ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
++ } else {
++ *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
++ }
++}
++
++static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pmd,
++ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
++ PAGE_KERNEL_RO), 0));
++ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
++ } else {
++ *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
++ }
++}
++
++/*
++ * We need to use the batch mode here, but pgd_populate() won't
++ * be called frequently.
++ */
++static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pud,
++ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
++ PAGE_KERNEL_RO), 0));
++ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
++ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
++ } else {
++ *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
++ *(__user_pgd(pgd)) = *(pgd);
++ }
++}
++
++extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
++extern void pte_free(struct page *pte);
++
++static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ struct page *pg;
++
++ pg = pte_alloc_one(mm, addr);
++ return pg ? page_address(pg) : NULL;
++}
++
++static inline void pmd_free(pmd_t *pmd)
++{
++ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
++ pte_free(virt_to_page(pmd));
++}
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ struct page *pg;
++
++ pg = pte_alloc_one(mm, addr);
++ return pg ? page_address(pg) : NULL;
++}
++
++static inline void pud_free(pud_t *pud)
++{
++ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
++ pte_free(virt_to_page(pud));
++}
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++
++ spin_lock(&pgd_lock);
++ page->index = (pgoff_t)pgd_list;
++ if (pgd_list)
++ pgd_list->private = (unsigned long)&page->index;
++ pgd_list = page;
++ page->private = (unsigned long)&pgd_list;
++ spin_unlock(&pgd_lock);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++
++ spin_lock(&pgd_lock);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page->private;
++ *pprev = next;
++ if (next)
++ next->private = (unsigned long)pprev;
++ spin_unlock(&pgd_lock);
++}
++
++static inline pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ /*
++ * We allocate two contiguous pages for kernel and user.
++ */
++ unsigned boundary;
++ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
++ if (!pgd)
++ return NULL;
++ pgd_list_add(pgd);
++ /*
++ * Copy kernel pointers in from init.
++ * Could keep a freelist or slab cache of those because the kernel
++ * part never changes.
++ */
++ boundary = pgd_index(__PAGE_OFFSET);
++ memset(pgd, 0, boundary * sizeof(pgd_t));
++ memcpy(pgd + boundary,
++ init_level4_pgt + boundary,
++ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
++
++ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
++ /*
++ * Set level3_user_pgt for vsyscall area
++ */
++ __user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
++ return pgd;
++}
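++
++/*
++ * Layout sketch for the order-1 allocation above (illustrative; "upgd"
++ * is a hypothetical name):
++ *
++ *	pgd_t *pgd  = pgd_alloc(mm);	// two contiguous pages
++ *	pgd_t *upgd = __user_pgd(pgd);	// == pgd + 512, i.e. the 2nd page
++ *
++ * The first page carries the kernel mappings copied from
++ * init_level4_pgt; the second page is the user pagetable root, which
++ * gets its vsyscall entry from level3_user_pgt.
++ */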
++
++static inline void pgd_free(pgd_t *pgd)
++{
++ pte_t *ptep = virt_to_ptep(pgd);
++
++ if (!pte_write(*ptep)) {
++ xen_pgd_unpin(__pa(pgd));
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pgd,
++ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
++ 0));
++ }
++
++ ptep = virt_to_ptep(__user_pgd(pgd));
++
++ if (!pte_write(*ptep)) {
++ xen_pgd_unpin(__pa(__user_pgd(pgd)));
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)__user_pgd(pgd),
++ pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
++ PAGE_KERNEL),
++ 0));
++ }
++
++ pgd_list_del(pgd);
++ free_pages((unsigned long)pgd, 1);
++}
++
++static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++ if (pte)
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++
++ return pte;
++}
++
++/* Should really implement gc for free page table pages. This could be
++ done with a reference count in struct page. */
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
++ make_page_writable(pte, XENFEAT_writable_page_tables);
++ free_page((unsigned long)pte);
++}
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
++#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
++
++#endif /* _X86_64_PGALLOC_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/pgtable.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/pgtable.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/pgtable.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/pgtable.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,583 @@
++#ifndef _X86_64_PGTABLE_H
++#define _X86_64_PGTABLE_H
++
++/*
++ * This file contains the functions and defines necessary to modify and use
++ * the x86-64 page table tree.
++ */
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <linux/threads.h>
++#include <linux/sched.h>
++#include <asm/pda.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++
++extern pud_t level3_user_pgt[512];
++
++extern void xen_init_pt(void);
++
++extern pte_t *lookup_address(unsigned long address);
++
++#define virt_to_ptep(va) \
++({ \
++ pte_t *__ptep = lookup_address((unsigned long)(va)); \
++ BUG_ON(!__ptep || !pte_present(*__ptep)); \
++ __ptep; \
++})
++
++#define arbitrary_virt_to_machine(va) \
++ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \
++ | ((unsigned long)(va) & (PAGE_SIZE - 1)))
++#endif
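++
++/*
++ * Sketch (illustrative): arbitrary_virt_to_machine() combines the
++ * machine frame of the backing PTE with the in-page offset, i.e. for a
++ * va whose PTE holds machine frame number M:
++ *
++ *	maddr = (M << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
++ */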
++
++extern pud_t level3_kernel_pgt[512];
++extern pud_t level3_physmem_pgt[512];
++extern pud_t level3_ident_pgt[512];
++extern pmd_t level2_kernel_pgt[512];
++extern pgd_t init_level4_pgt[];
++extern pgd_t boot_level4_pgt[];
++extern unsigned long __supported_pte_mask;
++
++#define swapper_pg_dir init_level4_pgt
++
++extern int nonx_setup(char *str);
++extern void paging_init(void);
++extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
++
++extern unsigned long pgkern_mask;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT 39
++#define PTRS_PER_PGD 512
++
++/*
++ * 3rd level page
++ */
++#define PUD_SHIFT 30
++#define PTRS_PER_PUD 512
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT 21
++#define PTRS_PER_PMD 512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE 512
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pte_val(e), pte_pfn(e))
++#define pmd_ERROR(e) \
++ printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pmd_val(e), pmd_pfn(e))
++#define pud_ERROR(e) \
++ printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++
++#define pgd_none(x) (!__pgd_val(x))
++#define pud_none(x) (!__pud_val(x))
++
++static inline void set_pte(pte_t *dst, pte_t val)
++{
++ *dst = val;
++}
++
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
++#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
++#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
++
++static inline void pud_clear (pud_t * pud)
++{
++ set_pud(pud, __pud(0));
++}
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pgd_clear (pgd_t * pgd)
++{
++ set_pgd(pgd, __pgd(0));
++ set_pgd(__user_pgd(pgd), __pgd(0));
++}
++
++#define pud_page(pud) \
++ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++
++#define pte_same(a, b) ((a).pte == (b).pte)
++
++#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
++
++#define PMD_SIZE (1UL << PMD_SHIFT)
++#define PMD_MASK (~(PMD_SIZE-1))
++#define PUD_SIZE (1UL << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
++#define FIRST_USER_ADDRESS 0
++
++#ifndef __ASSEMBLY__
++#define MAXMEM 0x3fffffffffffUL
++#define VMALLOC_START 0xffffc20000000000UL
++#define VMALLOC_END 0xffffe1ffffffffffUL
++#define MODULES_VADDR 0xffffffff88000000UL
++#define MODULES_END 0xfffffffffff00000UL
++#define MODULES_LEN (MODULES_END - MODULES_VADDR)
++
++#define _PAGE_BIT_PRESENT 0
++#define _PAGE_BIT_RW 1
++#define _PAGE_BIT_USER 2
++#define _PAGE_BIT_PWT 3
++#define _PAGE_BIT_PCD 4
++#define _PAGE_BIT_ACCESSED 5
++#define _PAGE_BIT_DIRTY 6
++#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
++#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
++#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
++
++#define _PAGE_PRESENT 0x001
++#define _PAGE_RW 0x002
++#define _PAGE_USER 0x004
++#define _PAGE_PWT 0x008
++#define _PAGE_PCD 0x010
++#define _PAGE_ACCESSED 0x020
++#define _PAGE_DIRTY 0x040
++#define _PAGE_PSE 0x080 /* 2MB page */
++#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
++
++#define _PAGE_PROTNONE 0x080 /* If not present */
++#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
++
++/* Mapped page is I/O or foreign and has no associated page struct. */
++#define _PAGE_IO 0x200
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++extern unsigned int __kernel_page_user;
++#else
++#define __kernel_page_user 0
++#endif
++
++#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
++
++#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
++
++#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY PAGE_COPY_NOEXEC
++#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_EXEC \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
++#define __PAGE_KERNEL_NOCACHE \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_RO \
++ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_VSYSCALL \
++ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
++ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE \
++ (__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC \
++ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++/*
++ * We don't support the GLOBAL page attribute in xenolinux64
++ */
++#define MAKE_GLOBAL(x) __pgprot((x))
++
++#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
++#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
++
++/* xwr */
++#define __P000 PAGE_NONE
++#define __P001 PAGE_READONLY
++#define __P010 PAGE_COPY
++#define __P011 PAGE_COPY
++#define __P100 PAGE_READONLY_EXEC
++#define __P101 PAGE_READONLY_EXEC
++#define __P110 PAGE_COPY_EXEC
++#define __P111 PAGE_COPY_EXEC
++
++#define __S000 PAGE_NONE
++#define __S001 PAGE_READONLY
++#define __S010 PAGE_SHARED
++#define __S011 PAGE_SHARED
++#define __S100 PAGE_READONLY_EXEC
++#define __S101 PAGE_READONLY_EXEC
++#define __S110 PAGE_SHARED_EXEC
++#define __S111 PAGE_SHARED_EXEC
++
++static inline unsigned long pgd_bad(pgd_t pgd)
++{
++ unsigned long val = __pgd_val(pgd);
++ val &= ~PTE_MASK;
++ val &= ~(_PAGE_USER | _PAGE_DIRTY);
++ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++static inline unsigned long pud_bad(pud_t pud)
++{
++ unsigned long val = __pud_val(pud);
++ val &= ~PTE_MASK;
++ val &= ~(_PAGE_USER | _PAGE_DIRTY);
++ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
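++
++/*
++ * Control-flow sketch for set_pte_at() (illustrative): for the current
++ * mm (or init_mm) the PTE write is attempted as a single
++ * HYPERVISOR_update_va_mapping() hypercall; foreign mms, or a failed
++ * hypercall, fall back to a plain set_pte():
++ *
++ *	set_pte_at(mm, addr, ptep, pteval);
++ *	  -> HYPERVISOR_update_va_mapping(addr, pteval, 0)	(fast path)
++ *	  -> set_pte(ptep, pteval)				(fallback)
++ */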
++
++#define pte_none(x) (!(x).pte)
++#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte & _PAGE_IO ? end_pfn : \
++ (_pte).pte & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : \
++ __pte_mfn(_pte))
++
++#define pte_page(x) pfn_to_page(pte_pfn(x))
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++ unsigned long pte = page_nr << PAGE_SHIFT;
++ pte |= pgprot_val(pgprot);
++ pte &= __supported_pte_mask;
++ return __pte(pte);
++}
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
++ pte = __pte_ma(xchg(&ptep->pte, 0));
++ }
++ return pte;
++}
++
++static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
++{
++ if (full) {
++ pte_t pte = *ptep;
++ if (mm->context.pinned)
++ xen_l1_entry_update(ptep, __pte(0));
++ else
++ *ptep = __pte(0);
++ return pte;
++ }
++ return ptep_get_and_clear(mm, addr, ptep);
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
++static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
++static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
++static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }
++
++static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
++
++#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_dirty(__pte); \
++ if (__ret) \
++ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
++ __ret; \
++})
++
++#define ptep_test_and_clear_young(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_young(__pte); \
++ if (__ret) \
++ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
++ __ret; \
++})
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (pte_write(pte))
++ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable".
++ */
++#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
++
++static inline int pmd_large(pmd_t pte) {
++ return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
++}
++
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++/*
++ * Level 4 access.
++ * Never use these in the common code.
++ */
++#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
++#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
++#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
++#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
++
++/* PUD - Level3 access */
++/* to find an entry in a page-table-directory. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
++
++/* PMD - Level 2 access */
++#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
++ pmd_index(address))
++#define pmd_none(x) (!__pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit, since writable
++   page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x) (__pmd_val(x))
++#else
++#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
++#endif
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++#define pmd_bad(x) ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
++ != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
++#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
++#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++
++#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
++#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
++#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
++
++/* PTE - Level 1 access. */
++
++/* page, protection -> pte */
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
++
++/* physical address -> PTE */
++static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
++{
++ unsigned long pteval;
++ pteval = physpage | pgprot_val(pgprot);
++ return __pte(pteval);
++}
++
++/* Change flags of a PTE */
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ /*
++ * Since this might change the present bit (which controls whether
++ * a pte_t object has undergone p2m translation), we must use
++ * pte_val() on the input pte and __pte() for the return value.
++ */
++ unsigned long pteval = pte_val(pte);
++
++ pteval &= _PAGE_CHG_MASK;
++ pteval |= pgprot_val(newprot);
++ pteval &= __supported_pte_mask;
++ return __pte(pteval);
++}
++
++#define pte_index(address) \
++ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++ pte_index(address))
++
++/* x86-64 always has all page tables mapped. */
++#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
++#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
++#define pte_unmap(pte) /* NOP */
++#define pte_unmap_nested(pte) /* NOP */
++
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++
++/*
++ * Rules for using ptep_establish: the pte MUST be a user pte, and
++ * must be a present->present transition.
++ */
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++ do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++ } while (0)
++
++/* We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time. */
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++ do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++ } while (0)
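++
++/*
++ * Usage sketch (illustrative): a write-fault handler marking a PTE
++ * dirty goes through ptep_set_access_flags(), which only issues the
++ * update when "dirty" is set, leaving accessed-bit updates to hardware:
++ *
++ *	entry = pte_mkdirty(pte_mkyoung(*ptep));
++ *	ptep_set_access_flags(vma, address, ptep, entry, 1);
++ */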
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val >> 1) & 0x3f)
++#define __swp_offset(x) ((x).val >> 8)
++#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { __pte_val(pte) })
++#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
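++
++/*
++ * Worked example (illustrative): for swap type 3 and offset 0x1234 the
++ * encoding above yields val = (3 << 1) | (0x1234 << 8) = 0x123406;
++ * __swp_type() and __swp_offset() recover 3 and 0x1234 from it. Bit 0
++ * stays clear, so a swap PTE is never seen as _PAGE_PRESENT.
++ */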
++
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++void vmalloc_sync_all(void);
++
++#endif /* !__ASSEMBLY__ */
++
++extern int kern_addr_valid(unsigned long addr);
++
++#define DOMID_LOCAL (0xFFFFU)
++
++struct vm_area_struct;
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep);
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size);
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot);
++
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
++ xen_change_pte_range(mm, pmd, addr, end, newprot)
++
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
++ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn) (pfn)
++#define GET_IOSPACE(pfn) 0
++#define GET_PFN(pfn) (pfn)
++
++#define HAVE_ARCH_UNMAPPED_AREA
++
++#define pgtable_cache_init() do { } while (0)
++#define check_pgt_cache() do { } while (0)
++
++#define PAGE_AGP PAGE_KERNEL_NOCACHE
++#define HAVE_PAGE_AGP 1
++
++/* fs/proc/kcore.c */
++#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
++#define kc_offset_to_vaddr(o) \
++ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
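++
++/*
++ * Worked example (illustrative, assuming __VIRTUAL_MASK_SHIFT == 48):
++ * an offset with bit 47 set, e.g. 0x800000000000, sign-extends back to
++ * the canonical address 0xffff800000000000; smaller offsets are
++ * returned unchanged.
++ */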
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _X86_64_PGTABLE_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/processor.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/processor.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/processor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/processor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,502 @@
++/*
++ * include/asm-x86_64/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_X86_64_PROCESSOR_H
++#define __ASM_X86_64_PROCESSOR_H
++
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <linux/threads.h>
++#include <asm/msr.h>
++#include <asm/current.h>
++#include <asm/system.h>
++#include <asm/mmsegment.h>
++#include <asm/percpu.h>
++#include <linux/personality.h>
++#include <linux/cpumask.h>
++
++#define TF_MASK 0x00000100
++#define IF_MASK 0x00000200
++#define IOPL_MASK 0x00003000
++#define NT_MASK 0x00004000
++#define VM_MASK 0x00020000
++#define AC_MASK 0x00040000
++#define VIF_MASK 0x00080000 /* virtual interrupt flag */
++#define VIP_MASK 0x00100000 /* virtual interrupt pending */
++#define ID_MASK 0x00200000
++
++#define desc_empty(desc) \
++ (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
++
++/*
++ * CPU type and hardware bug flags. Kept separately for each CPU.
++ */
++
++struct cpuinfo_x86 {
++ __u8 x86; /* CPU family */
++ __u8 x86_vendor; /* CPU vendor */
++ __u8 x86_model;
++ __u8 x86_mask;
++ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
++ __u32 x86_capability[NCAPINTS];
++ char x86_vendor_id[16];
++ char x86_model_id[64];
++ int x86_cache_size; /* in KB */
++ int x86_clflush_size;
++ int x86_cache_alignment;
++ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
++ __u8 x86_virt_bits, x86_phys_bits;
++ __u8 x86_max_cores; /* cpuid returned max cores value */
++ __u32 x86_power;
++ __u32 extended_cpuid_level; /* Max extended CPUID function supported */
++ unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
++#endif
++ __u8 apicid;
++#ifdef CONFIG_SMP
++ __u8 booted_cores; /* number of cores as seen by OS */
++ __u8 phys_proc_id; /* Physical Processor id. */
++ __u8 cpu_core_id; /* Core id. */
++#endif
++} ____cacheline_aligned;
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NUM 8
++#define X86_VENDOR_UNKNOWN 0xff
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern char ignore_irq13;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
++#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
++#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
++#define X86_CR4_DE 0x0008 /* enable debugging extensions */
++#define X86_CR4_PSE 0x0010 /* enable page size extensions */
++#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
++#define X86_CR4_MCE 0x0040 /* Machine check enable */
++#define X86_CR4_PGE 0x0080 /* enable global pages */
++#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (i.e.
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPUs that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++ mmu_cr4_features |= mask;
++ __asm__("movq %%cr4,%%rax\n\t"
++ "orq %0,%%rax\n\t"
++ "movq %%rax,%%cr4\n"
++ : : "irg" (mask)
++ :"ax");
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++ mmu_cr4_features &= ~mask;
++ __asm__("movq %%cr4,%%rax\n\t"
++ "andq %0,%%rax\n\t"
++ "movq %%rax,%%cr4\n"
++ : : "irg" (~mask)
++ :"ax");
++}
++
++
++/*
++ * User space process size. 47bits minus one guard page.
++ */
++#define TASK_SIZE64 (0x800000000000UL - 4096)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmaps.
++ */
++#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
++
++#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++
++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS 65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++
++struct i387_fxsave_struct {
++ u16 cwd;
++ u16 swd;
++ u16 twd;
++ u16 fop;
++ u64 rip;
++ u64 rdp;
++ u32 mxcsr;
++ u32 mxcsr_mask;
++ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
++ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
++ u32 padding[24];
++} __attribute__ ((aligned (16)));
++
++union i387_union {
++ struct i387_fxsave_struct fxsave;
++};
++
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++ u32 reserved1;
++ u64 rsp0;
++ u64 rsp1;
++ u64 rsp2;
++ u64 reserved2;
++ u64 ist[7];
++ u32 reserved3;
++ u32 reserved4;
++ u16 reserved5;
++ u16 io_bitmap_base;
++ /*
++ * The extra 1 is there because the CPU will access an
++ * additional byte beyond the end of the IO permission
++ * bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit. Thus we have:
++ *
++ * 8192 bytes, the bitmap itself, for ports 0..0xffff
++ * 8 bytes, for an extra "long" of ~0UL
++ */
++ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++} __attribute__((packed)) ____cacheline_aligned;
++
++DECLARE_PER_CPU(struct tss_struct,init_tss);
++#endif
++
++
++extern struct cpuinfo_x86 boot_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++/* Save the original ist values for checking stack pointers during debugging */
++struct orig_ist {
++ unsigned long ist[7];
++};
++DECLARE_PER_CPU(struct orig_ist, orig_ist);
++#endif
++
++#ifdef CONFIG_X86_VSMP
++#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
++#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
++#else
++#define ARCH_MIN_TASKALIGN 16
++#define ARCH_MIN_MMSTRUCT_ALIGN 0
++#endif
++
++struct thread_struct {
++ unsigned long rsp0;
++ unsigned long rsp;
++ unsigned long userrsp; /* Copy from PDA */
++ unsigned long fs;
++ unsigned long gs;
++ unsigned short es, ds, fsindex, gsindex;
++/* Hardware debugging registers */
++ unsigned long debugreg0;
++ unsigned long debugreg1;
++ unsigned long debugreg2;
++ unsigned long debugreg3;
++ unsigned long debugreg6;
++ unsigned long debugreg7;
++/* fault info */
++ unsigned long cr2, trap_no, error_code;
++/* floating point info */
++ union i387_union i387 __attribute__((aligned(16)));
++/* IO permissions. The bitmap could be moved into the GDT, which would make
++ the switch faster for a limited number of ioperm-using tasks. -AK */
++ int ioperm;
++ unsigned long *io_bitmap_ptr;
++ unsigned io_bitmap_max;
++/* cached TLS descriptors. */
++ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
++ unsigned int iopl;
++} __attribute__((aligned(16)));
++
++#define INIT_THREAD { \
++ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
++
++#ifndef CONFIG_X86_NO_TSS
++#define INIT_TSS { \
++ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
++#endif
++
++#define INIT_MMAP \
++{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
++
++#define start_thread(regs,new_rip,new_rsp) do { \
++ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
++ load_gs_index(0); \
++ (regs)->rip = (new_rip); \
++ (regs)->rsp = (new_rsp); \
++ write_pda(oldrsp, (new_rsp)); \
++ (regs)->cs = __USER_CS; \
++ (regs)->ss = __USER_DS; \
++ (regs)->eflags = 0x200; \
++ set_fs(USER_DS); \
++} while(0)
++
++#define get_debugreg(var, register) \
++ var = HYPERVISOR_get_debugreg(register)
++#define set_debugreg(value, register) do { \
++ if (HYPERVISOR_set_debugreg(register, value)) \
++ BUG(); \
++} while (0)
++
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++/*
++ * Return saved PC of a blocked thread.
++ * What is this good for? It will always be the scheduler or ret_from_fork.
++ */
++#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
++
++extern unsigned long get_wchan(struct task_struct *p);
++#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
++#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
++
++
++struct microcode_header {
++ unsigned int hdrver;
++ unsigned int rev;
++ unsigned int date;
++ unsigned int sig;
++ unsigned int cksum;
++ unsigned int ldrver;
++ unsigned int pf;
++ unsigned int datasize;
++ unsigned int totalsize;
++ unsigned int reserved[3];
++};
++
++struct microcode {
++ struct microcode_header hdr;
++ unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++ unsigned int sig;
++ unsigned int pf;
++ unsigned int cksum;
++};
++
++struct extended_sigtable {
++ unsigned int count;
++ unsigned int cksum;
++ unsigned int reserved[3];
++ struct extended_signature sigs[0];
++};
++
++
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++
++/* Opteron nops */
++#define K8_NOP1 ".byte 0x90\n"
++#define K8_NOP2 ".byte 0x66,0x90\n"
++#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
++#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
++#define K8_NOP5 K8_NOP3 K8_NOP2
++#define K8_NOP6 K8_NOP3 K8_NOP3
++#define K8_NOP7 K8_NOP4 K8_NOP3
++#define K8_NOP8 K8_NOP4 K8_NOP4
++
++#define ASM_NOP_MAX 8
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++ __asm__ __volatile__("rep;nop": : :"memory");
++}
++
++/* Stop speculative execution */
++static inline void sync_core(void)
++{
++ int tmp;
++ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++}
++
++#define cpu_has_fpu 1
++
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(void *x)
++{
++ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
++}
++
++#define ARCH_HAS_PREFETCHW 1
++static inline void prefetchw(void *x)
++{
++ alternative_input("prefetcht0 (%1)",
++ "prefetchw (%1)",
++ X86_FEATURE_3DNOW,
++ "r" (x));
++}
++
++#define ARCH_HAS_SPINLOCK_PREFETCH 1
++
++#define spin_lock_prefetch(x) prefetchw(x)
++
++#define cpu_relax() rep_nop()
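++
++/*
++ * Usage sketch (illustrative; "done" is a hypothetical flag): cpu_relax()
++ * belongs in any busy-wait loop so the pipeline backs off while spinning:
++ *
++ *	while (!done)
++ *		cpu_relax();
++ */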
++
++/*
++ * NSC/Cyrix CPU configuration register indexes
++ */
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ * NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++ outb((reg), 0x22); \
++ outb((data), 0x23); \
++} while (0)
++
++static inline void serialize_cpu(void)
++{
++ __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
++}
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++ unsigned long edx)
++{
++ /* "monitor %eax,%ecx,%edx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc8;"
++ : :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
++#define stack_current() \
++({ \
++ struct thread_info *ti; \
++ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
++ ti->task; \
++})
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
++
++#endif /* __ASM_X86_64_PROCESSOR_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/ptrace.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/ptrace.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/ptrace.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/ptrace.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,127 @@
++#ifndef _X86_64_PTRACE_H
++#define _X86_64_PTRACE_H
++
++#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
++#define R15 0
++#define R14 8
++#define R13 16
++#define R12 24
++#define RBP 32
++#define RBX 40
++/* arguments: interrupts/non-tracing syscalls only save up to here */
++#define R11 48
++#define R10 56
++#define R9 64
++#define R8 72
++#define RAX 80
++#define RCX 88
++#define RDX 96
++#define RSI 104
++#define RDI 112
++#define ORIG_RAX 120 /* = ERROR */
++/* end of arguments */
++/* cpu exception frame or undefined in case of fast syscall. */
++#define RIP 128
++#define CS 136
++#define EFLAGS 144
++#define RSP 152
++#define SS 160
++#define ARGOFFSET R11
++#endif /* __ASSEMBLY__ */
++
++/* top of stack page */
++#define FRAME_SIZE 168
++
++#define PTRACE_OLDSETOPTIONS 21
++
++#ifndef __ASSEMBLY__
++
++struct pt_regs {
++ unsigned long r15;
++ unsigned long r14;
++ unsigned long r13;
++ unsigned long r12;
++ unsigned long rbp;
++ unsigned long rbx;
++/* arguments: non-interrupt/non-tracing syscalls only save up to here */
++ unsigned long r11;
++ unsigned long r10;
++ unsigned long r9;
++ unsigned long r8;
++ unsigned long rax;
++ unsigned long rcx;
++ unsigned long rdx;
++ unsigned long rsi;
++ unsigned long rdi;
++ unsigned long orig_rax;
++/* end of arguments */
++/* cpu exception frame or undefined */
++ unsigned long rip;
++ unsigned long cs;
++ unsigned long eflags;
++ unsigned long rsp;
++ unsigned long ss;
++/* top of stack page */
++};
++
++#endif
++
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS 12
++#define PTRACE_SETREGS 13
++#define PTRACE_GETFPREGS 14
++#define PTRACE_SETFPREGS 15
++#define PTRACE_GETFPXREGS 18
++#define PTRACE_SETFPXREGS 19
++
++/* only useful for accessing 32bit programs */
++#define PTRACE_GET_THREAD_AREA 25
++#define PTRACE_SET_THREAD_AREA 26
++
++#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
++
++#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
++#define user_mode(regs) (!!((regs)->cs & 3))
++#define user_mode_vm(regs) user_mode(regs)
++#define instruction_pointer(regs) ((regs)->rip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++
++#include <linux/compiler.h>
++
++void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
++
++struct task_struct;
++
++extern unsigned long
++convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
++
++enum {
++ EF_CF = 0x00000001,
++ EF_PF = 0x00000004,
++ EF_AF = 0x00000010,
++ EF_ZF = 0x00000040,
++ EF_SF = 0x00000080,
++ EF_TF = 0x00000100,
++ EF_IE = 0x00000200,
++ EF_DF = 0x00000400,
++ EF_OF = 0x00000800,
++ EF_IOPL = 0x00003000,
++ EF_IOPL_RING0 = 0x00000000,
++ EF_IOPL_RING1 = 0x00001000,
++ EF_IOPL_RING2 = 0x00002000,
++ EF_NT = 0x00004000, /* nested task */
++ EF_RF = 0x00010000, /* resume */
++ EF_VM = 0x00020000, /* virtual mode */
++ EF_AC = 0x00040000, /* alignment */
++ EF_VIF = 0x00080000, /* virtual interrupt */
++ EF_VIP = 0x00100000, /* virtual interrupt pending */
++ EF_ID = 0x00200000, /* id */
++};
++
++#endif
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/smp.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/smp.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/smp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/smp.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,150 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/bitops.h>
++extern int disable_apic;
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#include <asm/thread_info.h>
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef ASSEMBLY
++
++#include <asm/pda.h>
++
++struct pt_regs;
++
++extern cpumask_t cpu_present_mask;
++extern cpumask_t cpu_possible_map;
++extern cpumask_t cpu_online_map;
++extern cpumask_t cpu_initialized;
++
++/*
++ * Private routines/data
++ */
++
++extern void smp_alloc_memory(void);
++extern volatile unsigned long smp_invalidate_needed;
++extern int pic_mode;
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++extern int smp_num_siblings;
++extern void smp_send_reschedule(int cpu);
++void smp_stop_cpu(void);
++extern int smp_call_function_single(int cpuid, void (*func) (void *info),
++ void *info, int retry, int wait);
++
++extern cpumask_t cpu_sibling_map[NR_CPUS];
++extern cpumask_t cpu_core_map[NR_CPUS];
++extern u8 cpu_llc_id[NR_CPUS];
++
++#define SMP_TRAMPOLINE_BASE 0x6000
++
++/*
++ * On x86 all CPUs are mapped 1:1 to the APIC space.
++ * This simplifies scheduling and IPI sending and
++ * compresses data structures.
++ */
++
++static inline int num_booting_cpus(void)
++{
++ return cpus_weight(cpu_possible_map);
++}
++
++#define raw_smp_processor_id() read_pda(cpunumber)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++extern int safe_smp_processor_id(void);
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++extern unsigned num_processors;
++extern unsigned disabled_cpus;
++
++#endif /* !ASSEMBLY */
++
++#define NO_PROC_ID 0xFF /* No processor magic marker */
++
++#endif
++
++#ifndef ASSEMBLY
++/*
++ * Some low-level functions might want to know about
++ * the real APIC ID <-> CPU # mapping.
++ */
++extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
++extern u8 x86_cpu_to_log_apicid[NR_CPUS];
++extern u8 bios_cpu_apicid[];
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ return cpus_addr(cpumask)[0];
++}
++
++static inline int cpu_present_to_apicid(int mps_cpu)
++{
++ if (mps_cpu < NR_CPUS)
++ return (int)bios_cpu_apicid[mps_cpu];
++ else
++ return BAD_APICID;
++}
++#endif
++
++#endif /* !ASSEMBLY */
++
++#ifndef CONFIG_SMP
++#define stack_smp_processor_id() 0
++#define safe_smp_processor_id() 0
++#define cpu_logical_map(x) (x)
++#else
++#include <asm/thread_info.h>
++#define stack_smp_processor_id() \
++({ \
++ struct thread_info *ti; \
++ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
++ ti->cpu; \
++})
++#endif
++
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
++#else
++#define cpu_physical_id(cpu) boot_cpu_id
++#endif
++
++#endif
++
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/synch_bitops.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/synch_bitops.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/synch_bitops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/synch_bitops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/mach-xen/asm/synch_bitops.h>
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/system.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/system.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/system.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/system.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,256 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/alternative.h>
++
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/arch-x86_64.h>
++
++#ifdef __KERNEL__
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
++#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
++
++/* frame pointer must be last for get_wchan */
++#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t"
++
++#define __EXTRA_CLOBBER \
++ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
++
++#define switch_to(prev,next,last) \
++ asm volatile(SAVE_CONTEXT \
++ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
++ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
++ "call __switch_to\n\t" \
++ ".globl thread_return\n" \
++ "thread_return:\n\t" \
++ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
++ "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
++ "movq %%rax,%%rdi\n\t" \
++ "jc ret_from_fork\n\t" \
++ RESTORE_CONTEXT \
++ : "=a" (last) \
++ : [next] "S" (next), [prev] "D" (prev), \
++ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
++ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
++ [tif_fork] "i" (TIF_FORK), \
++ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
++ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
++ : "memory", "cc" __EXTRA_CLOBBER)
++
++extern void load_gs_index(unsigned);
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value) \
++ asm volatile("\n" \
++ "1:\t" \
++ "movl %k0,%%" #seg "\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3:\t" \
++ "movl %1,%%" #seg "\n\t" \
++ "jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n\t" \
++ ".align 8\n\t" \
++ ".quad 1b,3b\n" \
++ ".previous" \
++ : :"r" (value), "r" (0))
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++
++static inline unsigned long read_cr0(void)
++{
++ unsigned long cr0;
++ asm volatile("movq %%cr0,%0" : "=r" (cr0));
++ return cr0;
++}
++
++static inline void write_cr0(unsigned long val)
++{
++ asm volatile("movq %0,%%cr0" :: "r" (val));
++}
++
++#define read_cr3() ({ \
++ unsigned long __dummy; \
++ asm("movq %%cr3,%0" : "=r" (__dummy)); \
++ machine_to_phys(__dummy); \
++})
++
++static inline unsigned long read_cr4(void)
++{
++ unsigned long cr4;
++ asm("movq %%cr4,%0" : "=r" (cr4));
++ return cr4;
++}
++
++static inline void write_cr4(unsigned long val)
++{
++ asm volatile("movq %0,%%cr4" :: "r" (val));
++}
++
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory");
++
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible.
++ */
++static inline void sched_cacheflush(void)
++{
++ wbinvd();
++}
++
++#endif /* __KERNEL__ */
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++#define __xg(x) ((volatile long *)(x))
++
++static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
++{
++ *ptr = val;
++}
++
++#define _set_64bit set_64bit
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
++ * strictly the primitive is invalid unless *ptr is treated as an output
++ * argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("xchgb %b0,%1"
++ :"=q" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 2:
++ __asm__ __volatile__("xchgw %w0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 4:
++ __asm__ __volatile__("xchgl %k0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 8:
++ __asm__ __volatile__("xchgq %0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ }
++ return x;
++}
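++
++/*
++ * Usage sketch (illustrative; "my_flag" is a hypothetical unsigned long):
++ * xchg() swaps unconditionally and returns the old value, so it can
++ * claim ownership of a flag word in one atomic step:
++ *
++ *	if (xchg(&my_flag, 1) == 0)
++ *		... we were first; proceed ...
++ */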
++
++/*
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
++ * store NEW in MEM. Return the initial value in MEM. Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#define __HAVE_ARCH_CMPXCHG 1
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 8:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
++
++#define cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
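++
++/*
++ * Usage sketch (illustrative; "counter" is a hypothetical unsigned long):
++ * the canonical compare-and-swap retry loop built on cmpxchg():
++ *
++ *	unsigned long old, new;
++ *	do {
++ *		old = counter;
++ *		new = old + 1;
++ *	} while (cmpxchg(&counter, old, new) != old);
++ */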
++
++#ifdef CONFIG_SMP
++#define smp_mb() mb()
++#define smp_rmb() rmb()
++#define smp_wmb() wmb()
++#define smp_read_barrier_depends() do {} while(0)
++#else
++#define smp_mb() barrier()
++#define smp_rmb() barrier()
++#define smp_wmb() barrier()
++#define smp_read_barrier_depends() do {} while(0)
++#endif
++
++
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ */
++#define mb() asm volatile("mfence":::"memory")
++#define rmb() asm volatile("lfence":::"memory")
++
++#ifdef CONFIG_UNORDERED_IO
++#define wmb() asm volatile("sfence" ::: "memory")
++#else
++#define wmb() asm volatile("" ::: "memory")
++#endif
++#define read_barrier_depends() do {} while(0)
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
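++
++/*
++ * Pairing sketch (illustrative; "data" and "ready" are hypothetical
++ * shared variables): the barriers above pair up producer/consumer style:
++ *
++ *	producer:  data = 42; smp_wmb(); ready = 1;
++ *	consumer:  while (!ready) cpu_relax(); smp_rmb(); use(data);
++ */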
++
++#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
++
++#include <linux/irqflags.h>
++
++void cpu_idle_wait(void);
++
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/timer.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/timer.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/timer.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/timer.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,67 @@
++#ifndef _ASMi386_TIMER_H
++#define _ASMi386_TIMER_H
++#include <linux/init.h>
++
++/**
++ * struct timer_opts - used to define a timer source
++ *
++ * @name: name of the timer.
++ * @init: Probes and initializes the timer. Takes clock= override
++ * string as an argument. Returns 0 on success, anything else
++ * on failure.
++ * @mark_offset: called by the timer interrupt.
++ * @get_offset: called by gettimeofday(). Returns the number of microseconds
++ * since the last timer interrupt.
++ * @monotonic_clock: returns the number of nanoseconds since the init of the
++ * timer.
++ * @delay: delays this many clock cycles.
++ */
++struct timer_opts {
++ char* name;
++ void (*mark_offset)(void);
++ unsigned long (*get_offset)(void);
++ unsigned long long (*monotonic_clock)(void);
++ void (*delay)(unsigned long);
++ unsigned long (*read_timer)(void);
++ int (*suspend)(pm_message_t state);
++ int (*resume)(void);
++};
++
++struct init_timer_opts {
++ int (*init)(char *override);
++ struct timer_opts *opts;
++};
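++
++/*
++ * Illustrative sketch (not part of the original patch): how a timer
++ * source would populate the two structures above. All names here are
++ * hypothetical; the real instances (timer_pit, timer_tsc, ...) are
++ * declared further down.
++ */
++#if 0
++static struct timer_opts timer_example = {
++ .name = "example",
++ .mark_offset = example_mark_offset, /* run from the timer interrupt */
++ .get_offset = example_get_offset, /* usecs since the last tick */
++ .monotonic_clock = example_monotonic_clock,
++ .delay = example_delay,
++};
++
++static struct init_timer_opts timer_example_init = {
++ .init = example_init, /* probe; returns 0 on success */
++ .opts = &timer_example,
++};
++#endif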
++
++#define TICK_SIZE (tick_nsec / 1000)
++
++extern struct timer_opts* __init select_timer(void);
++extern void clock_fallback(void);
++void setup_pit_timer(void);
++
++/* Modifiers for buggy PIT handling */
++
++extern int pit_latch_buggy;
++
++extern struct timer_opts *cur_timer;
++extern int timer_ack;
++
++/* list of externed timers */
++extern struct timer_opts timer_none;
++extern struct timer_opts timer_pit;
++extern struct init_timer_opts timer_pit_init;
++extern struct init_timer_opts timer_tsc_init;
++#ifdef CONFIG_X86_CYCLONE_TIMER
++extern struct init_timer_opts timer_cyclone_init;
++#endif
++
++extern unsigned long calibrate_tsc(void);
++extern void init_cpu_khz(void);
++#ifdef CONFIG_HPET_TIMER
++extern struct init_timer_opts timer_hpet_init;
++extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern struct init_timer_opts timer_pmtmr_init;
++#endif
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/tlbflush.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/tlbflush.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/tlbflush.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/tlbflush.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,103 @@
++#ifndef _X8664_TLBFLUSH_H
++#define _X8664_TLBFLUSH_H
++
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++
++/*
++ * Global pages have to be flushed a bit differently. Not a real
++ * performance problem because this does not happen often.
++ */
++#define __flush_tlb_global() xen_tlb_flush()
++
++
++extern unsigned long pgkern_mask;
++
++#define __flush_tlb_all() __flush_tlb_global()
++
++#define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr)
++
++
++/*
++ * TLB flushing:
++ *
++ * - flush_tlb() flushes the current mm struct TLBs
++ * - flush_tlb_all() flushes all processes TLBs
++ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ * - flush_tlb_page(vma, vmaddr) flushes one page
++ * - flush_tlb_range(vma, start, end) flushes a range of pages
++ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * x86-64 can only flush individual pages or full VMs. For a range flush
++ * we always do the full VM. It might be worth testing whether a few
++ * INVLPGs in a row are a win for small ranges.
++ */
++
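++/*
++ * Illustrative sketch (not part of the original patch): the usual
++ * pattern after changing a live PTE -- update the page table entry,
++ * then invalidate the stale translation for that one address. The
++ * function name is hypothetical.
++ */
++#if 0
++static void example_wrprotect_page(struct vm_area_struct *vma,
++ unsigned long addr, pte_t *ptep)
++{
++ set_pte_at(vma->vm_mm, addr, ptep, pte_wrprotect(*ptep));
++ flush_tlb_page(vma, addr); /* drop the cached translation */
++}
++#endif
++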
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ if (mm == current->active_mm)
++ __flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++ __flush_tlb()
++
++#define flush_tlb_all xen_tlb_flush_all
++#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
++#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
++#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
++
++#define flush_tlb() flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++ flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK 1
++#define TLBSTATE_LAZY 2
++
++/* Roughly an IPI every 20MB with 4k pages for freeing page table
++ ranges. Cost is about 42k of memory for each CPU. */
++#define ARCH_FREE_PTE_NR 5350
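++/* Worked out (not in the original comment): 5350 pages x 4 KiB is
++ roughly 21 MB unmapped per IPI, and a 5350-entry pointer array is
++ 5350 x 8 bytes, roughly 42 KB per CPU. */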
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ /* x86_64 does not keep any page table caches in a software TLB.
++ The CPUs do cache page-table entries in their hardware TLBs, but
++ those are handled by the normal TLB flushing algorithms. */
++}
++
++#endif /* _X8664_TLBFLUSH_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/vga.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/vga.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/vga.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/vga.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,20 @@
++/*
++ * Access to VGA videoram
++ *
++ * (c) 1998 Martin Mares <mj@ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ * On the PC, we can just recalculate addresses and then
++ * access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/xenoprof.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/xenoprof.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/xenoprof.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/xenoprof.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1 @@
++#include <asm-i386/mach-xen/asm/xenoprof.h>
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/xor.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/xor.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/asm/xor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/asm/xor.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,328 @@
++/*
++ * x86-64 changes / gcc fixes from Andi Kleen.
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ *
++ * This hasn't been optimized for the hammer yet, but there are likely
++ * no advantages to be gained on x86-64 here anyway.
++ */
++
++typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
++
++/* Doesn't use gcc to save the XMM registers, because there is no easy way to
++ tell it to do a clts before the register saving. */
++#define XMMS_SAVE do { \
++ preempt_disable(); \
++ if (!(current_thread_info()->status & TS_USEDFPU)) \
++ clts(); \
++ __asm__ __volatile__ ( \
++ "movups %%xmm0,(%1) ;\n\t" \
++ "movups %%xmm1,0x10(%1) ;\n\t" \
++ "movups %%xmm2,0x20(%1) ;\n\t" \
++ "movups %%xmm3,0x30(%1) ;\n\t" \
++ : "=&r" (cr0) \
++ : "r" (xmm_save) \
++ : "memory"); \
++} while(0)
++
++#define XMMS_RESTORE do { \
++ asm volatile ( \
++ "sfence ;\n\t" \
++ "movups (%1),%%xmm0 ;\n\t" \
++ "movups 0x10(%1),%%xmm1 ;\n\t" \
++ "movups 0x20(%1),%%xmm2 ;\n\t" \
++ "movups 0x30(%1),%%xmm3 ;\n\t" \
++ : \
++ : "r" (cr0), "r" (xmm_save) \
++ : "memory"); \
++ if (!(current_thread_info()->status & TS_USEDFPU)) \
++ stts(); \
++ preempt_enable(); \
++} while(0)
++
++#define OFFS(x) "16*("#x")"
++#define PF_OFFS(x) "256+16*("#x")"
++#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
++#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
++#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
++#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
++#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
++#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
++#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
++#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
++#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
++#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
++#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
++#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
++#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
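++
++/*
++ * For reference (not part of the original patch): after string pasting,
++ * LD(0,0) expands to " movaps 16*(0)(%[p1]), %%xmm0 ;\n" and PF1(2)
++ * to " prefetchnta 256+16*(2)(%[p2]) ;\n", so each BLOCK(i) below
++ * moves 4 registers x 16 bytes = 64 bytes, and the four BLOCKs per
++ * loop iteration cover the 256 bytes implied by "lines = bytes >> 8".
++ */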
++
++
++static void
++xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
++{
++ unsigned int lines = bytes >> 8;
++ unsigned long cr0;
++ xmm_store_t xmm_save[4];
++
++ XMMS_SAVE;
++
++ asm volatile (
++#undef BLOCK
++#define BLOCK(i) \
++ LD(i,0) \
++ LD(i+1,1) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
++ : [inc] "r" (256UL)
++ : "memory");
++
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+r" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
++ : [inc] "r" (256UL)
++ : "memory");
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3, unsigned long *p4)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ PF3(i) \
++ PF3(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ XO3(i,0) \
++ XO3(i+1,1) \
++ XO3(i+2,2) \
++ XO3(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " addq %[inc], %[p4] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+c" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
++ : [inc] "r" (256UL)
++ : "memory" );
++
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3, unsigned long *p4, unsigned long *p5)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ PF3(i) \
++ PF3(i+2) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ PF4(i) \
++ PF4(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO3(i,0) \
++ XO3(i+1,1) \
++ XO3(i+2,2) \
++ XO3(i+3,3) \
++ XO4(i,0) \
++ XO4(i+1,1) \
++ XO4(i+2,2) \
++ XO4(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " addq %[inc], %[p4] ;\n"
++ " addq %[inc], %[p5] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+c" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
++ [p5] "+r" (p5)
++ : [inc] "r" (256UL)
++ : "memory");
++
++ XMMS_RESTORE;
++}
++
++static struct xor_block_template xor_block_sse = {
++ .name = "generic_sse",
++ .do_2 = xor_sse_2,
++ .do_3 = xor_sse_3,
++ .do_4 = xor_sse_4,
++ .do_5 = xor_sse_5,
++};
++
++#undef XOR_TRY_TEMPLATES
++#define XOR_TRY_TEMPLATES \
++ do { \
++ xor_speed(&xor_block_sse); \
++ } while (0)
++
++/* We force the use of the SSE xor block because it can write around L2.
++ We may also be able to load into the L1 only depending on how the cpu
++ deals with a load to a line that is being prefetched. */
++#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/irq_vectors.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/irq_vectors.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/irq_vectors.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/irq_vectors.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,123 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ * FIRST_EXTERNAL_VECTOR:
++ * The first free place for external interrupts
++ *
++ * SYSCALL_VECTOR:
++ * The IRQ vector under which a syscall makes the user-to-kernel
++ * transition.
++ *
++ * TIMER_IRQ:
++ * The IRQ number the timer interrupt comes in at.
++ *
++ * NR_IRQS:
++ * The total number of interrupt vectors (including all the
++ * architecture specific interrupts) needed.
++ *
++ */
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define SYSCALL_VECTOR 0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare'; they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define INVALIDATE_TLB_VECTOR 0xfd
++#define RESCHEDULE_VECTOR 0xfc
++#define CALL_FUNCTION_VECTOR 0xfb
++
++#define THERMAL_APIC_VECTOR 0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef
++
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR 0
++#define CALL_FUNCTION_VECTOR 1
++#define NR_IPIS 2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ 13
++
++#define FIRST_VM86_IRQ 3
++#define LAST_VM86_IRQ 15
++#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ * 1. A one-to-one mapping of real physical IRQs. This space is only used
++ * if we have physical device-access privilege. This region is at the
++ * start of the IRQ space so that existing device drivers do not need
++ * to be modified to translate physical IRQ numbers into our IRQ space.
++ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ * are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE 0
++#define NR_PIRQS 256
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
++
++#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS NR_IRQS
++
++#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
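++
++/*
++ * Worked example (not part of the original patch): with PIRQ_BASE = 0
++ * and NR_PIRQS = 256, physical IRQ 14 maps to pirq_to_irq(14) == 14,
++ * while dynamic IRQ 3 maps to dynirq_to_irq(3) == 256 + 3 == 259.
++ */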
++
++#endif /* _ASM_IRQ_VECTORS_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/mach_time.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/mach_time.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/mach_time.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/mach_time.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,111 @@
++/*
++ * include/asm-i386/mach-default/mach_time.h
++ *
++ * Machine specific set RTC function for generic.
++ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++#ifndef _MACH_TIME_H
++#define _MACH_TIME_H
++
++#include <asm-i386/mc146818rtc.h>
++
++/* for check timing call set_rtc_mmss() 500ms */
++/* used in arch/i386/time.c::do_timer_interrupt() */
++#define USEC_AFTER 500000
++#define USEC_BEFORE 500000
++
++/*
++ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
++ * called 500 ms after the second nowtime has started, because when
++ * nowtime is written into the registers of the CMOS clock, it will
++ * jump to the next second precisely 500 ms later. Check the Motorola
++ * MC146818A or Dallas DS12887 data sheet for details.
++ *
++ * BUG: This routine does not handle hour overflow properly; it just
++ * sets the minutes. Usually you'll only notice that after reboot!
++ */
++static inline int mach_set_rtc_mmss(unsigned long nowtime)
++{
++ int retval = 0;
++ int real_seconds, real_minutes, cmos_minutes;
++ unsigned char save_control, save_freq_select;
++
++ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
++ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
++
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
++ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
++
++ cmos_minutes = CMOS_READ(RTC_MINUTES);
++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++ BCD_TO_BIN(cmos_minutes);
++
++ /*
++ * since we're only adjusting minutes and seconds,
++ * don't interfere with hour overflow. This avoids
++ * messing with unknown time zones but requires your
++ * RTC not to be off by more than 15 minutes
++ */
++ real_seconds = nowtime % 60;
++ real_minutes = nowtime / 60;
++ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
++ real_minutes += 30; /* correct for half hour time zone */
++ real_minutes %= 60;
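++ /*
++ * Worked example (not part of the original patch): if nowtime says
++ * 33 minutes while the RTC reads 05, abs(33 - 5) == 28 and
++ * ((28 + 15) / 30) & 1 == 1, so real_minutes becomes 63 % 60 == 3:
++ * the RTC keeps its half-hour zone offset and only its two-minute
++ * error is corrected below.
++ */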
++
++ if (abs(real_minutes - cmos_minutes) < 30) {
++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++ BIN_TO_BCD(real_seconds);
++ BIN_TO_BCD(real_minutes);
++ }
++ CMOS_WRITE(real_seconds,RTC_SECONDS);
++ CMOS_WRITE(real_minutes,RTC_MINUTES);
++ } else {
++ printk(KERN_WARNING
++ "set_rtc_mmss: can't update from %d to %d\n",
++ cmos_minutes, real_minutes);
++ retval = -1;
++ }
++
++ /* The following flags have to be released exactly in this order,
++ * otherwise the DS12887 (popular MC146818A clone with integrated
++ * battery and quartz) will not reset the oscillator and will not
++ * update precisely 500 ms later. You won't find this mentioned in
++ * the Dallas Semiconductor data sheets, but who believes data
++ * sheets anyway ... -- Markus Kuhn
++ */
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++
++ return retval;
++}
++
++static inline unsigned long mach_get_cmos_time(void)
++{
++ unsigned int year, mon, day, hour, min, sec;
++
++ do {
++ sec = CMOS_READ(RTC_SECONDS);
++ min = CMOS_READ(RTC_MINUTES);
++ hour = CMOS_READ(RTC_HOURS);
++ day = CMOS_READ(RTC_DAY_OF_MONTH);
++ mon = CMOS_READ(RTC_MONTH);
++ year = CMOS_READ(RTC_YEAR);
++ } while (sec != CMOS_READ(RTC_SECONDS));
++
++ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++ BCD_TO_BIN(sec);
++ BCD_TO_BIN(min);
++ BCD_TO_BIN(hour);
++ BCD_TO_BIN(day);
++ BCD_TO_BIN(mon);
++ BCD_TO_BIN(year);
++ }
++
++ year += 1900;
++ if (year < 1970)
++ year += 100;
++
++ return mktime(year, mon, day, hour, min, sec);
++}
++
++#endif /* !_MACH_TIME_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/mach_timer.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/mach_timer.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/mach_timer.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/mach_timer.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,50 @@
++/*
++ * include/asm-i386/mach-default/mach_timer.h
++ *
++ * Machine specific calibrate_tsc() for generic.
++ * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++/* ------ Calibrate the TSC -------
++ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
++ * Too much 64-bit arithmetic here to do this cleanly in C, and for
++ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
++ * output busy loop as low as possible. We avoid reading the CTC registers
++ * directly because of the awkward 8-bit access mechanism of the 82C54
++ * device.
++ */
++#ifndef _MACH_TIMER_H
++#define _MACH_TIMER_H
++
++#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
++#define CALIBRATE_LATCH \
++ ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
++
++static inline void mach_prepare_counter(void)
++{
++ /* Set the Gate high, disable speaker */
++ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
++
++ /*
++ * Now let's take care of CTC channel 2
++ *
++ * Set the Gate high, program CTC channel 2 for mode 0,
++ * (interrupt on terminal count mode), binary count,
++ * load CALIBRATE_LATCH count, (LSB and MSB) to begin countdown.
++ *
++ * Some devices need a delay here.
++ */
++ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
++ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
++ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
++}
++
++static inline void mach_countup(unsigned long *count_p)
++{
++ unsigned long count = 0;
++ do {
++ count++;
++ } while ((inb_p(0x61) & 0x20) == 0);
++ *count_p = count;
++}
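++
++/*
++ * Illustrative sketch (not part of the original patch): how the two
++ * helpers above are combined on the calibrate_tsc() path. The rdtscl()
++ * reads bracket the PIT countdown, so the cycle delta divided by
++ * CALIBRATE_TIME_MSEC gives cycles per millisecond. Simplified: the
++ * real code also guards against PIT misbehaviour and overflow.
++ */
++#if 0
++static unsigned long example_calibrate_tsc(void)
++{
++ unsigned long start, end, count;
++
++ mach_prepare_counter(); /* arm PIT channel 2 */
++ rdtscl(start);
++ mach_countup(&count); /* spin until terminal count */
++ rdtscl(end);
++
++ return (end - start) / CALIBRATE_TIME_MSEC; /* cycles per msec */
++}
++#endif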
++
++#endif /* !_MACH_TIMER_H */
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/setup_arch_post.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/setup_arch_post.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/setup_arch_post.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/setup_arch_post.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,63 @@
++/**
++ * machine_specific_* - Hooks for machine specific setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++#include <xen/interface/callback.h>
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++static void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long) hypervisor_callback,
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = (unsigned long)failsafe_callback,
++ };
++ static struct callback_register __initdata syscall = {
++ .type = CALLBACKTYPE_syscall,
++ .address = (unsigned long)system_call,
++ };
++#ifdef CONFIG_X86_LOCAL_APIC
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = (unsigned long)nmi,
++ };
++#endif
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address,
++ failsafe.address,
++ syscall.address);
++#endif
++ BUG_ON(ret);
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++#endif
++}
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/mach-xen/setup_arch_pre.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/setup_arch_pre.h
+--- linux-2.6.18.8/include/asm-x86_64/mach-xen/setup_arch_pre.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/mach-xen/setup_arch_pre.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++static void __init machine_specific_arch_setup(void);
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/msi.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/msi.h
+--- linux-2.6.18.8/include/asm-x86_64/msi.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/msi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -7,14 +7,21 @@
+ #define ASM_MSI_H
+
+ #include <asm/desc.h>
++#ifndef CONFIG_XEN
+ #include <asm/mach_apic.h>
++#endif
+ #include <asm/smp.h>
+
++#ifndef CONFIG_XEN
+ #define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
++#else
++#define LAST_DYNAMIC_VECTOR 0xdf
++#define LAST_DEVICE_VECTOR (LAST_DYNAMIC_VECTOR)
++#endif
++
+ #define MSI_TARGET_CPU_SHIFT 12
+
+ extern struct msi_ops msi_apic_ops;
+-
+ static inline int msi_arch_init(void)
+ {
+ msi_register(&msi_apic_ops);
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/signal.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/signal.h
+--- linux-2.6.18.8/include/asm-x86_64/signal.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/signal.h 2008-08-21 11:36:07.000000000 +0200
+@@ -24,10 +24,6 @@ typedef struct {
+ } sigset_t;
+
+
+-struct pt_regs;
+-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+-
+-
+ #else
+ /* Here we must cater to libcs that poke about in kernel headers. */
+
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/thread_info.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/thread_info.h
+--- linux-2.6.18.8/include/asm-x86_64/thread_info.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/thread_info.h 2008-08-21 11:36:07.000000000 +0200
+@@ -114,6 +114,7 @@ static inline struct thread_info *stack_
+ #define TIF_IRET 5 /* force IRET */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
+ /* 16 free */
+ #define TIF_IA32 17 /* 32bit process */
+ #define TIF_FORK 18 /* ret_from_fork */
+@@ -128,6 +129,7 @@ static inline struct thread_info *stack_
+ #define _TIF_IRET (1<<TIF_IRET)
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1<<TIF_SECCOMP)
++#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+ #define _TIF_IA32 (1<<TIF_IA32)
+ #define _TIF_FORK (1<<TIF_FORK)
+ #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
+diff -rpuN linux-2.6.18.8/include/asm-x86_64/unistd.h linux-2.6.18-xen-3.3.0/include/asm-x86_64/unistd.h
+--- linux-2.6.18.8/include/asm-x86_64/unistd.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/asm-x86_64/unistd.h 2008-08-21 11:36:07.000000000 +0200
+@@ -600,9 +600,9 @@ __SYSCALL(__NR_fchmodat, sys_fchmodat)
+ #define __NR_faccessat 269
+ __SYSCALL(__NR_faccessat, sys_faccessat)
+ #define __NR_pselect6 270
+-__SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
++__SYSCALL(__NR_pselect6, sys_pselect6)
+ #define __NR_ppoll 271
+-__SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */
++__SYSCALL(__NR_ppoll, sys_ppoll)
+ #define __NR_unshare 272
+ __SYSCALL(__NR_unshare, sys_unshare)
+ #define __NR_set_robust_list 273
+@@ -658,6 +658,7 @@ do { \
+ #define __ARCH_WANT_SYS_SIGPENDING
+ #define __ARCH_WANT_SYS_SIGPROCMASK
+ #define __ARCH_WANT_SYS_RT_SIGACTION
++#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+ #define __ARCH_WANT_SYS_TIME
+ #define __ARCH_WANT_COMPAT_SYS_TIME
+
+diff -rpuN linux-2.6.18.8/include/linux/aio.h linux-2.6.18-xen-3.3.0/include/linux/aio.h
+--- linux-2.6.18.8/include/linux/aio.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/aio.h 2008-08-21 11:36:07.000000000 +0200
+@@ -191,6 +191,11 @@ struct kioctx {
+ struct aio_ring_info ring_info;
+
+ struct work_struct wq;
++#ifdef CONFIG_EPOLL
++ /* poll integration */
++ wait_queue_head_t poll_wait;
++ struct file *file;
++#endif
+ };
+
+ /* prototypes */
+diff -rpuN linux-2.6.18.8/include/linux/bio.h linux-2.6.18-xen-3.3.0/include/linux/bio.h
+--- linux-2.6.18.8/include/linux/bio.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/bio.h 2008-08-21 11:36:07.000000000 +0200
+@@ -172,12 +172,27 @@ struct bio {
+ #define bio_offset(bio) bio_iovec((bio))->bv_offset
+ #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
+ #define bio_sectors(bio) ((bio)->bi_size >> 9)
+-#define bio_cur_sectors(bio) (bio_iovec(bio)->bv_len >> 9)
+-#define bio_data(bio) (page_address(bio_page((bio))) + bio_offset((bio)))
+ #define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
+ #define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
+ #define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+ #define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
++#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
++
++static inline unsigned int bio_cur_sectors(struct bio *bio)
++{
++ if (bio->bi_vcnt)
++ return bio_iovec(bio)->bv_len >> 9;
++
++ return 0;
++}
++
++static inline void *bio_data(struct bio *bio)
++{
++ if (bio->bi_vcnt)
++ return page_address(bio_page(bio)) + bio_offset(bio);
++
++ return NULL;
++}
+
+ /*
+ * will die
+diff -rpuN linux-2.6.18.8/include/linux/blkdev.h linux-2.6.18-xen-3.3.0/include/linux/blkdev.h
+--- linux-2.6.18.8/include/linux/blkdev.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/blkdev.h 2008-08-21 11:36:07.000000000 +0200
+@@ -506,6 +506,8 @@ enum {
+ #define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
+ #define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)
+
++#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
++
+ #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+
+ #define rq_data_dir(rq) ((rq)->flags & 1)
+diff -rpuN linux-2.6.18.8/include/linux/crash_dump.h linux-2.6.18-xen-3.3.0/include/linux/crash_dump.h
+--- linux-2.6.18.8/include/linux/crash_dump.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/crash_dump.h 2008-08-21 11:36:07.000000000 +0200
+@@ -14,5 +14,13 @@ extern ssize_t copy_oldmem_page(unsigned
+ extern const struct file_operations proc_vmcore_operations;
+ extern struct proc_dir_entry *proc_vmcore;
+
++/* Architecture code defines this if there are other possible ELF
++ * machine types, e.g. on bi-arch capable hardware. */
++#ifndef vmcore_elf_check_arch_cross
++#define vmcore_elf_check_arch_cross(x) 0
++#endif
++
++#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
++
+ #endif /* CONFIG_CRASH_DUMP */
+ #endif /* LINUX_CRASHDUMP_H */
+diff -rpuN linux-2.6.18.8/include/linux/debugfs.h linux-2.6.18-xen-3.3.0/include/linux/debugfs.h
+--- linux-2.6.18.8/include/linux/debugfs.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/debugfs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -41,6 +41,8 @@ struct dentry *debugfs_create_u16(const
+ struct dentry *parent, u16 *value);
+ struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+ struct dentry *parent, u32 *value);
++struct dentry *debugfs_create_u64(const char *name, mode_t mode,
++ struct dentry *parent, u64 *value);
+ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+ struct dentry *parent, u32 *value);
+
+@@ -94,6 +96,13 @@ static inline struct dentry *debugfs_cre
+ return ERR_PTR(-ENODEV);
+ }
+
++static inline struct dentry *debugfs_create_u64(const char *name, mode_t mode,
++ struct dentry *parent,
++ u64 *value)
++{
++ return ERR_PTR(-ENODEV);
++}
++
+ static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+ struct dentry *parent,
+ u32 *value)
+diff -rpuN linux-2.6.18.8/include/linux/elfnote.h linux-2.6.18-xen-3.3.0/include/linux/elfnote.h
+--- linux-2.6.18.8/include/linux/elfnote.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/elfnote.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,104 @@
++#ifndef _LINUX_ELFNOTE_H
++#define _LINUX_ELFNOTE_H
++/*
++ * Helper macros to generate ELF Note structures, which are put into a
++ * PT_NOTE segment of the final vmlinux image. These are useful for
++ * including name-value pairs of metadata into the kernel binary (or
++ * modules?) for use by external programs.
++ *
++ * Each note has three parts: a name, a type and a desc. The name is
++ * intended to distinguish the note's originator, so it would be a
++ * company, project, subsystem, etc; it must be in a suitable form for
++ * use in a section name. The type is an integer which is used to tag
++ * the data, and is considered to be within the "name" namespace (so
++ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The
++ * "desc" field is the actual data. There are no constraints on the
++ * desc field's contents, though typically they're fairly small.
++ *
++ * All notes from a given NAME are put into a section named
++ * .note.NAME. When the kernel image is finally linked, all the notes
++ * are packed into a single .notes section, which is mapped into the
++ * PT_NOTE segment. Because notes for a given name are grouped into
++ * the same section, they'll all be adjacent in the output file.
++ *
++ * This file defines macros for both C and assembler use. Their
++ * syntax is slightly different, but they're semantically similar.
++ *
++ * See the ELF specification for more detail about ELF notes.
++ */
++
++#ifdef __ASSEMBLER__
++/*
++ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
++ * turn out to be the same size and shape), followed by the name and
++ * desc data with appropriate padding. The 'desctype' argument is the
++ * assembler pseudo op defining the type of the data, e.g. .asciz, while
++ * 'descdata' is the data itself, e.g. "hello, world".
++ *
++ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
++ * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
++ */
++#ifdef __STDC__
++#define ELFNOTE(name, type, desctype, descdata...) \
++.pushsection .note.name ; \
++ .align 4 ; \
++ .long 2f - 1f /* namesz */ ; \
++ .long 4f - 3f /* descsz */ ; \
++ .long type ; \
++1:.asciz #name ; \
++2:.align 4 ; \
++3:desctype descdata ; \
++4:.align 4 ; \
++.popsection
++#else /* !__STDC__, i.e. -traditional */
++#define ELFNOTE(name, type, desctype, descdata) \
++.pushsection .note.name ; \
++ .align 4 ; \
++ .long 2f - 1f /* namesz */ ; \
++ .long 4f - 3f /* descsz */ ; \
++ .long type ; \
++1:.asciz "name" ; \
++2:.align 4 ; \
++3:desctype descdata ; \
++4:.align 4 ; \
++.popsection
++#endif /* __STDC__ */
++#else /* !__ASSEMBLER__ */
++#include <linux/elf.h>
++/*
++ * Use an anonymous structure which matches the shape of
++ * Elf{32,64}_Nhdr, but includes the name and desc data. The size and
++ * type of name and desc depend on the macro arguments. "name" must
++ * be a literal string, and "desc" must be passed by value. You may
++ * only define one note per line, since __LINE__ is used to generate
++ * unique symbols.
++ */
++#define _ELFNOTE_PASTE(a,b) a##b
++#define _ELFNOTE(size, name, unique, type, desc) \
++ static const struct { \
++ struct elf##size##_note _nhdr; \
++ unsigned char _name[sizeof(name)] \
++ __attribute__((aligned(sizeof(Elf##size##_Word)))); \
++ typeof(desc) _desc \
++ __attribute__((aligned(sizeof(Elf##size##_Word)))); \
++ } _ELFNOTE_PASTE(_note_, unique) \
++ __attribute_used__ \
++ __attribute__((section(".note." name), \
++ aligned(sizeof(Elf##size##_Word)), \
++ unused)) = { \
++ { \
++ sizeof(name), \
++ sizeof(desc), \
++ type, \
++ }, \
++ name, \
++ desc \
++ }
++#define ELFNOTE(size, name, type, desc) \
++ _ELFNOTE(size, name, __LINE__, type, desc)
++
++#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
++#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
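++
++/*
++ * Illustrative usage (not part of the original patch): emitting a note
++ * from C. The vendor name and type number are made up; the note lands
++ * in a section named .note.XYZCo.
++ */
++#if 0
++ELFNOTE32("XYZCo", 0x2a, 0x0000002a);
++#endif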
++#endif /* __ASSEMBLER__ */
++
++#endif /* _LINUX_ELFNOTE_H */
+diff -rpuN linux-2.6.18.8/include/linux/eventpoll.h linux-2.6.18-xen-3.3.0/include/linux/eventpoll.h
+--- linux-2.6.18.8/include/linux/eventpoll.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/eventpoll.h 2008-08-21 11:36:07.000000000 +0200
+@@ -90,6 +90,12 @@ static inline void eventpoll_release(str
+ eventpoll_release_file(file);
+ }
+
++/*
++ * called by aio code to create an fd that can poll the aio event queue
++ */
++struct eventpoll;
++int ep_getfd(int *efd, struct inode **einode, struct file **efile,
++ struct eventpoll *ep, const struct file_operations *fops);
+ #else
+
+ static inline void eventpoll_init_file(struct file *file) {}
+diff -rpuN linux-2.6.18.8/include/linux/highmem.h linux-2.6.18-xen-3.3.0/include/linux/highmem.h
+--- linux-2.6.18.8/include/linux/highmem.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/highmem.h 2008-08-21 11:36:07.000000000 +0200
+@@ -24,10 +24,16 @@ static inline void flush_kernel_dcache_p
+
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
++#ifdef CONFIG_XEN
++void kmap_flush_unused(void);
++#endif
+
+ #else /* CONFIG_HIGHMEM */
+
+ static inline unsigned int nr_free_highpages(void) { return 0; }
++#ifdef CONFIG_XEN
++static inline void kmap_flush_unused(void) { }
++#endif
+
+ static inline void *kmap(struct page *page)
+ {
+diff -rpuN linux-2.6.18.8/include/linux/interrupt.h linux-2.6.18-xen-3.3.0/include/linux/interrupt.h
+--- linux-2.6.18.8/include/linux/interrupt.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/interrupt.h 2008-08-21 11:36:07.000000000 +0200
+@@ -166,6 +166,12 @@ static inline int disable_irq_wake(unsig
+
+ #endif /* CONFIG_GENERIC_HARDIRQS */
+
++#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
++int irq_ignore_unhandled(unsigned int irq);
++#else
++#define irq_ignore_unhandled(irq) 0
++#endif
++
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+ #define set_softirq_pending(x) (local_softirq_pending() = (x))
+ #define or_softirq_pending(x) (local_softirq_pending() |= (x))
+diff -rpuN linux-2.6.18.8/include/linux/ioport.h linux-2.6.18-xen-3.3.0/include/linux/ioport.h
+--- linux-2.6.18.8/include/linux/ioport.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/ioport.h 2008-08-21 11:36:07.000000000 +0200
+@@ -93,6 +93,9 @@ struct resource_list {
+ /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
+ extern struct resource ioport_resource;
+ extern struct resource iomem_resource;
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++extern struct resource iomem_machine_resource;
++#endif
+
+ extern int request_resource(struct resource *root, struct resource *new);
+ extern struct resource * ____request_resource(struct resource *root, struct resource *new);
+diff -rpuN linux-2.6.18.8/include/linux/kexec.h linux-2.6.18-xen-3.3.0/include/linux/kexec.h
+--- linux-2.6.18.8/include/linux/kexec.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/kexec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -31,6 +31,13 @@
+ #error KEXEC_ARCH not defined
+ #endif
+
++#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) page_to_pfn(page)
++#define kexec_pfn_to_page(pfn) pfn_to_page(pfn)
++#define kexec_virt_to_phys(addr) virt_to_phys(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(addr)
++#endif
++
+ /*
+ * This structure is used to hold the arguments that are used when loading
+ * kernel binaries.
+@@ -91,6 +98,12 @@ struct kimage {
+ extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
+ extern int machine_kexec_prepare(struct kimage *image);
+ extern void machine_kexec_cleanup(struct kimage *image);
++#ifdef CONFIG_XEN
++extern int xen_machine_kexec_load(struct kimage *image);
++extern void xen_machine_kexec_unload(struct kimage *image);
++extern void xen_machine_kexec_setup_resources(void);
++extern void xen_machine_kexec_register_resources(struct resource *res);
++#endif
+ extern asmlinkage long sys_kexec_load(unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+@@ -108,6 +121,10 @@ int kexec_should_crash(struct task_struc
+ extern struct kimage *kexec_image;
+ extern struct kimage *kexec_crash_image;
+
++#ifndef kexec_flush_icache_page
++#define kexec_flush_icache_page(page)
++#endif
++
+ #define KEXEC_ON_CRASH 0x00000001
+ #define KEXEC_ARCH_MASK 0xffff0000
+
+@@ -131,6 +148,7 @@ extern struct resource crashk_res;
+ typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
+ extern note_buf_t *crash_notes;
+
++
+ #else /* !CONFIG_KEXEC */
+ struct pt_regs;
+ struct task_struct;
+diff -rpuN linux-2.6.18.8/include/linux/libata.h linux-2.6.18-xen-3.3.0/include/linux/libata.h
+--- linux-2.6.18.8/include/linux/libata.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/libata.h 2008-08-21 11:36:07.000000000 +0200
+@@ -162,6 +162,12 @@ enum {
+ ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
+ * Register FIS clearing BSY */
+ ATA_FLAG_DEBUGMSG = (1 << 13),
++ ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
++ ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
++ ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
++ ATA_FLAG_AN = (1 << 18), /* controller supports AN */
++ ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
++ ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
+
+ /* The following flag belongs to ap->pflags but is kept in
+ * ap->flags because it's referenced in many LLDs and will be
+diff -rpuN linux-2.6.18.8/include/linux/mm.h linux-2.6.18-xen-3.3.0/include/linux/mm.h
+--- linux-2.6.18.8/include/linux/mm.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/mm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -164,6 +164,10 @@ extern unsigned int kobjsize(const void
+ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+ #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
++#ifdef CONFIG_XEN
++#define VM_FOREIGN 0x04000000 /* Has pages belonging to another VM */
++#endif
++#define VM_ALWAYSDUMP 0x08000000 /* Always include in core dumps */
+
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+@@ -202,6 +206,10 @@ struct vm_operations_struct {
+ /* notification that a previously read-only page is about to become
+ * writable, if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
++ /* Area-specific function for clearing the PTE at @ptep. Returns the
++ * original value of @ptep. */
++ pte_t (*zap_pte)(struct vm_area_struct *vma,
++ unsigned long addr, pte_t *ptep, int is_fullmm);
+ #ifdef CONFIG_NUMA
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+@@ -1027,6 +1035,13 @@ struct page *follow_page(struct vm_area_
+ #define FOLL_GET 0x04 /* do get_page on page */
+ #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
+
++#ifdef CONFIG_XEN
++typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
++ void *data);
++extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
++ unsigned long size, pte_fn_t fn, void *data);
++#endif
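++
++/*
++ * Illustrative sketch (not part of the original patch): the shape of an
++ * apply_to_page_range() callback. The names are hypothetical; the walk
++ * stops early if the callback returns non-zero.
++ */
++#if 0
++static int example_clear_pte(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++ struct mm_struct *mm = data;
++
++ pte_clear(mm, addr, pte); /* visit one PTE in the range */
++ return 0;
++}
++/* apply_to_page_range(mm, start, npages * PAGE_SIZE, example_clear_pte, mm); */
++#endif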
++
+ #ifdef CONFIG_PROC_FS
+ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+ #else
+diff -rpuN linux-2.6.18.8/include/linux/moduleparam.h linux-2.6.18-xen-3.3.0/include/linux/moduleparam.h
+--- linux-2.6.18.8/include/linux/moduleparam.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/moduleparam.h 2008-08-21 11:36:07.000000000 +0200
+@@ -58,13 +58,23 @@ struct kparam_array
+ void *elem;
+ };
+
++/* On alpha, ia64 and ppc64 relocations to global data cannot go into
++ read-only sections (which is part of respective UNIX ABI on these
++ platforms). So 'const' makes no sense and even causes compile failures
++ with some compilers. */
++#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
++#define __moduleparam_const
++#else
++#define __moduleparam_const const
++#endif
++
+ /* This is the fundamental function for registering boot/module
+ parameters. perm sets the visibility in driverfs: 000 means it's
+ not there, read bits mean it's readable, write bits mean it's
+ writable. */
+ #define __module_param_call(prefix, name, set, get, arg, perm) \
+ static char __param_str_##name[] = prefix #name; \
+- static struct kernel_param const __param_##name \
++ static struct kernel_param __moduleparam_const __param_##name \
+ __attribute_used__ \
+ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ = { __param_str_##name, perm, set, get, arg }
+diff -rpuN linux-2.6.18.8/include/linux/netfilter_bridge.h linux-2.6.18-xen-3.3.0/include/linux/netfilter_bridge.h
+--- linux-2.6.18.8/include/linux/netfilter_bridge.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/netfilter_bridge.h 2008-08-21 11:36:07.000000000 +0200
+@@ -7,6 +7,7 @@
+ #include <linux/netfilter.h>
+ #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER)
+ #include <linux/if_ether.h>
++#include <linux/if_vlan.h>
+ #endif
+
+ /* Bridge Hooks */
+@@ -54,16 +55,19 @@ int nf_bridge_maybe_copy_header(struct s
+
+ if (skb->nf_bridge) {
+ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+- err = skb_cow(skb, 18);
++ int header_size = ETH_HLEN + VLAN_HLEN;
++
++ err = skb_cow(skb, header_size);
+ if (err)
+ return err;
+- memcpy(skb->data - 18, skb->nf_bridge->data, 18);
+- skb_push(skb, 4);
++ memcpy(skb->data - header_size, skb->nf_bridge->data,
++ header_size);
++ skb_push(skb, VLAN_HLEN);
+ } else {
+- err = skb_cow(skb, 16);
++ err = skb_cow(skb, ETH_HLEN);
+ if (err)
+ return err;
+- memcpy(skb->data - 16, skb->nf_bridge->data, 16);
++ memcpy(skb->data - ETH_HLEN, skb->nf_bridge->data, ETH_HLEN);
+ }
+ }
+ return 0;
+diff -rpuN linux-2.6.18.8/include/linux/oprofile.h linux-2.6.18-xen-3.3.0/include/linux/oprofile.h
+--- linux-2.6.18.8/include/linux/oprofile.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/oprofile.h 2008-08-21 11:36:07.000000000 +0200
+@@ -16,6 +16,8 @@
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <asm/atomic.h>
++
++#include <xen/interface/xenoprof.h>
+
+ struct super_block;
+ struct dentry;
+@@ -27,6 +29,11 @@ struct oprofile_operations {
+ /* create any necessary configuration files in the oprofile fs.
+ * Optional. */
+ int (*create_files)(struct super_block * sb, struct dentry * root);
++ /* setup active domains with Xen */
++ int (*set_active)(int *active_domains, unsigned int adomains);
++ /* setup passive domains with Xen */
++ int (*set_passive)(int *passive_domains, unsigned int pdomains);
++
+ /* Do any necessary interrupt setup. Optional. */
+ int (*setup)(void);
+ /* Do any necessary interrupt shutdown. Optional. */
+@@ -78,6 +85,8 @@ void oprofile_add_pc(unsigned long pc, i
+ /* add a backtrace entry, to be called from the ->backtrace callback */
+ void oprofile_add_trace(unsigned long eip);
+
++/* add a domain switch entry */
++int oprofile_add_domain_switch(int32_t domain_id);
+
+ /**
+ * Create a file of the given name as a child of the given root, with
+diff -rpuN linux-2.6.18.8/include/linux/page-flags.h linux-2.6.18-xen-3.3.0/include/linux/page-flags.h
+--- linux-2.6.18.8/include/linux/page-flags.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/page-flags.h 2008-08-21 11:36:07.000000000 +0200
+@@ -98,6 +98,8 @@
+ #define PG_uncached 31 /* Page has been mapped as uncached */
+ #endif
+
++#define PG_foreign 20 /* Page is owned by foreign allocator. */
++
+ /*
+ * Manipulation of page state flags
+ */
+@@ -247,6 +249,19 @@
+ #define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
+ #define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
+
++#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
++#define SetPageForeign(_page, dtor) do { \
++ set_bit(PG_foreign, &(_page)->flags); \
++ BUG_ON((dtor) == (void (*)(struct page *))0); \
++ (_page)->index = (long)(dtor); \
++} while (0)
++#define ClearPageForeign(page) do { \
++ clear_bit(PG_foreign, &(page)->flags); \
++ (page)->index = 0; \
++} while (0)
++#define PageForeignDestructor(_page) \
++ ((void (*)(struct page *))(_page)->index)(_page)
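++
++/*
++ * Illustrative sketch (not part of the original patch): the intended
++ * life cycle of a foreign page. The names are hypothetical; the real
++ * users are the Xen grant-table and balloon drivers, which install a
++ * destructor with SetPageForeign() and run it when the page is freed.
++ */
++#if 0
++static void example_foreign_dtor(struct page *page)
++{
++ /* hand the page back to its real owner here */
++}
++
++static void example_free_page(struct page *page)
++{
++ if (PageForeign(page)) {
++ PageForeignDestructor(page); /* calls example_foreign_dtor() */
++ return;
++ }
++ __free_page(page);
++}
++#endif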
++
+ struct page; /* forward declaration */
+
+ int test_clear_page_dirty(struct page *page);
+diff -rpuN linux-2.6.18.8/include/linux/pci.h linux-2.6.18-xen-3.3.0/include/linux/pci.h
+--- linux-2.6.18.8/include/linux/pci.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/pci.h 2008-08-21 11:36:07.000000000 +0200
+@@ -152,6 +152,9 @@ struct pci_dev {
+ * directly, use the values stored here. They might be different!
+ */
+ unsigned int irq;
++#ifdef CONFIG_XEN
++ unsigned int irq_old;
++#endif
+ struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+
+ /* These fields are used by common fixups */
+@@ -596,6 +599,10 @@ static inline int pci_enable_msix(struct
+ struct msix_entry *entries, int nvec) {return -1;}
+ static inline void pci_disable_msix(struct pci_dev *dev) {}
+ static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
++#ifdef CONFIG_XEN
++#define register_msi_get_owner(func) 0
++#define unregister_msi_get_owner(func) 0
++#endif
+ #else
+ extern void pci_scan_msi_device(struct pci_dev *dev);
+ extern int pci_enable_msi(struct pci_dev *dev);
+@@ -604,6 +611,10 @@ extern int pci_enable_msix(struct pci_de
+ struct msix_entry *entries, int nvec);
+ extern void pci_disable_msix(struct pci_dev *dev);
+ extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
++#ifdef CONFIG_XEN
++extern int register_msi_get_owner(int (*func)(struct pci_dev *dev));
++extern int unregister_msi_get_owner(int (*func)(struct pci_dev *dev));
++#endif
+ #endif
+
+ extern void pci_block_user_cfg_access(struct pci_dev *dev);
+diff -rpuN linux-2.6.18.8/include/linux/pci_ids.h linux-2.6.18-xen-3.3.0/include/linux/pci_ids.h
+--- linux-2.6.18.8/include/linux/pci_ids.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/pci_ids.h 2008-08-21 11:36:07.000000000 +0200
+@@ -15,6 +15,8 @@
+ #define PCI_CLASS_STORAGE_FLOPPY 0x0102
+ #define PCI_CLASS_STORAGE_IPI 0x0103
+ #define PCI_CLASS_STORAGE_RAID 0x0104
++#define PCI_CLASS_STORAGE_SATA 0x0106
++#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
+ #define PCI_CLASS_STORAGE_SAS 0x0107
+ #define PCI_CLASS_STORAGE_OTHER 0x0180
+
+@@ -363,6 +365,8 @@
+ #define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380
+ #define PCI_DEVICE_ID_ATI_IXP600_SRAID 0x4381
+ #define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
++#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
++#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
+
+ #define PCI_VENDOR_ID_VLSI 0x1004
+ #define PCI_DEVICE_ID_VLSI_82C592 0x0005
+@@ -2188,6 +2192,13 @@
+ #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
+ #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+ #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
++#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
++#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2911
++#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
++#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
++#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
++#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2915
++#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+ #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
+ #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
+ #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+@@ -2202,6 +2213,12 @@
+ #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
+ #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
+ #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
++#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
++#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
++#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
++#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
++#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
++#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
+ #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+ #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+ #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+diff -rpuN linux-2.6.18.8/include/linux/sched.h linux-2.6.18-xen-3.3.0/include/linux/sched.h
+--- linux-2.6.18.8/include/linux/sched.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/sched.h 2008-08-21 11:36:07.000000000 +0200
+@@ -211,10 +211,15 @@ extern void update_process_times(int use
+ extern void scheduler_tick(void);
+
+ #ifdef CONFIG_DETECT_SOFTLOCKUP
++extern unsigned long softlockup_get_next_event(void);
+ extern void softlockup_tick(void);
+ extern void spawn_softlockup_task(void);
+ extern void touch_softlockup_watchdog(void);
+ #else
++static inline unsigned long softlockup_get_next_event(void)
++{
++ return MAX_JIFFY_OFFSET;
++}
+ static inline void softlockup_tick(void)
+ {
+ }
+diff -rpuN linux-2.6.18.8/include/linux/skbuff.h linux-2.6.18-xen-3.3.0/include/linux/skbuff.h
+--- linux-2.6.18.8/include/linux/skbuff.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/skbuff.h 2008-08-21 11:36:07.000000000 +0200
+@@ -203,6 +203,8 @@ enum {
+ * @local_df: allow local fragmentation
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @nohdr: Payload reference only, must not modify header
++ * @proto_data_valid: Protocol data validated since arriving at localhost
++ * @proto_csum_blank: Protocol csum must be added before leaving localhost
+ * @pkt_type: Packet class
+ * @fclone: skbuff clone status
+ * @ip_summed: Driver fed us an IP checksum
+@@ -282,7 +284,13 @@ struct sk_buff {
+ nfctinfo:3;
+ __u8 pkt_type:3,
+ fclone:2,
++#ifndef CONFIG_XEN
+ ipvs_property:1;
++#else
++ ipvs_property:1,
++ proto_data_valid:1,
++ proto_csum_blank:1;
++#endif
+ __be16 protocol;
+
+ void (*destructor)(struct sk_buff *skb);
+diff -rpuN linux-2.6.18.8/include/linux/sysctl.h linux-2.6.18-xen-3.3.0/include/linux/sysctl.h
+--- linux-2.6.18.8/include/linux/sysctl.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/sysctl.h 2008-08-21 11:36:07.000000000 +0200
+@@ -6,10 +6,17 @@
+ ****************************************************************
+ ****************************************************************
+ **
++ ** WARNING:
+ ** The values in this file are exported to user space via
+- ** the sysctl() binary interface. However this interface
+- ** is unstable and deprecated and will be removed in the future.
+- ** For a stable interface use /proc/sys.
++ ** the sysctl() binary interface. Do *NOT* change the
++ ** numbering of any existing values here, and do not change
++ ** any numbers within any one set of values. If you have to
++ ** redefine an existing interface, use a new number for it.
++ ** The kernel will then return -ENOTDIR to any application using
++ ** the old binary interface.
++ **
++ ** For new interfaces, unless you really need a binary number,
++ ** please use CTL_UNNUMBERED.
+ **
+ ****************************************************************
+ ****************************************************************
+@@ -48,6 +55,7 @@ struct __sysctl_args {
+ #ifdef __KERNEL__
+ #define CTL_ANY -1 /* Matches any name */
+ #define CTL_NONE 0
++#define CTL_UNNUMBERED CTL_NONE /* sysctl without a binary number */
+ #endif
+
+ enum
+diff -rpuN linux-2.6.18.8/include/linux/vermagic.h linux-2.6.18-xen-3.3.0/include/linux/vermagic.h
+--- linux-2.6.18.8/include/linux/vermagic.h 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/linux/vermagic.h 2008-08-21 11:36:07.000000000 +0200
+@@ -17,6 +17,11 @@
+ #else
+ #define MODULE_VERMAGIC_MODULE_UNLOAD ""
+ #endif
++#ifdef CONFIG_XEN
++#define MODULE_VERMAGIC_XEN "Xen "
++#else
++#define MODULE_VERMAGIC_XEN
++#endif
+ #ifndef MODULE_ARCH_VERMAGIC
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
+@@ -24,5 +29,6 @@
+ #define VERMAGIC_STRING \
+ UTS_RELEASE " " \
+ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
+- MODULE_VERMAGIC_MODULE_UNLOAD MODULE_ARCH_VERMAGIC \
++ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_XEN \
++ MODULE_ARCH_VERMAGIC \
+ "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
+diff -rpuN linux-2.6.18.8/include/xen/balloon.h linux-2.6.18-xen-3.3.0/include/xen/balloon.h
+--- linux-2.6.18.8/include/xen/balloon.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/balloon.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,57 @@
++/******************************************************************************
++ * balloon.h
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_BALLOON_H__
++#define __ASM_BALLOON_H__
++
++/*
++ * Inform the balloon driver that it should allow some slop for device-driver
++ * memory activities.
++ */
++void balloon_update_driver_allowance(long delta);
++
++/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
++struct page **alloc_empty_pages_and_pagevec(int nr_pages);
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
++
++void balloon_release_driver_page(struct page *page);
++
++/*
++ * Prevent the balloon driver from changing the memory reservation during
++ * a driver critical region.
++ */
++extern spinlock_t balloon_lock;
++#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
++#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
++
++#endif /* __ASM_BALLOON_H__ */
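Usage sketch (illustrative, not part of the patch): a driver critical region built on the balloon_lock()/balloon_unlock() macros declared above. The surrounding driver code and the work done inside the region are hypothetical.

	/* Hypothetical driver snippet: keep the balloon driver from
	 * changing the reservation while we manipulate page frames. */
	unsigned long flags;

	balloon_lock(flags);	/* takes balloon_lock, disables IRQs */
	/* ... operate on frames the balloon must not reclaim ... */
	balloon_unlock(flags);	/* drops the lock, restores IRQ state */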
+diff -rpuN linux-2.6.18.8/include/xen/blkif.h linux-2.6.18-xen-3.3.0/include/xen/blkif.h
+--- linux-2.6.18.8/include/xen/blkif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/blkif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,123 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_BLKIF_H__
++#define __XEN_BLKIF_H__
++
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/protocols.h>
++
++/* Not a real protocol. Used to generate ring structs which contain
++ * the elements common to all protocols only. This way we get a
++ * compiler-checkable way to use common struct elements, so we can
++ * avoid using switch(protocol) in a number of places. */
++struct blkif_common_request {
++ char dummy;
++};
++struct blkif_common_response {
++ char dummy;
++};
++
++/* i386 protocol version */
++#pragma pack(push, 4)
++struct blkif_x86_32_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t id; /* private guest value, echoed in resp */
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_32_response {
++ uint64_t id; /* copied from request */
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_x86_32_request blkif_x86_32_request_t;
++typedef struct blkif_x86_32_response blkif_x86_32_response_t;
++#pragma pack(pop)
++
++/* x86_64 protocol version */
++struct blkif_x86_64_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t __attribute__((__aligned__(8))) id;
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_64_response {
++ uint64_t __attribute__((__aligned__(8))) id;
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_x86_64_request blkif_x86_64_request_t;
++typedef struct blkif_x86_64_response blkif_x86_64_response_t;
++
++DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
++DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
++DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
++
++union blkif_back_rings {
++ blkif_back_ring_t native;
++ blkif_common_back_ring_t common;
++ blkif_x86_32_back_ring_t x86_32;
++ blkif_x86_64_back_ring_t x86_64;
++};
++typedef union blkif_back_rings blkif_back_rings_t;
++
++enum blkif_protocol {
++ BLKIF_PROTOCOL_NATIVE = 1,
++ BLKIF_PROTOCOL_X86_32 = 2,
++ BLKIF_PROTOCOL_X86_64 = 3,
++};
++
++static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
++{
++ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
++ dst->operation = src->operation;
++ dst->nr_segments = src->nr_segments;
++ dst->handle = src->handle;
++ dst->id = src->id;
++ dst->sector_number = src->sector_number;
++ barrier();
++ if (n > dst->nr_segments)
++ n = dst->nr_segments;
++ for (i = 0; i < n; i++)
++ dst->seg[i] = src->seg[i];
++}
++
++static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
++{
++ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
++ dst->operation = src->operation;
++ dst->nr_segments = src->nr_segments;
++ dst->handle = src->handle;
++ dst->id = src->id;
++ dst->sector_number = src->sector_number;
++ barrier();
++ if (n > dst->nr_segments)
++ n = dst->nr_segments;
++ for (i = 0; i < n; i++)
++ dst->seg[i] = src->seg[i];
++}
++
++#endif /* __XEN_BLKIF_H__ */
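Usage sketch (illustrative, not part of the patch): a backend request loop can dispatch on the negotiated protocol and use the copy helpers above to bring a guest request into native form. The blkif, blk_rings, and rc names are assumed locals, modelled on blkback.

	blkif_request_t req;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
		       sizeof(req));
		break;
	case BLKIF_PROTOCOL_X86_32:
		blkif_get_x86_32_req(&req,
				     RING_GET_REQUEST(&blk_rings->x86_32, rc));
		break;
	case BLKIF_PROTOCOL_X86_64:
		blkif_get_x86_64_req(&req,
				     RING_GET_REQUEST(&blk_rings->x86_64, rc));
		break;
	default:
		BUG();
	}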
+diff -rpuN linux-2.6.18.8/include/xen/compat_ioctl.h linux-2.6.18-xen-3.3.0/include/xen/compat_ioctl.h
+--- linux-2.6.18.8/include/xen/compat_ioctl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/compat_ioctl.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,45 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
++ *
++ * Copyright IBM Corp. 2007
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ * Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#ifndef __LINUX_XEN_COMPAT_H__
++#define __LINUX_XEN_COMPAT_H__
++
++#include <linux/compat.h>
++
++extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg);
++struct privcmd_mmap_32 {
++ int num;
++ domid_t dom;
++ compat_uptr_t entry;
++};
++
++struct privcmd_mmapbatch_32 {
++ int num; /* number of pages to populate */
++ domid_t dom; /* target domain */
++ __u64 addr; /* virtual address */
++ compat_uptr_t arr; /* array of mfns - top nibble set on err */
++};
++#define IOCTL_PRIVCMD_MMAP_32 \
++ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32))
++#define IOCTL_PRIVCMD_MMAPBATCH_32 \
++ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32))
++
++#endif /* __LINUX_XEN_COMPAT_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/cpu_hotplug.h linux-2.6.18-xen-3.3.0/include/xen/cpu_hotplug.h
+--- linux-2.6.18.8/include/xen/cpu_hotplug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/cpu_hotplug.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,41 @@
++#ifndef __XEN_CPU_HOTPLUG_H__
++#define __XEN_CPU_HOTPLUG_H__
++
++#include <linux/kernel.h>
++#include <linux/cpumask.h>
++
++#if defined(CONFIG_X86) && defined(CONFIG_SMP)
++extern cpumask_t cpu_initialized_map;
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU)
++
++int cpu_up_check(unsigned int cpu);
++void init_xenbus_allowed_cpumask(void);
++int smp_suspend(void);
++void smp_resume(void);
++
++void cpu_bringup(void);
++
++#else /* !defined(CONFIG_HOTPLUG_CPU) */
++
++#define cpu_up_check(cpu) (0)
++#define init_xenbus_allowed_cpumask() ((void)0)
++
++static inline int smp_suspend(void)
++{
++ if (num_online_cpus() > 1) {
++ printk(KERN_WARNING "Can't suspend SMP guests "
++ "without CONFIG_HOTPLUG_CPU\n");
++ return -EOPNOTSUPP;
++ }
++ return 0;
++}
++
++static inline void smp_resume(void)
++{
++}
++
++#endif /* !defined(CONFIG_HOTPLUG_CPU) */
++
++#endif /* __XEN_CPU_HOTPLUG_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/driver_util.h linux-2.6.18-xen-3.3.0/include/xen/driver_util.h
+--- linux-2.6.18.8/include/xen/driver_util.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/driver_util.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,14 @@
++
++#ifndef __ASM_XEN_DRIVER_UTIL_H__
++#define __ASM_XEN_DRIVER_UTIL_H__
++
++#include <linux/vmalloc.h>
++#include <linux/device.h>
++
++/* Allocate/destroy a 'vmalloc' VM area. */
++extern struct vm_struct *alloc_vm_area(unsigned long size);
++extern void free_vm_area(struct vm_struct *area);
++
++extern struct class *get_xen_class(void);
++
++#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/evtchn.h linux-2.6.18-xen-3.3.0/include/xen/evtchn.h
+--- linux-2.6.18.8/include/xen/evtchn.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/evtchn.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,153 @@
++/******************************************************************************
++ * evtchn.h
++ *
++ * Communication via Xen event channels.
++ * Also definitions for the device that demuxes notifications to userspace.
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_EVTCHN_H__
++#define __ASM_EVTCHN_H__
++
++#include <linux/interrupt.h>
++#include <asm/hypervisor.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/interface/event_channel.h>
++#include <linux/smp.h>
++
++/*
++ * LOW-LEVEL DEFINITIONS
++ */
++
++/*
++ * Dynamically bind an event source to an IRQ-like callback handler.
++ * On some platforms this may not be implemented via the Linux IRQ subsystem.
++ * The IRQ argument passed to the callback handler is the same as returned
++ * from the bind call. It may not correspond to a Linux IRQ number.
++ * Returns IRQ or negative errno.
++ */
++int bind_caller_port_to_irqhandler(
++ unsigned int caller_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_listening_port_to_irqhandler(
++ unsigned int remote_domain,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_interdomain_evtchn_to_irqhandler(
++ unsigned int remote_domain,
++ unsigned int remote_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_virq_to_irqhandler(
++ unsigned int virq,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_ipi_to_irqhandler(
++ unsigned int ipi,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++
++/*
++ * Common unbind function for all event sources. Takes IRQ to unbind from.
++ * Automatically closes the underlying event channel (except for bindings
++ * made with bind_caller_port_to_irqhandler()).
++ */
++void unbind_from_irqhandler(unsigned int irq, void *dev_id);
++
++void irq_resume(void);
++
++/* Entry point for notifications into Linux subsystems. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
++
++/* Entry point for notifications into the userland character device. */
++void evtchn_device_upcall(int port);
++
++void mask_evtchn(int port);
++void disable_all_local_evtchn(void);
++void unmask_evtchn(int port);
++
++#ifdef CONFIG_SMP
++void rebind_evtchn_to_cpu(int port, unsigned int cpu);
++#else
++#define rebind_evtchn_to_cpu(port, cpu) ((void)0)
++#endif
++
++static inline int test_and_set_evtchn_mask(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ return synch_test_and_set_bit(port, s->evtchn_mask);
++}
++
++static inline void clear_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ synch_clear_bit(port, s->evtchn_pending);
++}
++
++static inline void notify_remote_via_evtchn(int port)
++{
++ struct evtchn_send send = { .port = port };
++ VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send));
++}
++
++/*
++ * Use these to access the event channel underlying the IRQ handle returned
++ * by bind_*_to_irqhandler().
++ */
++void notify_remote_via_irq(int irq);
++int irq_to_evtchn_port(int irq);
++
++#define PIRQ_SET_MAPPING 0x0
++#define PIRQ_CLEAR_MAPPING 0x1
++#define PIRQ_GET_MAPPING 0x3
++int pirq_mapstatus(int pirq, int action);
++int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action));
++int clear_pirq_hw_action(int pirq);
++
++#define PIRQ_STARTUP 1
++#define PIRQ_SHUTDOWN 2
++#define PIRQ_ENABLE 3
++#define PIRQ_DISABLE 4
++#define PIRQ_END 5
++#define PIRQ_ACK 6
++
++#endif /* __ASM_EVTCHN_H__ */
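Usage sketch (illustrative, not part of the patch): binding a virtual IRQ with the helpers above. The choice of VIRQ_DEBUG, the handler body, and the SA_INTERRUPT flag are assumptions.

static irqreturn_t debug_interrupt(int irq, void *dev_id,
				   struct pt_regs *regs)
{
	/* ... react to the event; 'irq' is the value bind returned ... */
	return IRQ_HANDLED;
}

static int __init example_bind(void)
{
	int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0 /* cpu */,
					  debug_interrupt, SA_INTERRUPT,
					  "debug", NULL);
	if (irq < 0)
		return irq;	/* bind failed: negative errno */
	/* ... later: also closes the underlying event channel ... */
	unbind_from_irqhandler(irq, NULL);
	return 0;
}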
+diff -rpuN linux-2.6.18.8/include/xen/features.h linux-2.6.18-xen-3.3.0/include/xen/features.h
+--- linux-2.6.18.8/include/xen/features.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/features.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,20 @@
++/******************************************************************************
++ * features.h
++ *
++ * Query the features reported by Xen.
++ *
++ * Copyright (c) 2006, Ian Campbell
++ */
++
++#ifndef __ASM_XEN_FEATURES_H__
++#define __ASM_XEN_FEATURES_H__
++
++#include <xen/interface/version.h>
++
++extern void setup_xen_features(void);
++
++extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
++
++#define xen_feature(flag) (xen_features[flag])
++
++#endif /* __ASM_XEN_FEATURES_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/firmware.h linux-2.6.18-xen-3.3.0/include/xen/firmware.h
+--- linux-2.6.18.8/include/xen/firmware.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/firmware.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,10 @@
++#ifndef __XEN_FIRMWARE_H__
++#define __XEN_FIRMWARE_H__
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void copy_edd(void);
++#endif
++
++void copy_edid(void);
++
++#endif /* __XEN_FIRMWARE_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/gnttab.h linux-2.6.18-xen-3.3.0/include/xen/gnttab.h
+--- linux-2.6.18.8/include/xen/gnttab.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/gnttab.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,163 @@
++/******************************************************************************
++ * gnttab.h
++ *
++ * Two sets of functionality:
++ * 1. Granting foreign access to our memory reservation.
++ * 2. Accessing others' memory reservations via grant references.
++ * (i.e., mechanisms for both sender and recipient of grant references)
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ * Copyright (c) 2005, Christopher Clark
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_GNTTAB_H__
++#define __ASM_GNTTAB_H__
++
++#include <asm/hypervisor.h>
++#include <asm/maddr.h> /* maddr_t */
++#include <linux/mm.h>
++#include <xen/interface/grant_table.h>
++#include <xen/features.h>
++
++struct gnttab_free_callback {
++ struct gnttab_free_callback *next;
++ void (*fn)(void *);
++ void *arg;
++ u16 count;
++};
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++ int flags);
++
++/*
++ * End access through the given grant reference, iff the grant entry is no
++ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
++ * use.
++ */
++int gnttab_end_foreign_access_ref(grant_ref_t ref);
++
++/*
++ * Eventually end access through the given grant reference, and once that
++ * access has been ended, free the given page too. Access will be ended
++ * immediately iff the grant entry is not in use, otherwise it will happen
++ * some time later. page may be 0, in which case no freeing will occur.
++ */
++void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
++
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
++
++int gnttab_query_foreign_access(grant_ref_t ref);
++
++/*
++ * operations on reserved batches of grant references
++ */
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
++
++void gnttab_free_grant_reference(grant_ref_t ref);
++
++void gnttab_free_grant_references(grant_ref_t head);
++
++int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
++
++int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++ grant_ref_t release);
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++ void (*fn)(void *), void *arg, u16 count);
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++ unsigned long frame, int flags);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
++ unsigned long pfn);
++
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
++void __gnttab_dma_map_page(struct page *page);
++static inline void __gnttab_dma_unmap_page(struct page *page)
++{
++}
++
++void gnttab_reset_grant_page(struct page *page);
++
++int gnttab_suspend(void);
++int gnttab_resume(void);
++
++void *arch_gnttab_alloc_shared(unsigned long *frames);
++
++static inline void
++gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
++ uint32_t flags, grant_ref_t ref, domid_t domid)
++{
++ if (flags & GNTMAP_contains_pte)
++ map->host_addr = addr;
++ else if (xen_feature(XENFEAT_auto_translated_physmap))
++ map->host_addr = __pa(addr);
++ else
++ map->host_addr = addr;
++
++ map->flags = flags;
++ map->ref = ref;
++ map->dom = domid;
++}
++
++static inline void
++gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
++ uint32_t flags, grant_handle_t handle)
++{
++ if (flags & GNTMAP_contains_pte)
++ unmap->host_addr = addr;
++ else if (xen_feature(XENFEAT_auto_translated_physmap))
++ unmap->host_addr = __pa(addr);
++ else
++ unmap->host_addr = addr;
++
++ unmap->handle = handle;
++ unmap->dev_bus_addr = 0;
++}
++
++static inline void
++gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
++ maddr_t new_addr, grant_handle_t handle)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ unmap->host_addr = __pa(addr);
++ unmap->new_addr = __pa(new_addr);
++ } else {
++ unmap->host_addr = addr;
++ unmap->new_addr = new_addr;
++ }
++
++ unmap->handle = handle;
++}
++
++#endif /* __ASM_GNTTAB_H__ */
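Usage sketch (illustrative, not part of the patch): the typical grant/revoke cycle on the frontend side. backend_domid and page are assumed, and virt_to_mfn() comes from the asm/maddr.h helpers this header already includes.

	/* Grant the backend read/write access to one of our frames. */
	int ref = gnttab_grant_foreign_access(backend_domid,
					      virt_to_mfn(page_address(page)),
					      0 /* 0 => writable */);
	if (ref < 0)
		return ref;	/* no free grant references */

	/* ... advertise 'ref' via xenstore; the backend maps it with a
	 * GNTTABOP_map_grant_ref built by gnttab_set_map_op() ... */

	/* Revoke: frees the entry now if idle, otherwise later. */
	gnttab_end_foreign_access(ref, 0 /* do not free a page */);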
+diff -rpuN linux-2.6.18.8/include/xen/hvm.h linux-2.6.18-xen-3.3.0/include/xen/hvm.h
+--- linux-2.6.18.8/include/xen/hvm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/hvm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,23 @@
++/* Simple wrappers around HVM functions */
++#ifndef XEN_HVM_H__
++#define XEN_HVM_H__
++
++#include <xen/interface/hvm/params.h>
++
++static inline unsigned long hvm_get_parameter(int idx)
++{
++ struct xen_hvm_param xhv;
++ int r;
++
++ xhv.domid = DOMID_SELF;
++ xhv.index = idx;
++ r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
++ if (r < 0) {
++ printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
++ idx, r);
++ return 0;
++ }
++ return xhv.value;
++}
++
++#endif /* XEN_HVM_H__ */
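Usage sketch (illustrative): a PV-on-HVM driver can use this wrapper to look up well-known parameters from xen/interface/hvm/params.h, such as the xenstore event channel; note the 0-on-error convention above.

	unsigned long store_evtchn =
		hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
	if (store_evtchn == 0)
		return -ENODEV;	/* parameter unavailable */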
+diff -rpuN linux-2.6.18.8/include/xen/hypercall.h linux-2.6.18-xen-3.3.0/include/xen/hypercall.h
+--- linux-2.6.18.8/include/xen/hypercall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/hypercall.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,30 @@
++#ifndef __XEN_HYPERCALL_H__
++#define __XEN_HYPERCALL_H__
++
++#include <asm/hypercall.h>
++
++static inline int __must_check
++HYPERVISOR_multicall_check(
++ multicall_entry_t *call_list, unsigned int nr_calls,
++ const unsigned long *rc_list)
++{
++ int rc = HYPERVISOR_multicall(call_list, nr_calls);
++
++ if (unlikely(rc < 0))
++ return rc;
++ BUG_ON(rc);
++ BUG_ON((int)nr_calls < 0);
++
++ for ( ; nr_calls > 0; --nr_calls, ++call_list)
++ if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
++ return nr_calls;
++
++ return 0;
++}
++
++/* A construct to ignore the return value of hypercall wrappers in a few
++ * exceptional cases (simply casting the function result to void doesn't
++ * avoid the compiler warning): */
++#define VOID(expr) ((void)((expr)?:0))
++
++#endif /* __XEN_HYPERCALL_H__ */
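Usage sketch (illustrative, not part of the patch): batching two hypercalls and verifying that each sub-call succeeded. Passing NULL for rc_list means every per-call result must be zero; the choice of ops here is hypothetical.

	multicall_entry_t calls[2];

	calls[0].op = __HYPERVISOR_fpu_taskswitch;
	calls[0].args[0] = 1;
	calls[1].op = __HYPERVISOR_fpu_taskswitch;
	calls[1].args[0] = 0;

	if (HYPERVISOR_multicall_check(calls, 2, NULL))
		/* nonzero: the batch or one of its sub-calls failed */
		return -EIO;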
+diff -rpuN linux-2.6.18.8/include/xen/hypervisor_sysfs.h linux-2.6.18-xen-3.3.0/include/xen/hypervisor_sysfs.h
+--- linux-2.6.18.8/include/xen/hypervisor_sysfs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/hypervisor_sysfs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,30 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _HYP_SYSFS_H_
++#define _HYP_SYSFS_H_
++
++#include <linux/kobject.h>
++#include <linux/sysfs.h>
++
++#define HYPERVISOR_ATTR_RO(_name) \
++static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
++
++#define HYPERVISOR_ATTR_RW(_name) \
++static struct hyp_sysfs_attr _name##_attr = \
++ __ATTR(_name, 0644, _name##_show, _name##_store)
++
++struct hyp_sysfs_attr {
++ struct attribute attr;
++ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
++ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
++ void *hyp_attr_data;
++};
++
++#endif /* _HYP_SYSFS_H_ */
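Usage sketch (illustrative, not part of the patch): declaring a read-only hypervisor attribute with the macro above; the attribute name and its contents are hypothetical.

static ssize_t flavor_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	return sprintf(buffer, "xen\n");
}
HYPERVISOR_ATTR_RO(flavor);
/* register later with sysfs_create_file(kobj, &flavor_attr.attr) */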
+diff -rpuN linux-2.6.18.8/include/xen/interface/acm.h linux-2.6.18-xen-3.3.0/include/xen/interface/acm.h
+--- linux-2.6.18.8/include/xen/interface/acm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/acm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,228 @@
++/*
++ * acm.h: Xen access control module interface definitions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
++
++#include "xen.h"
++
++/* If ACM_DEBUG is defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode)
++ */
++/* #define ACM_DEBUG */
++
++#ifdef ACM_DEBUG
++# define printkd(fmt, args...) printk(fmt,## args)
++#else
++# define printkd(fmt, args...)
++#endif
++
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID 0x0
++#define ACM_DEFAULT_LOCAL_SSID 0x0
++
++/* Internal ACM ERROR types */
++#define ACM_OK 0
++#define ACM_UNDEF -1
++#define ACM_INIT_SSID_ERROR -2
++#define ACM_INIT_SOID_ERROR -3
++#define ACM_ERROR -4
++
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED 0
++#define ACM_ACCESS_DENIED -111
++#define ACM_NULL_POINTER_ERROR -200
++
++/*
++ Error codes reported when trying to test for a new policy.
++ These error codes are reported in an array of tuples where
++ each error code is followed by a parameter describing the error
++ more closely, such as a domain id.
++*/
++#define ACM_EVTCHN_SHARING_VIOLATION 0x100
++#define ACM_GNTTAB_SHARING_VIOLATION 0x101
++#define ACM_DOMAIN_LOOKUP 0x102
++#define ACM_CHWALL_CONFLICT 0x103
++#define ACM_SSIDREF_IN_USE 0x104
++
++
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
++
++/* combinations have secondary policy component in higher 4bit */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
++
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \
++ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \
++ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
++ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
++ "UNDEFINED"
++
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 3
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION 1
++
++/* defines a ssid reference used by xen */
++typedef uint32_t ssidref_t;
++
++/* hooks that are known to domains */
++#define ACMHOOK_none 0
++#define ACMHOOK_sharing 1
++
++/* -------security policy relevant type definitions-------- */
++
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
++
++/* CHINESE WALL POLICY DATA STRUCTURES
++ *
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the
++ * conflicting types to its type are decremented.
++ * If a domain has multiple types, this procedure works over
++ * all those types.
++ *
++ * conflict_aggregate_set[i] holds the number of
++ * running domains that have a conflict with type i.
++ *
++ * running_types[i] holds the number of running domains
++ * that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ * with type i and is "1" otherwise.
++ */
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC 0x0001debc
++
++/* Each offset is in bytes from the start of the struct it
++ * is part of. */
++
++/* V3 of the policy buffer added a version structure */
++struct acm_policy_version
++{
++ uint32_t major;
++ uint32_t minor;
++};
++
++
++/* each buffer consists of all policy information for
++ * the respective policy given in the policy code
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because we create binary policies also with external
++ * tools that assume packed representations (e.g. the java tool)
++ */
++struct acm_policy_buffer {
++ uint32_t policy_version; /* ACM_POLICY_VERSION */
++ uint32_t magic;
++ uint32_t len;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_buffer_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_buffer_offset;
++ struct acm_policy_version xml_pol_version; /* add in V3 */
++};
++
++
++struct acm_policy_reference_buffer {
++ uint32_t len;
++};
++
++struct acm_chwall_policy_buffer {
++ uint32_t policy_version; /* ACM_CHWALL_VERSION */
++ uint32_t policy_code;
++ uint32_t chwall_max_types;
++ uint32_t chwall_max_ssidrefs;
++ uint32_t chwall_max_conflictsets;
++ uint32_t chwall_ssid_offset;
++ uint32_t chwall_conflict_sets_offset;
++ uint32_t chwall_running_types_offset;
++ uint32_t chwall_conflict_aggregate_offset;
++};
++
++struct acm_ste_policy_buffer {
++ uint32_t policy_version; /* ACM_STE_VERSION */
++ uint32_t policy_code;
++ uint32_t ste_max_types;
++ uint32_t ste_max_ssidrefs;
++ uint32_t ste_ssid_offset;
++};
++
++struct acm_stats_buffer {
++ uint32_t magic;
++ uint32_t len;
++ uint32_t primary_policy_code;
++ uint32_t primary_stats_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_stats_offset;
++};
++
++struct acm_ste_stats_buffer {
++ uint32_t ec_eval_count;
++ uint32_t gt_eval_count;
++ uint32_t ec_denied_count;
++ uint32_t gt_denied_count;
++ uint32_t ec_cachehit_count;
++ uint32_t gt_cachehit_count;
++};
++
++struct acm_ssid_buffer {
++ uint32_t len;
++ ssidref_t ssidref;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_max_types;
++ uint32_t primary_types_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_max_types;
++ uint32_t secondary_types_offset;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/acm_ops.h linux-2.6.18-xen-3.3.0/include/xen/interface/acm_ops.h
+--- linux-2.6.18.8/include/xen/interface/acm_ops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/acm_ops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,159 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005,2006 International Business Machines Corporation.
++ */
++
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
++
++#include "xen.h"
++#include "acm.h"
++
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This makes sure that old versions of acm tools will stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define ACM_INTERFACE_VERSION 0xAAAA000A
++
++/************************************************************************/
++
++/*
++ * Prototype for this hypercall is:
++ * int acm_op(int cmd, void *args)
++ * @cmd == ACMOP_??? (access control module operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++
++#define ACMOP_setpolicy 1
++struct acm_setpolicy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pushcache;
++ uint32_t pushcache_size;
++};
++
++
++#define ACMOP_getpolicy 2
++struct acm_getpolicy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_dumpstats 3
++struct acm_dumpstats {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_getssid 4
++#define ACM_GETBY_ssidref 1
++#define ACM_GETBY_domainid 2
++struct acm_getssid {
++ /* IN */
++ uint32_t get_ssid_by; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id;
++ XEN_GUEST_HANDLE_64(void) ssidbuf;
++ uint32_t ssidbuf_size;
++};
++
++#define ACMOP_getdecision 5
++struct acm_getdecision {
++ /* IN */
++ uint32_t get_decision_by1; /* ACM_GETBY_* */
++ uint32_t get_decision_by2; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id1;
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id2;
++ uint32_t hook;
++ /* OUT */
++ uint32_t acm_decision;
++};
++
++
++#define ACMOP_chgpolicy 6
++struct acm_change_policy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) policy_pushcache;
++ uint32_t policy_pushcache_size;
++ XEN_GUEST_HANDLE_64(void) del_array;
++ uint32_t delarray_size;
++ XEN_GUEST_HANDLE_64(void) chg_array;
++ uint32_t chgarray_size;
++ /* OUT */
++ /* array with error code */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++#define ACMOP_relabeldoms 7
++struct acm_relabel_doms {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) relabel_map;
++ uint32_t relabel_map_size;
++ /* OUT */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++/* future interface to Xen */
++struct xen_acmctl {
++ uint32_t cmd;
++ uint32_t interface_version;
++ union {
++ struct acm_setpolicy setpolicy;
++ struct acm_getpolicy getpolicy;
++ struct acm_dumpstats dumpstats;
++ struct acm_getssid getssid;
++ struct acm_getdecision getdecision;
++ struct acm_change_policy change_policy;
++ struct acm_relabel_doms relabel_doms;
++ } u;
++};
++
++typedef struct xen_acmctl xen_acmctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
++
++#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-ia64/debug_op.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/debug_op.h
+--- linux-2.6.18.8/include/xen/interface/arch-ia64/debug_op.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/debug_op.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,96 @@
++/******************************************************************************
++ * debug_op.h
++ *
++ * Copyright (c) 2007 Tristan Gingold <tgingold@free.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_PUBLIC_IA64_DEBUG_OP_H__
++#define __XEN_PUBLIC_IA64_DEBUG_OP_H__
++
++/* Set/Get extra conditions to break. */
++#define XEN_IA64_DEBUG_OP_SET_FLAGS 1
++#define XEN_IA64_DEBUG_OP_GET_FLAGS 2
++
++/* Break on kernel single step. */
++#define XEN_IA64_DEBUG_ON_KERN_SSTEP (1 << 0)
++
++/* Break on kernel debug (breakpoint or watch point). */
++#define XEN_IA64_DEBUG_ON_KERN_DEBUG (1 << 1)
++
++/* Break on kernel taken branch. */
++#define XEN_IA64_DEBUG_ON_KERN_TBRANCH (1 << 2)
++
++/* Break on interrupt injection. */
++#define XEN_IA64_DEBUG_ON_EXTINT (1 << 3)
++
++/* Break on exception injection. */
++#define XEN_IA64_DEBUG_ON_EXCEPT (1 << 4)
++
++/* Break on event injection. */
++#define XEN_IA64_DEBUG_ON_EVENT (1 << 5)
++
++/* Break on privop/virtualized instruction (slow path only). */
++#define XEN_IA64_DEBUG_ON_PRIVOP (1 << 6)
++
++/* Break on emulated PAL call (at entry). */
++#define XEN_IA64_DEBUG_ON_PAL (1 << 7)
++
++/* Break on emulated SAL call (at entry). */
++#define XEN_IA64_DEBUG_ON_SAL (1 << 8)
++
++/* Break on emulated EFI call (at entry). */
++#define XEN_IA64_DEBUG_ON_EFI (1 << 9)
++
++/* Break on rfi emulation (slow path only, before exec). */
++#define XEN_IA64_DEBUG_ON_RFI (1 << 10)
++
++/* Break on address translation switch. */
++#define XEN_IA64_DEBUG_ON_MMU (1 << 11)
++
++/* Break on bad guest physical address. */
++#define XEN_IA64_DEBUG_ON_BAD_MPA (1 << 12)
++
++/* Force psr.ss bit. */
++#define XEN_IA64_DEBUG_FORCE_SS (1 << 13)
++
++/* Force psr.db bit. */
++#define XEN_IA64_DEBUG_FORCE_DB (1 << 14)
++
++/* Break on ITR/PTR. */
++#define XEN_IA64_DEBUG_ON_TR (1 << 15)
++
++/* Break on ITC/PTC.L/PTC.G/PTC.GA. */
++#define XEN_IA64_DEBUG_ON_TC (1 << 16)
++
++/* Get translation cache. */
++#define XEN_IA64_DEBUG_OP_GET_TC 3
++
++/* Translate virtual address to guest physical address. */
++#define XEN_IA64_DEBUG_OP_TRANSLATE 4
++
++union xen_ia64_debug_op {
++ uint64_t flags;
++ struct xen_ia64_debug_vtlb {
++ uint64_t nbr; /* IN/OUT */
++ XEN_GUEST_HANDLE_64(ia64_tr_entry_t) tr; /* IN/OUT */
++ } vtlb;
++};
++typedef union xen_ia64_debug_op xen_ia64_debug_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_ia64_debug_op_t);
++
++#endif /* __XEN_PUBLIC_IA64_DEBUG_OP_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-ia64/hvm/memmap.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/hvm/memmap.h
+--- linux-2.6.18.8/include/xen/interface/arch-ia64/hvm/memmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/hvm/memmap.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * memmap.h
++ *
++ * Copyright (c) 2008 Tristan Gingold <tgingold AT free fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_PUBLIC_HVM_MEMMAP_IA64_H__
++#define __XEN_PUBLIC_HVM_MEMMAP_IA64_H__
++
++#define MEM_G (1UL << 30)
++#define MEM_M (1UL << 20)
++#define MEM_K (1UL << 10)
++
++/* Guest physical address of IO ports space. */
++#define MMIO_START (3 * MEM_G)
++#define MMIO_SIZE (512 * MEM_M)
++
++#define VGA_IO_START 0xA0000UL
++#define VGA_IO_SIZE 0x20000
++
++#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
++#define LEGACY_IO_SIZE (64 * MEM_M)
++
++#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
++#define IO_PAGE_SIZE XEN_PAGE_SIZE
++
++#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
++#define STORE_PAGE_SIZE XEN_PAGE_SIZE
++
++#define BUFFER_IO_PAGE_START (STORE_PAGE_START + STORE_PAGE_SIZE)
++#define BUFFER_IO_PAGE_SIZE XEN_PAGE_SIZE
++
++#define BUFFER_PIO_PAGE_START (BUFFER_IO_PAGE_START + BUFFER_IO_PAGE_SIZE)
++#define BUFFER_PIO_PAGE_SIZE XEN_PAGE_SIZE
++
++#define IO_SAPIC_START 0xfec00000UL
++#define IO_SAPIC_SIZE 0x100000
++
++#define PIB_START 0xfee00000UL
++#define PIB_SIZE 0x200000
++
++#define GFW_START (4 * MEM_G - 16 * MEM_M)
++#define GFW_SIZE (16 * MEM_M)
++
++/* domVTI */
++#define GPFN_FRAME_BUFFER 0x1 /* VGA framebuffer */
++#define GPFN_LOW_MMIO 0x2 /* Low MMIO range */
++#define GPFN_PIB 0x3 /* PIB base */
++#define GPFN_IOSAPIC 0x4 /* IOSAPIC base */
++#define GPFN_LEGACY_IO 0x5 /* Legacy I/O base */
++#define GPFN_HIGH_MMIO 0x6 /* High MMIO range */
++
++/* Nvram belongs to GFW memory space */
++#define NVRAM_SIZE (MEM_K * 64)
++#define NVRAM_START (GFW_START + 10 * MEM_M)
++
++#define NVRAM_VALID_SIG 0x4650494e45584948 /* "HIXENIPF" */
++struct nvram_save_addr {
++ unsigned long addr;
++ unsigned long signature;
++};
++
++#endif /* __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-ia64/hvm/save.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/hvm/save.h
+--- linux-2.6.18.8/include/xen/interface/arch-ia64/hvm/save.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/hvm/save.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,201 @@
++/******************************************************************************
++ * save_types.h
++ *
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_IA64_H__
++#define __XEN_PUBLIC_HVM_SAVE_IA64_H__
++
++#include <public/hvm/save.h>
++#include <public/arch-ia64.h>
++
++/*
++ * Save/restore header: general info about the save file.
++ */
++
++/* x86 uses 0x54381286 */
++#define HVM_FILE_MAGIC 0x343641492f6e6558UL /* "Xen/IA64" */
++#define HVM_FILE_VERSION 0x0000000000000001UL
++
++struct hvm_save_header {
++ uint64_t magic; /* Must be HVM_FILE_MAGIC */
++ uint64_t version; /* File format version */
++ uint64_t changeset; /* Version of Xen that saved this file */
++ uint64_t cpuid[5]; /* CPUID[0x01][%eax] on the saving machine */
++};
++
++DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
++
++/*
++ * CPU
++ */
++struct hvm_hw_ia64_cpu {
++ uint64_t ipsr;
++};
++DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_ia64_cpu);
++
++/*
++ * CPU
++ */
++struct hvm_hw_ia64_vpd {
++ struct vpd vpd;
++};
++DECLARE_HVM_SAVE_TYPE(VPD, 3, struct hvm_hw_ia64_vpd);
++
++/*
++ * device dependency
++ * vacpi => viosapic => vlsapic
++ */
++/*
++ * vlsapic
++ */
++struct hvm_hw_ia64_vlsapic {
++ uint64_t insvc[4];
++ uint64_t vhpi; // ??? should this be saved in vpd
++ uint8_t xtp;
++ uint8_t pal_init_pending;
++ uint8_t pad[2];
++};
++DECLARE_HVM_SAVE_TYPE(VLSAPIC, 4, struct hvm_hw_ia64_vlsapic);
++/* On restore:
++ * unconditionally set v->arch.irq_new_pending = 1
++ * unconditionally set v->arch.irq_new_condition = 0
++ */
++
++/*
++ * vtime
++ */
++/* itc, itm, itv are saved by arch vcpu context */
++struct hvm_hw_ia64_vtime {
++ uint64_t itc;
++ uint64_t itm;
++
++ uint64_t last_itc;
++ uint64_t pending;
++};
++DECLARE_HVM_SAVE_TYPE(VTIME, 5, struct hvm_hw_ia64_vtime);
++/*
++ * calculate v->vtm.vtm_offset
++ * ??? Or should vtm_offset be set by leave_hypervisor_tail()?
++ * start vtm_timer if necessary by vtm_set_itm().
++ * ??? Or should vtm_timer be set by leave_hypervisor_tail()?
++ *
++ * ??? or should be done by schedule_tail()
++ * => schedule_tail() should do.
++ */
++
++/*
++ * viosapic
++ */
++#define VIOSAPIC_NUM_PINS 48
++
++union viosapic_rte
++{
++ uint64_t bits;
++ struct {
++ uint8_t vector;
++
++ uint8_t delivery_mode : 3;
++ uint8_t reserve1 : 1;
++ uint8_t delivery_status: 1;
++ uint8_t polarity : 1;
++ uint8_t reserve2 : 1;
++ uint8_t trig_mode : 1;
++
++ uint8_t mask : 1;
++ uint8_t reserve3 : 7;
++
++ uint8_t reserved[3];
++ uint16_t dest_id;
++ };
++};
++
++struct hvm_hw_ia64_viosapic {
++ uint64_t irr;
++ uint64_t isr;
++ uint32_t ioregsel;
++ uint32_t pad;
++ uint64_t lowest_vcpu_id;
++ uint64_t base_address;
++ union viosapic_rte redirtbl[VIOSAPIC_NUM_PINS];
++};
++DECLARE_HVM_SAVE_TYPE(VIOSAPIC, 6, struct hvm_hw_ia64_viosapic);
++
++/*
++ * vacpi
++ * PM timer
++ */
++struct vacpi_regs {
++ union {
++ struct {
++ uint32_t pm1a_sts:16;/* PM1a_EVT_BLK.PM1a_STS: status register */
++ uint32_t pm1a_en:16; /* PM1a_EVT_BLK.PM1a_EN: enable register */
++ };
++ uint32_t evt_blk;
++ };
++ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
++};
++
++struct hvm_hw_ia64_vacpi {
++ struct vacpi_regs regs;
++};
++DECLARE_HVM_SAVE_TYPE(VACPI, 7, struct hvm_hw_ia64_vacpi);
++/* update last_gtime and setup timer of struct vacpi */
++
++/*
++ * opt_feature: identity mapping of region 4, 5 and 7.
++ * With the c/s 16396:d2935f9c217f of xen-ia64-devel.hg,
++ * opt_feature hypercall supports only region 4,5,7 identity mappings.
++ * so struct hvm_hw_ia64_identity_mappings covers only those.
++ * A separate structure is used so that the save format does not
++ * have to track changes to the xen/ia64 internal structure,
++ * struct opt_feature.
++ *
++ * If the hypercall is extended in the future, a new structure
++ * will be created.
++ */
++struct hvm_hw_ia64_identity_mapping {
++ uint64_t on; /* on/off */
++ uint64_t pgprot; /* The page protection bit mask of the pte. */
++ uint64_t key; /* A protection key. */
++};
++
++struct hvm_hw_ia64_identity_mappings {
++ struct hvm_hw_ia64_identity_mapping im_reg4;/* Region 4 identity mapping */
++ struct hvm_hw_ia64_identity_mapping im_reg5;/* Region 5 identity mapping */
++ struct hvm_hw_ia64_identity_mapping im_reg7;/* Region 7 identity mapping */
++};
++DECLARE_HVM_SAVE_TYPE(OPT_FEATURE_IDENTITY_MAPPINGS, 8, struct hvm_hw_ia64_identity_mappings);
++
++/*
++ * Largest type-code in use
++ */
++#define HVM_SAVE_CODE_MAX 8
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_IA64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-ia64/sioemu.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/sioemu.h
+--- linux-2.6.18.8/include/xen/interface/arch-ia64/sioemu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64/sioemu.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,89 @@
++/******************************************************************************
++ * sioemu.h
++ *
++ * Copyright (c) 2008 Tristan Gingold <tgingold@free.fr>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_PUBLIC_IA64_SIOEMU_H__
++#define __XEN_PUBLIC_IA64_SIOEMU_H__
++
++/* SIOEMU specific hypercalls.
++ The numbers are the minor part of FW_HYPERCALL_SIOEMU. */
++
++/* Defines the callback entry point. r8=ip, r9=data.
++ Must be called per-vcpu. */
++#define SIOEMU_HYPERCALL_SET_CALLBACK 0x01
++
++/* Finish sioemu fw initialization and start firmware. r8=ip. */
++#define SIOEMU_HYPERCALL_START_FW 0x02
++
++/* Add IO pages in physmap. */
++#define SIOEMU_HYPERCALL_ADD_IO_PHYSMAP 0x03
++
++/* Get wallclock time. */
++#define SIOEMU_HYPERCALL_GET_TIME 0x04
++
++/* Flush cache. */
++#define SIOEMU_HYPERCALL_FLUSH_CACHE 0x07
++
++/* Get freq base. */
++#define SIOEMU_HYPERCALL_FREQ_BASE 0x08
++
++/* Return from callback. */
++#define SIOEMU_HYPERCALL_CALLBACK_RETURN 0x09
++
++/* Deliver an interrupt. */
++#define SIOEMU_HYPERCALL_DELIVER_INT 0x0a
++
++/* SIOEMU callback reason. */
++
++/* An event (from event channel) has to be delivered. */
++#define SIOEMU_CB_EVENT 0x00
++
++/* Emulate an IO access. */
++#define SIOEMU_CB_IO_EMULATE 0x01
++
++/* An IPI is sent to a dead vcpu. */
++#define SIOEMU_CB_WAKEUP_VCPU 0x02
++
++/* A SAL hypercall is executed. */
++#define SIOEMU_CB_SAL_ASSIST 0x03
++
++#ifndef __ASSEMBLY__
++struct sioemu_callback_info {
++ /* Saved registers. */
++ unsigned long ip;
++ unsigned long psr;
++ unsigned long ifs;
++ unsigned long nats;
++ unsigned long r8;
++ unsigned long r9;
++ unsigned long r10;
++ unsigned long r11;
++
++ /* Callback parameters. */
++ unsigned long cause;
++ unsigned long arg0;
++ unsigned long arg1;
++ unsigned long arg2;
++ unsigned long arg3;
++ unsigned long _pad2[2];
++ unsigned long r2;
++};
++#endif /* __ASSEMBLY__ */
++#endif /* __XEN_PUBLIC_IA64_SIOEMU_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-ia64.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64.h
+--- linux-2.6.18.8/include/xen/interface/arch-ia64.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-ia64.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,621 @@
++/******************************************************************************
++ * arch-ia64/hypervisor-if.h
++ *
++ * Guest OS interface to IA64 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "xen.h"
++
++#ifndef __HYPERVISOR_IF_IA64_H__
++#define __HYPERVISOR_IF_IA64_H__
++
++#if !defined(__GNUC__) || defined(__STRICT_ANSI__)
++#error "Anonymous structs/unions are a GNU extension."
++#endif
++
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } __guest_handle_ ## name
++#else
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef type * __guest_handle_ ## name
++#endif
++
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
++ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
++#define uint64_aligned_t uint64_t
++#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
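++
++/*
++ * Illustrative sketch (not part of the interface): defining a guest-handle
++ * type and initialising it. With __XEN_INTERFACE_VERSION__ >= 0x00030201
++ * the handle is a one-member struct, so it must be set through the macro
++ * rather than by plain pointer assignment.
++ *
++ *     DEFINE_XEN_GUEST_HANDLE(uint64_t);    // once, at file scope
++ *
++ *     uint64_t buf[16];
++ *     XEN_GUEST_HANDLE(uint64_t) h;
++ *     set_xen_guest_handle(h, buf);
++ */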
++
++#ifndef __ASSEMBLY__
++typedef unsigned long xen_pfn_t;
++#define PRI_xen_pfn "lx"
++#endif
++
++/* Arch specific VIRQs definition */
++#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
++#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
++#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++/* WARNING: before changing this, check that shared_info fits on a page */
++#define MAX_VIRT_CPUS 64
++
++/* IO ports location for PV. */
++#define IO_PORTS_PADDR 0x00000ffffc000000UL
++#define IO_PORTS_SIZE 0x0000000004000000UL
++
++#ifndef __ASSEMBLY__
++
++typedef unsigned long xen_ulong_t;
++
++#ifdef __XEN_TOOLS__
++#define XEN_PAGE_SIZE XC_PAGE_SIZE
++#else
++#define XEN_PAGE_SIZE PAGE_SIZE
++#endif
++
++#define INVALID_MFN (~0UL)
++
++struct pt_fpreg {
++ union {
++ unsigned long bits[2];
++ long double __dummy; /* force 16-byte alignment */
++ } u;
++};
++
++union vac {
++ unsigned long value;
++ struct {
++ int a_int:1;
++ int a_from_int_cr:1;
++ int a_to_int_cr:1;
++ int a_from_psr:1;
++ int a_from_cpuid:1;
++ int a_cover:1;
++ int a_bsw:1;
++ long reserved:57;
++ };
++};
++typedef union vac vac_t;
++
++union vdc {
++ unsigned long value;
++ struct {
++ int d_vmsw:1;
++ int d_extint:1;
++ int d_ibr_dbr:1;
++ int d_pmc:1;
++ int d_to_pmd:1;
++ int d_itm:1;
++ long reserved:58;
++ };
++};
++typedef union vdc vdc_t;
++
++struct mapped_regs {
++ union vac vac;
++ union vdc vdc;
++ unsigned long virt_env_vaddr;
++ unsigned long reserved1[29];
++ unsigned long vhpi;
++ unsigned long reserved2[95];
++ union {
++ unsigned long vgr[16];
++ unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
++ };
++ union {
++ unsigned long vbgr[16];
++ unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
++ };
++ unsigned long vnat;
++ unsigned long vbnat;
++ unsigned long vcpuid[5];
++ unsigned long reserved3[11];
++ unsigned long vpsr;
++ unsigned long vpr;
++ unsigned long reserved4[76];
++ union {
++ unsigned long vcr[128];
++ struct {
++ unsigned long dcr; // CR0
++ unsigned long itm;
++ unsigned long iva;
++ unsigned long rsv1[5];
++ unsigned long pta; // CR8
++ unsigned long rsv2[7];
++ unsigned long ipsr; // CR16
++ unsigned long isr;
++ unsigned long rsv3;
++ unsigned long iip;
++ unsigned long ifa;
++ unsigned long itir;
++ unsigned long iipa;
++ unsigned long ifs;
++ unsigned long iim; // CR24
++ unsigned long iha;
++ unsigned long rsv4[38];
++ unsigned long lid; // CR64
++ unsigned long ivr;
++ unsigned long tpr;
++ unsigned long eoi;
++ unsigned long irr[4];
++ unsigned long itv; // CR72
++ unsigned long pmv;
++ unsigned long cmcv;
++ unsigned long rsv5[5];
++ unsigned long lrr0; // CR80
++ unsigned long lrr1;
++ unsigned long rsv6[46];
++ };
++ };
++ union {
++ unsigned long reserved5[128];
++ struct {
++ unsigned long precover_ifs;
++ unsigned long unat; // not sure if this is needed until NaT arch is done
++ int interrupt_collection_enabled; // virtual psr.ic
++            /* The virtual interrupt deliverable flag is now
++             * evtchn_upcall_mask in the shared info area.
++             * interrupt_mask_addr is the address of evtchn_upcall_mask
++             * for the current vcpu. */
++ unsigned char *interrupt_mask_addr;
++ int pending_interruption;
++ unsigned char vpsr_pp;
++ unsigned char vpsr_dfh;
++ unsigned char hpsr_dfh;
++ unsigned char hpsr_mfh;
++ unsigned long reserved5_1[4];
++ int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
++ int banknum; // 0 or 1, which virtual register bank is active
++ unsigned long rrs[8]; // region registers
++ unsigned long krs[8]; // kernel registers
++ unsigned long tmp[16]; // temp registers (e.g. for hyperprivops)
++ };
++ };
++};
++typedef struct mapped_regs mapped_regs_t;
++
++struct vpd {
++ struct mapped_regs vpd_low;
++ unsigned long reserved6[3456];
++ unsigned long vmm_avail[128];
++ unsigned long reserved7[4096];
++};
++typedef struct vpd vpd_t;
++
++struct arch_vcpu_info {
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++/*
++ * This structure is used for the magic page in the domain's pseudo-physical
++ * address space and for the result of XENMEM_machine_memory_map.
++ * In the XENMEM_machine_memory_map result,
++ * xen_memory_map::nr_entries indicates the size in bytes,
++ * including struct xen_ia64_memmap_info, not the number of entries.
++ */
++struct xen_ia64_memmap_info {
++ uint64_t efi_memmap_size; /* size of EFI memory map */
++ uint64_t efi_memdesc_size; /* size of an EFI memory map descriptor */
++ uint32_t efi_memdesc_version; /* memory descriptor version */
++ void *memdesc[0]; /* array of efi_memory_desc_t */
++};
++typedef struct xen_ia64_memmap_info xen_ia64_memmap_info_t;
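++
++/*
++ * Illustrative sketch (not part of the interface): walking the EFI memory
++ * descriptors that follow this structure. The element type of memdesc[] is
++ * efi_memory_desc_t; efi_memdesc_size (not sizeof) is the stride, since the
++ * firmware may use a larger descriptor than the one the kernel knows about.
++ * process_descriptor() is a hypothetical consumer.
++ *
++ *     struct xen_ia64_memmap_info *mi = ...;
++ *     char *p = (char *)mi->memdesc;
++ *     char *end = p + mi->efi_memmap_size;
++ *     for (; p < end; p += mi->efi_memdesc_size)
++ *         process_descriptor((efi_memory_desc_t *)p);
++ */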
++
++struct arch_shared_info {
++ /* PFN of the start_info page. */
++ unsigned long start_info_pfn;
++
++ /* Interrupt vector for event channel. */
++ int evtchn_vector;
++
++ /* PFN of memmap_info page */
++    unsigned int memmap_info_num_pages;/* currently only the value 1 is
++                                          supported. */
++ unsigned long memmap_info_pfn;
++
++ uint64_t pad[31];
++};
++typedef struct arch_shared_info arch_shared_info_t;
++
++typedef unsigned long xen_callback_t;
++
++struct ia64_tr_entry {
++ unsigned long pte;
++ unsigned long itir;
++ unsigned long vadr;
++ unsigned long rid;
++};
++typedef struct ia64_tr_entry ia64_tr_entry_t;
++DEFINE_XEN_GUEST_HANDLE(ia64_tr_entry_t);
++
++struct vcpu_tr_regs {
++ struct ia64_tr_entry itrs[12];
++ struct ia64_tr_entry dtrs[12];
++};
++
++union vcpu_ar_regs {
++ unsigned long ar[128];
++ struct {
++ unsigned long kr[8];
++ unsigned long rsv1[8];
++ unsigned long rsc;
++ unsigned long bsp;
++ unsigned long bspstore;
++ unsigned long rnat;
++ unsigned long rsv2;
++ unsigned long fcr;
++ unsigned long rsv3[2];
++ unsigned long eflag;
++ unsigned long csd;
++ unsigned long ssd;
++ unsigned long cflg;
++ unsigned long fsr;
++ unsigned long fir;
++ unsigned long fdr;
++ unsigned long rsv4;
++ unsigned long ccv; /* 32 */
++ unsigned long rsv5[3];
++ unsigned long unat;
++ unsigned long rsv6[3];
++ unsigned long fpsr;
++ unsigned long rsv7[3];
++ unsigned long itc;
++ unsigned long rsv8[3];
++ unsigned long ign1[16];
++ unsigned long pfs; /* 64 */
++ unsigned long lc;
++ unsigned long ec;
++ unsigned long rsv9[45];
++ unsigned long ign2[16];
++ };
++};
++
++union vcpu_cr_regs {
++ unsigned long cr[128];
++ struct {
++ unsigned long dcr; // CR0
++ unsigned long itm;
++ unsigned long iva;
++ unsigned long rsv1[5];
++ unsigned long pta; // CR8
++ unsigned long rsv2[7];
++ unsigned long ipsr; // CR16
++ unsigned long isr;
++ unsigned long rsv3;
++ unsigned long iip;
++ unsigned long ifa;
++ unsigned long itir;
++ unsigned long iipa;
++ unsigned long ifs;
++ unsigned long iim; // CR24
++ unsigned long iha;
++ unsigned long rsv4[38];
++ unsigned long lid; // CR64
++ unsigned long ivr;
++ unsigned long tpr;
++ unsigned long eoi;
++ unsigned long irr[4];
++ unsigned long itv; // CR72
++ unsigned long pmv;
++ unsigned long cmcv;
++ unsigned long rsv5[5];
++ unsigned long lrr0; // CR80
++ unsigned long lrr1;
++ unsigned long rsv6[46];
++ };
++};
++
++struct vcpu_guest_context_regs {
++ unsigned long r[32];
++ unsigned long b[8];
++ unsigned long bank[16];
++ unsigned long ip;
++ unsigned long psr;
++ unsigned long cfm;
++ unsigned long pr;
++ unsigned int nats; /* NaT bits for r1-r31. */
++ unsigned int bnats; /* Nat bits for banked registers. */
++ union vcpu_ar_regs ar;
++ union vcpu_cr_regs cr;
++ struct pt_fpreg f[128];
++ unsigned long dbr[8];
++ unsigned long ibr[8];
++ unsigned long rr[8];
++ unsigned long pkr[16];
++
++ /* FIXME: cpuid,pmd,pmc */
++
++ unsigned long xip;
++ unsigned long xpsr;
++ unsigned long xfs;
++ unsigned long xr[4];
++
++ struct vcpu_tr_regs tr;
++
++ /* Physical registers in case of debug event. */
++ unsigned long excp_iipa;
++ unsigned long excp_ifa;
++ unsigned long excp_isr;
++ unsigned int excp_vector;
++
++ /*
++ * The rbs is intended to be the image of the stacked registers still
++ * in the cpu (not yet stored in memory). It is laid out as if it
++ * were written in memory at a 512 (64*8) aligned address + offset.
++ * rbs_voff is (offset / 8). rbs_nat contains NaT bits for the
++ * remaining rbs registers. rbs_rnat contains NaT bits for in memory
++ * rbs registers.
++ * Note: loadrs is 2**14 bytes == 2**11 slots.
++ */
++ unsigned int rbs_voff;
++ unsigned long rbs[2048];
++ unsigned long rbs_rnat;
++
++ /*
++ * RSE.N_STACKED_PHYS via PAL_RSE_INFO
++ * Strictly this isn't cpu context, but this value is necessary
++ * for domain save/restore. So is here.
++ */
++ unsigned long num_phys_stacked;
++};
++
++struct vcpu_guest_context {
++#define VGCF_EXTRA_REGS (1UL << 1) /* Set extra regs. */
++#define VGCF_SET_CR_IRR (1UL << 2) /* Set cr_irr[0:3]. */
++#define VGCF_online (1UL << 3) /* make this vcpu online */
++ unsigned long flags; /* VGCF_* flags */
++
++ struct vcpu_guest_context_regs regs;
++
++ unsigned long event_callback_ip;
++
++    /* Xen doesn't share privregs pages with HVM domains, so this member
++     * doesn't make sense for an HVM domain.
++ * ~0UL is already used for INVALID_P2M_ENTRY. */
++#define VGC_PRIVREGS_HVM (~(-2UL))
++ unsigned long privregs_pfn;
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++/* dom0 vp op */
++#define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0
++/* Map io space in machine address to dom0 physical address space.
++   Currently the assigned physical address equals the machine address. */
++#define IA64_DOM0VP_ioremap 0
++
++/* Convert a pseudo physical page frame number to the corresponding
++ machine page frame number. If no page is assigned, INVALID_MFN or
++ GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */
++#define IA64_DOM0VP_phystomach 1
++
++/* Convert a machine page frame number to the corresponding pseudo physical
++ page frame number of the caller domain. */
++#define IA64_DOM0VP_machtophys 3
++
++/* Reserved for future use. */
++#define IA64_DOM0VP_iounmap 4
++
++/* Unmap and free pages contained in the specified pseudo physical region. */
++#define IA64_DOM0VP_zap_physmap 5
++
++/* Assign machine page frame to dom0's pseudo physical address space. */
++#define IA64_DOM0VP_add_physmap 6
++
++/* expose the p2m table into domain */
++#define IA64_DOM0VP_expose_p2m 7
++
++/* xen perfmon */
++#define IA64_DOM0VP_perfmon 8
++
++/* gmfn version of IA64_DOM0VP_add_physmap */
++#define IA64_DOM0VP_add_physmap_with_gmfn 9
++
++/* get fpswa revision */
++#define IA64_DOM0VP_fpswa_revision 10
++
++/* Add an I/O port space range */
++#define IA64_DOM0VP_add_io_space 11
++
++/* expose the foreign domain's p2m table into privileged domain */
++#define IA64_DOM0VP_expose_foreign_p2m 12
++#define IA64_DOM0VP_EFP_ALLOC_PTE 0x1 /* allocate p2m table */
++
++/* unexpose the foreign domain's p2m table into privileged domain */
++#define IA64_DOM0VP_unexpose_foreign_p2m 13
++
++// flags for page assignment to pseudo physical address space
++#define _ASSIGN_readonly 0
++#define ASSIGN_readonly (1UL << _ASSIGN_readonly)
++#define ASSIGN_writable (0UL << _ASSIGN_readonly) // dummy flag
++/* Internal only: memory attribute must be WC/UC/UCE. */
++#define _ASSIGN_nocache 1
++#define ASSIGN_nocache (1UL << _ASSIGN_nocache)
++// tlb tracking
++#define _ASSIGN_tlb_track 2
++#define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track)
++/* Internal only: associated with PGC_allocated bit */
++#define _ASSIGN_pgc_allocated 3
++#define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated)
++/* Page is an IO page. */
++#define _ASSIGN_io 4
++#define ASSIGN_io (1UL << _ASSIGN_io)
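++
++/*
++ * Illustrative sketch (not part of the interface): the ASSIGN_* values are
++ * single-bit flags and are meant to be OR'ed together, e.g. a writable,
++ * TLB-tracked assignment:
++ *
++ *     unsigned long flags = ASSIGN_writable | ASSIGN_tlb_track;
++ *
++ * Note ASSIGN_writable is 0, so it only documents intent; read-only
++ * mappings must pass ASSIGN_readonly explicitly.
++ */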
++
++/* This structure has the same layout as struct ia64_boot_param, defined in
++ <asm/system.h>. It is redefined here to ease use. */
++struct xen_ia64_boot_param {
++ unsigned long command_line; /* physical address of cmd line args */
++ unsigned long efi_systab; /* physical address of EFI system table */
++ unsigned long efi_memmap; /* physical address of EFI memory map */
++ unsigned long efi_memmap_size; /* size of EFI memory map */
++ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */
++ unsigned int efi_memdesc_version; /* memory descriptor version */
++ struct {
++ unsigned short num_cols; /* number of columns on console. */
++ unsigned short num_rows; /* number of rows on console. */
++ unsigned short orig_x; /* cursor's x position */
++ unsigned short orig_y; /* cursor's y position */
++ } console_info;
++ unsigned long fpswa; /* physical address of the fpswa interface */
++ unsigned long initrd_start;
++ unsigned long initrd_size;
++ unsigned long domain_start; /* va where the boot time domain begins */
++ unsigned long domain_size; /* how big is the boot domain */
++};
++
++#endif /* !__ASSEMBLY__ */
++
++/* Size of the shared_info area (this is not related to page size). */
++#define XSI_SHIFT 14
++#define XSI_SIZE (1 << XSI_SHIFT)
++/* Log size of mapped_regs area (64 KB - only 4KB is used). */
++#define XMAPPEDREGS_SHIFT 12
++#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
++/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
++#define XMAPPEDREGS_OFS XSI_SIZE
++
++/* Hyperprivops. */
++#define HYPERPRIVOP_START 0x1
++#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
++#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
++#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
++#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
++#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
++#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
++#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
++#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
++#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
++#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
++#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
++#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
++#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
++#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
++#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
++#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
++#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
++#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
++#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
++#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
++#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
++#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
++#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
++#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
++#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
++#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
++#define HYPERPRIVOP_MAX (0x1a)
++
++/* Fast and light hypercalls. */
++#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
++
++/* Extra debug features. */
++#define __HYPERVISOR_ia64_debug_op __HYPERVISOR_arch_2
++
++/* Xencomm macros. */
++#define XENCOMM_INLINE_MASK 0xf800000000000000UL
++#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
++
++#ifndef __ASSEMBLY__
++
++/*
++ * Optimization features.
++ * The hypervisor may do some special optimizations for guests. This hypercall
++ * can be used to switch these special optimizations on and off.
++ */
++#define __HYPERVISOR_opt_feature 0x700UL
++
++#define XEN_IA64_OPTF_OFF 0x0
++#define XEN_IA64_OPTF_ON 0x1
++
++/*
++ * If this feature is switched on, the hypervisor inserts the
++ * TLB entries without calling the guest's trap handler.
++ * This is useful for guests that use region 7 for identity mapping,
++ * as the Linux kernel does.
++ */
++#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
++
++/* Identity mapping of region 4 addresses in HVM. */
++#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
++
++/* Identity mapping of region 5 addresses in HVM. */
++#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
++
++#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
++
++struct xen_ia64_opt_feature {
++ unsigned long cmd; /* Which feature */
++ unsigned char on; /* Switch feature on/off */
++ union {
++ struct {
++ /* The page protection bit mask of the pte.
++             * This will be OR'ed into the pte. */
++ unsigned long pgprot;
++ unsigned long key; /* A protection key for itir. */
++ };
++ };
++};
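++
++/*
++ * Illustrative sketch (not part of the interface): enabling the region 7
++ * identity-mapping optimization. How the structure reaches the hypervisor
++ * is platform code; HYPERVISOR_opt_feature() is a hypothetical wrapper.
++ *
++ *     struct xen_ia64_opt_feature f = {
++ *         .cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,
++ *         .on     = XEN_IA64_OPTF_ON,
++ *         .pgprot = ...protection bits to OR into the pte...,
++ *         .key    = 0,
++ *     };
++ *     HYPERVISOR_opt_feature(&f);
++ */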
++
++#endif /* __ASSEMBLY__ */
++
++/* xen perfmon */
++#ifdef XEN
++#ifndef __ASSEMBLY__
++#ifndef _ASM_IA64_PERFMON_H
++
++#include <xen/list.h> // asm/perfmon.h requires struct list_head
++#include <asm/perfmon.h>
++// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
++
++#endif /* _ASM_IA64_PERFMON_H */
++
++DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
++DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
++#endif /* __ASSEMBLY__ */
++#endif /* XEN */
++
++#ifndef __ASSEMBLY__
++#include "arch-ia64/hvm/memmap.h"
++#endif
++
++#endif /* __HYPERVISOR_IF_IA64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-powerpc.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-powerpc.h
+--- linux-2.6.18.8/include/xen/interface/arch-powerpc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-powerpc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,120 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) IBM Corp. 2005, 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#include "xen.h"
++
++#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
++#define __XEN_PUBLIC_ARCH_PPC_64_H__
++
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { \
++ int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
++ type *p; \
++ } __attribute__((__aligned__(8))) __guest_handle_ ## name
++
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
++ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define set_xen_guest_handle(hnd, val) \
++ do { \
++ if (sizeof ((hnd).__pad)) \
++ (hnd).__pad[0] = 0; \
++ (hnd).p = val; \
++ } while (0)
++
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
++
++#ifndef __ASSEMBLY__
++typedef unsigned long long xen_pfn_t;
++#define PRI_xen_pfn "llx"
++#endif
++
++/*
++ * Pointers and other address fields inside interface structures are padded to
++ * 64 bits. This means that field alignments aren't different between 32- and
++ * 64-bit architectures.
++ */
++/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
++#define __MEMORY_PADDING(_X)
++#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
++#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
++
++/* And the trap vector is... */
++#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
++
++#ifndef __ASSEMBLY__
++
++#define XENCOMM_INLINE_FLAG (1UL << 63)
++
++typedef uint64_t xen_ulong_t;
++
++/* User-accessible registers: most of these need to be saved/restored
++ * for every nested Xen invocation. */
++struct cpu_user_regs
++{
++ uint64_t gprs[32];
++ uint64_t lr;
++ uint64_t ctr;
++ uint64_t srr0;
++ uint64_t srr1;
++ uint64_t pc;
++ uint64_t msr;
++    uint64_t fpscr; /* XXX Is this necessary? */
++ uint64_t xer;
++ uint64_t hid4; /* debug only */
++ uint64_t dar; /* debug only */
++ uint32_t dsisr; /* debug only */
++ uint32_t cr;
++ uint32_t __pad; /* good spot for another 32bit reg */
++ uint32_t entry_vector;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
++
++/* ONLY used to communicate with dom0! See also struct exec_domain. */
++struct vcpu_guest_context {
++ cpu_user_regs_t user_regs; /* User-level CPU registers */
++ uint64_t sdr1; /* Pagetable base */
++ /* XXX etc */
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++struct arch_shared_info {
++ uint64_t boot_timebase;
++};
++
++struct arch_vcpu_info {
++};
++
++/* Support for multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++#endif
++
++#endif
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86/cpuid.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/cpuid.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86/cpuid.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/cpuid.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,68 @@
++/******************************************************************************
++ * arch-x86/cpuid.h
++ *
++ * CPUID interface to Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2007 Citrix Systems, Inc.
++ *
++ * Authors:
++ * Keir Fraser <keir.fraser@citrix.com>
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
++#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
++
++/* Xen identification leaves start at 0x40000000. */
++#define XEN_CPUID_FIRST_LEAF 0x40000000
++#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
++
++/*
++ * Leaf 1 (0x40000000)
++ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
++ * are supported by the Xen host.
++ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
++ * of a Xen host.
++ */
++#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
++#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
++#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
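++
++/*
++ * Illustrative sketch (not part of the interface): detecting a Xen host by
++ * comparing EBX/ECX/EDX of leaf 0x40000000 against the signature values.
++ * cpuid() here stands for any four-output CPUID helper.
++ *
++ *     uint32_t eax, ebx, ecx, edx;
++ *     cpuid(XEN_CPUID_FIRST_LEAF, &eax, &ebx, &ecx, &edx);
++ *     if (ebx == XEN_CPUID_SIGNATURE_EBX &&
++ *         ecx == XEN_CPUID_SIGNATURE_ECX &&
++ *         edx == XEN_CPUID_SIGNATURE_EDX)
++ *         ...running on Xen; eax is the largest supported leaf...
++ */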
++
++/*
++ * Leaf 2 (0x40000001)
++ * EAX[31:16]: Xen major version.
++ * EAX[15: 0]: Xen minor version.
++ * EBX-EDX: Reserved (currently all zeroes).
++ */
++
++/*
++ * Leaf 3 (0x40000002)
++ * EAX: Number of hypercall transfer pages. This register is always guaranteed
++ * to specify one hypercall page.
++ * EBX: Base address of Xen-specific MSRs.
++ * ECX: Features 1. Unused bits are set to zero.
++ * EDX: Features 2. Unused bits are set to zero.
++ */
++
++/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
++#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
++#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
++
++#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86/hvm/save.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/hvm/save.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86/hvm/save.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/hvm/save.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,429 @@
++/*
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
++ *
++ * Copyright (c) 2007 XenSource Ltd.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
++#define __XEN_PUBLIC_HVM_SAVE_X86_H__
++
++/*
++ * Save/restore header: general info about the save file.
++ */
++
++#define HVM_FILE_MAGIC 0x54381286
++#define HVM_FILE_VERSION 0x00000001
++
++struct hvm_save_header {
++ uint32_t magic; /* Must be HVM_FILE_MAGIC */
++ uint32_t version; /* File format version */
++ uint64_t changeset; /* Version of Xen that saved this file */
++ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */
++ uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
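++
++/*
++ * Illustrative sketch (not part of the interface): a consumer of a save
++ * file would validate the header record before trusting anything else.
++ * header_ok() is a hypothetical helper.
++ *
++ *     static int header_ok(const struct hvm_save_header *h)
++ *     {
++ *         return h->magic == HVM_FILE_MAGIC &&
++ *                h->version == HVM_FILE_VERSION;
++ *     }
++ */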
++
++
++/*
++ * Processor
++ */
++
++struct hvm_hw_cpu {
++ uint8_t fpu_regs[512];
++
++ uint64_t rax;
++ uint64_t rbx;
++ uint64_t rcx;
++ uint64_t rdx;
++ uint64_t rbp;
++ uint64_t rsi;
++ uint64_t rdi;
++ uint64_t rsp;
++ uint64_t r8;
++ uint64_t r9;
++ uint64_t r10;
++ uint64_t r11;
++ uint64_t r12;
++ uint64_t r13;
++ uint64_t r14;
++ uint64_t r15;
++
++ uint64_t rip;
++ uint64_t rflags;
++
++ uint64_t cr0;
++ uint64_t cr2;
++ uint64_t cr3;
++ uint64_t cr4;
++
++ uint64_t dr0;
++ uint64_t dr1;
++ uint64_t dr2;
++ uint64_t dr3;
++ uint64_t dr6;
++ uint64_t dr7;
++
++ uint32_t cs_sel;
++ uint32_t ds_sel;
++ uint32_t es_sel;
++ uint32_t fs_sel;
++ uint32_t gs_sel;
++ uint32_t ss_sel;
++ uint32_t tr_sel;
++ uint32_t ldtr_sel;
++
++ uint32_t cs_limit;
++ uint32_t ds_limit;
++ uint32_t es_limit;
++ uint32_t fs_limit;
++ uint32_t gs_limit;
++ uint32_t ss_limit;
++ uint32_t tr_limit;
++ uint32_t ldtr_limit;
++ uint32_t idtr_limit;
++ uint32_t gdtr_limit;
++
++ uint64_t cs_base;
++ uint64_t ds_base;
++ uint64_t es_base;
++ uint64_t fs_base;
++ uint64_t gs_base;
++ uint64_t ss_base;
++ uint64_t tr_base;
++ uint64_t ldtr_base;
++ uint64_t idtr_base;
++ uint64_t gdtr_base;
++
++ uint32_t cs_arbytes;
++ uint32_t ds_arbytes;
++ uint32_t es_arbytes;
++ uint32_t fs_arbytes;
++ uint32_t gs_arbytes;
++ uint32_t ss_arbytes;
++ uint32_t tr_arbytes;
++ uint32_t ldtr_arbytes;
++
++ uint32_t sysenter_cs;
++ uint32_t padding0;
++
++ uint64_t sysenter_esp;
++ uint64_t sysenter_eip;
++
++ /* msr for em64t */
++ uint64_t shadow_gs;
++
++ /* msr content saved/restored. */
++ uint64_t msr_flags;
++ uint64_t msr_lstar;
++ uint64_t msr_star;
++ uint64_t msr_cstar;
++ uint64_t msr_syscall_mask;
++ uint64_t msr_efer;
++
++ /* guest's idea of what rdtsc() would return */
++ uint64_t tsc;
++
++ /* pending event, if any */
++ union {
++ uint32_t pending_event;
++ struct {
++ uint8_t pending_vector:8;
++ uint8_t pending_type:3;
++ uint8_t pending_error_valid:1;
++ uint32_t pending_reserved:19;
++ uint8_t pending_valid:1;
++ };
++ };
++ /* error code for pending event */
++ uint32_t error_code;
++};
++
++DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
++
++
++/*
++ * PIC
++ */
++
++struct hvm_hw_vpic {
++ /* IR line bitmasks. */
++ uint8_t irr;
++ uint8_t imr;
++ uint8_t isr;
++
++ /* Line IRx maps to IRQ irq_base+x */
++ uint8_t irq_base;
++
++ /*
++ * Where are we in ICW2-4 initialisation (0 means no init in progress)?
++ * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
++ * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence)
++ * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
++ */
++ uint8_t init_state:4;
++
++ /* IR line with highest priority. */
++ uint8_t priority_add:4;
++
++ /* Reads from A=0 obtain ISR or IRR? */
++ uint8_t readsel_isr:1;
++
++ /* Reads perform a polling read? */
++ uint8_t poll:1;
++
++ /* Automatically clear IRQs from the ISR during INTA? */
++ uint8_t auto_eoi:1;
++
++ /* Automatically rotate IRQ priorities during AEOI? */
++ uint8_t rotate_on_auto_eoi:1;
++
++ /* Exclude slave inputs when considering in-service IRQs? */
++ uint8_t special_fully_nested_mode:1;
++
++ /* Special mask mode excludes masked IRs from AEOI and priority checks. */
++ uint8_t special_mask_mode:1;
++
++ /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
++ uint8_t is_master:1;
++
++ /* Edge/trigger selection. */
++ uint8_t elcr;
++
++ /* Virtual INT output. */
++ uint8_t int_output;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
++
++
++/*
++ * IO-APIC
++ */
++
++#ifdef __ia64__
++#define VIOAPIC_IS_IOSAPIC 1
++#define VIOAPIC_NUM_PINS 24
++#else
++#define VIOAPIC_NUM_PINS  48 /* 16 ISA IRQs, 32 non-legacy PCI IRQs. */
++#endif
++
++struct hvm_hw_vioapic {
++ uint64_t base_address;
++ uint32_t ioregsel;
++ uint32_t id;
++ union vioapic_redir_entry
++ {
++ uint64_t bits;
++ struct {
++ uint8_t vector;
++ uint8_t delivery_mode:3;
++ uint8_t dest_mode:1;
++ uint8_t delivery_status:1;
++ uint8_t polarity:1;
++ uint8_t remote_irr:1;
++ uint8_t trig_mode:1;
++ uint8_t mask:1;
++ uint8_t reserve:7;
++#if !VIOAPIC_IS_IOSAPIC
++ uint8_t reserved[4];
++ uint8_t dest_id;
++#else
++ uint8_t reserved[3];
++ uint16_t dest_id;
++#endif
++ } fields;
++ } redirtbl[VIOAPIC_NUM_PINS];
++};
++
++DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
++
++
++/*
++ * LAPIC
++ */
++
++struct hvm_hw_lapic {
++ uint64_t apic_base_msr;
++ uint32_t disabled; /* VLAPIC_xx_DISABLED */
++ uint32_t timer_divisor;
++};
++
++DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
++
++struct hvm_hw_lapic_regs {
++ uint8_t data[1024];
++};
++
++DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
++
++
++/*
++ * IRQs
++ */
++
++struct hvm_hw_pci_irqs {
++ /*
++ * Virtual interrupt wires for a single PCI bus.
++ * Indexed by: device*4 + INTx#.
++ */
++ union {
++ DECLARE_BITMAP(i, 32*4);
++ uint64_t pad[2];
++ };
++};
++
++DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
++
++struct hvm_hw_isa_irqs {
++ /*
++ * Virtual interrupt wires for ISA devices.
++ * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
++ */
++ union {
++ DECLARE_BITMAP(i, 16);
++ uint64_t pad[1];
++ };
++};
++
++DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
++
++struct hvm_hw_pci_link {
++ /*
++ * PCI-ISA interrupt router.
++ * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
++ * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
++ * The router provides a programmable mapping from each link to a GSI.
++ */
++ uint8_t route[4];
++ uint8_t pad0[4];
++};
++
++DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
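++
++/*
++ * Illustrative sketch (not part of the interface): resolving a device's
++ * INTx# line to a GSI with the 'barber's pole' mapping described above.
++ * pci_link is an assumed pointer to a struct hvm_hw_pci_link.
++ *
++ *     unsigned link = (device + intx) & 3;   // intx: 0=INTA# .. 3=INTD#
++ *     uint8_t gsi = pci_link->route[link];
++ */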
++
++/*
++ * PIT
++ */
++
++struct hvm_hw_pit {
++ struct hvm_hw_pit_channel {
++ uint32_t count; /* can be 65536 */
++ uint16_t latched_count;
++ uint8_t count_latched;
++ uint8_t status_latched;
++ uint8_t status;
++ uint8_t read_state;
++ uint8_t write_state;
++ uint8_t write_latch;
++ uint8_t rw_mode;
++ uint8_t mode;
++ uint8_t bcd; /* not supported */
++ uint8_t gate; /* timer start */
++ } channels[3]; /* 3 x 16 bytes */
++ uint32_t speaker_data_on;
++ uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
++
++
++/*
++ * RTC
++ */
++
++#define RTC_CMOS_SIZE 14
++struct hvm_hw_rtc {
++ /* CMOS bytes */
++ uint8_t cmos_data[RTC_CMOS_SIZE];
++ /* Index register for 2-part operations */
++ uint8_t cmos_index;
++ uint8_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
++
++
++/*
++ * HPET
++ */
++
++#define HPET_TIMER_NUM 3 /* 3 timers supported now */
++struct hvm_hw_hpet {
++ /* Memory-mapped, software visible registers */
++ uint64_t capability; /* capabilities */
++ uint64_t res0; /* reserved */
++ uint64_t config; /* configuration */
++ uint64_t res1; /* reserved */
++ uint64_t isr; /* interrupt status reg */
++ uint64_t res2[25]; /* reserved */
++ uint64_t mc64; /* main counter */
++ uint64_t res3; /* reserved */
++ struct { /* timers */
++ uint64_t config; /* configuration/cap */
++ uint64_t cmp; /* comparator */
++ uint64_t fsb; /* FSB route, not supported now */
++ uint64_t res4; /* reserved */
++ } timers[HPET_TIMER_NUM];
++ uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */
++
++ /* Hidden register state */
++ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
++};
++
++DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
++
++
++/*
++ * PM timer
++ */
++
++struct hvm_hw_pmtimer {
++ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
++ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */
++ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */
++};
++
++DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
++
++/*
++ * MTRR MSRs
++ */
++
++struct hvm_hw_mtrr {
++#define MTRR_VCNT 8
++#define NUM_FIXED_MSR 11
++ uint64_t msr_pat_cr;
++ /* mtrr physbase & physmask msr pair*/
++ uint64_t msr_mtrr_var[MTRR_VCNT*2];
++ uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
++ uint64_t msr_mtrr_cap;
++ uint64_t msr_mtrr_def_type;
++};
++
++DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
++
++/*
++ * Largest type-code in use
++ */
++#define HVM_SAVE_CODE_MAX 14
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86/xen.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86/xen.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,204 @@
++/******************************************************************************
++ * arch-x86/xen.h
++ *
++ * Guest OS interface to x86 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "../xen.h"
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_H__
++
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } __guest_handle_ ## name
++#else
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef type * __guest_handle_ ## name
++#endif
++
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
++ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
++#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
++
++#if defined(__i386__)
++#include "xen-x86_32.h"
++#elif defined(__x86_64__)
++#include "xen-x86_64.h"
++#endif
++
++#ifndef __ASSEMBLY__
++typedef unsigned long xen_pfn_t;
++#define PRI_xen_pfn "lx"
++#endif
++
++/*
++ * SEGMENT DESCRIPTOR TABLES
++ */
++/*
++ * A number of GDT entries are reserved by Xen. These are not situated at the
++ * start of the GDT because some stupid OSes export hard-coded selector values
++ * in their ABI. These hard-coded values are always near the start of the GDT,
++ * so Xen places itself out of the way, at the far end of the GDT.
++ */
++#define FIRST_RESERVED_GDT_PAGE 14
++#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
++#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++
++
++/* Machine check support */
++#include "xen-mca.h"
++
++#ifndef __ASSEMBLY__
++
++typedef unsigned long xen_ulong_t;
++
++/*
++ * Send an array of these to HYPERVISOR_set_trap_table().
++ * The privilege level specifies which modes may enter a trap via a software
++ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
++ * privilege levels as follows:
++ * Level == 0: No one may enter
++ * Level == 1: Kernel may enter
++ * Level == 2: Kernel may enter
++ * Level == 3: Everyone may enter
++ */
++#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
++#define TI_GET_IF(_ti) ((_ti)->flags & 4)
++#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
++#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
++struct trap_info {
++ uint8_t vector; /* exception vector */
++ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
++ uint16_t cs; /* code selector */
++ unsigned long address; /* code offset */
++};
++typedef struct trap_info trap_info_t;
++DEFINE_XEN_GUEST_HANDLE(trap_info_t);
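++
++/*
++ * Illustrative sketch (not part of the interface): building one trap table
++ * entry that user mode (ring 3) may enter with events left enabled.
++ * entry_int80 and the vector choice are placeholders, not part of the ABI.
++ *
++ *     struct trap_info ti = { .vector = 0x80, .cs = FLAT_KERNEL_CS,
++ *                             .address = (unsigned long)entry_int80 };
++ *     TI_SET_DPL(&ti, 3);
++ *     TI_SET_IF(&ti, 1);
++ */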
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
++
++/*
++ * The following is all CPU context. Note that the fpu_ctxt block is filled
++ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ */
++struct vcpu_guest_context {
++ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
++ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
++#define VGCF_I387_VALID (1<<0)
++#define VGCF_IN_KERNEL (1<<2)
++#define _VGCF_i387_valid 0
++#define VGCF_i387_valid (1<<_VGCF_i387_valid)
++#define _VGCF_in_kernel 2
++#define VGCF_in_kernel (1<<_VGCF_in_kernel)
++#define _VGCF_failsafe_disables_events 3
++#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
++#define _VGCF_syscall_disables_events 4
++#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
++#define _VGCF_online 5
++#define VGCF_online (1<<_VGCF_online)
++ unsigned long flags; /* VGCF_* flags */
++ struct cpu_user_regs user_regs; /* User-level CPU registers */
++ struct trap_info trap_ctxt[256]; /* Virtual IDT */
++ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
++ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
++ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
++ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
++ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
++ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
++#ifdef __i386__
++ unsigned long event_callback_cs; /* CS:EIP of event callback */
++ unsigned long event_callback_eip;
++ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
++ unsigned long failsafe_callback_eip;
++#else
++ unsigned long event_callback_eip;
++ unsigned long failsafe_callback_eip;
++#ifdef __XEN__
++ union {
++ unsigned long syscall_callback_eip;
++ struct {
++ unsigned int event_callback_cs; /* compat CS of event cb */
++ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
++ };
++ };
++#else
++ unsigned long syscall_callback_eip;
++#endif
++#endif
++ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
++#ifdef __x86_64__
++ /* Segment base addresses. */
++ uint64_t fs_base;
++ uint64_t gs_base_kernel;
++ uint64_t gs_base_user;
++#endif
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++struct arch_shared_info {
++ unsigned long max_pfn; /* max pfn that appears in table */
++ /* Frame containing list of mfns containing list of mfns containing p2m. */
++ xen_pfn_t pfn_to_mfn_frame_list_list;
++ unsigned long nmi_reason;
++ uint64_t pad[32];
++};
++typedef struct arch_shared_info arch_shared_info_t;
++
++#endif /* !__ASSEMBLY__ */
++
++/*
++ * Prefix forces emulation of some non-trapping instructions.
++ * Currently only CPUID.
++ */
++#ifdef __ASSEMBLY__
++#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
++#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
++#else
++#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
++#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
++#endif
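++
++/*
++ * Illustrative sketch (not part of the interface): issuing a CPUID that is
++ * guaranteed to be intercepted by Xen, even where plain CPUID would not
++ * trap (e.g. a PV guest), by using the emulation prefix:
++ *
++ *     uint32_t eax, ebx, ecx, edx;
++ *     asm volatile ( XEN_CPUID
++ *                    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
++ *                    : "0" (0x40000000) );
++ */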
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86/xen-mca.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen-mca.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86/xen-mca.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen-mca.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,279 @@
++/******************************************************************************
++ * arch-x86/mca.h
++ *
++ * Contributed by Advanced Micro Devices, Inc.
++ * Author: Christoph Egger <Christoph.Egger@amd.com>
++ *
++ * Guest OS machine check interface to x86 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++/* Full MCA functionality has the following use cases from the guest side:
++ *
++ * Must have's:
++ * 1. Dom0 and DomU register machine check trap callback handlers
++ * (already done via "set_trap_table" hypercall)
++ * 2. Dom0 registers machine check event callback handler
++ * (doable via EVTCHNOP_bind_virq)
++ * 3. Dom0 and DomU fetches machine check data
++ * 4. Dom0 wants Xen to notify a DomU
++ * 5. Dom0 gets DomU ID from physical address
++ * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy")
++ *
++ * Nice to have's:
++ * 7. Dom0 wants Xen to deactivate a physical CPU
++ *    This is better done as a separate task, physical CPU hotplugging,
++ * and hypercall(s) should be sysctl's
++ * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to
++ *    move a DomU (or Dom0 itself) away from a failing page
++ *    producing correctable errors.
++ * 9. offlining physical page:
++ * Xen free's and never re-uses a certain physical page.
++ * 10. Test facility: Allow Dom0 to write values into machine check MSRs
++ * and tell Xen to trigger a machine check
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__
++#define __XEN_PUBLIC_ARCH_X86_MCA_H__
++
++/* Hypercall */
++#define __HYPERVISOR_mca __HYPERVISOR_arch_0
++
++#define XEN_MCA_INTERFACE_VERSION 0x03000001
++
++/* IN: Dom0 calls hypercall from MC event handler. */
++#define XEN_MC_CORRECTABLE 0x0
++/* IN: Dom0/DomU calls hypercall from MC trap handler. */
++#define XEN_MC_TRAP 0x1
++/* XEN_MC_CORRECTABLE and XEN_MC_TRAP are mutually exclusive. */
++
++/* OUT: All is ok */
++#define XEN_MC_OK 0x0
++/* OUT: Domain could not fetch data. */
++#define XEN_MC_FETCHFAILED 0x1
++/* OUT: There was no machine check data to fetch. */
++#define XEN_MC_NODATA 0x2
++/* OUT: Between notification time and this hypercall another
++ * (most likely correctable) error happened. The fetched data
++ * does not match the original machine check data. */
++#define XEN_MC_NOMATCH 0x4
++
++/* OUT: DomU did not register MC NMI handler. Try something else. */
++#define XEN_MC_CANNOTHANDLE 0x8
++/* OUT: Notifying DomU failed. Retry later or try something else. */
++#define XEN_MC_NOTDELIVERED 0x10
++/* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */
++
++
++#ifndef __ASSEMBLY__
++
++#define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */
++
++/*
++ * Machine Check Architecture:
++ * structs are read-only and used to report all kinds of
++ * correctable and uncorrectable errors detected by the HW.
++ * Dom0 and DomU: register a handler to get notified.
++ * Dom0 only: Correctable errors are reported via VIRQ_MCA
++ * Dom0 and DomU: Uncorrectable errors are reported via NMI handlers
++ */
++#define MC_TYPE_GLOBAL 0
++#define MC_TYPE_BANK 1
++#define MC_TYPE_EXTENDED 2
++
++struct mcinfo_common {
++ uint16_t type; /* structure type */
++ uint16_t size; /* size of this struct in bytes */
++};
++
++
++#define MC_FLAG_CORRECTABLE (1 << 0)
++#define MC_FLAG_UNCORRECTABLE (1 << 1)
++
++/* contains global x86 mc information */
++struct mcinfo_global {
++ struct mcinfo_common common;
++
++    /* domain running at the time of the error (most likely the impacted one) */
++ uint16_t mc_domid;
++ uint32_t mc_socketid; /* physical socket of the physical core */
++ uint16_t mc_coreid; /* physical impacted core */
++ uint16_t mc_core_threadid; /* core thread of physical core */
++ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
++ uint64_t mc_gstatus; /* global status */
++ uint32_t mc_flags;
++};
++
++/* contains bank local x86 mc information */
++struct mcinfo_bank {
++ struct mcinfo_common common;
++
++ uint16_t mc_bank; /* bank nr */
++    uint16_t mc_domid; /* Use case 5: domain referenced by mc_addr, on dom0
++                        * and only if mc_addr is valid. Never valid on DomU. */
++ uint64_t mc_status; /* bank status */
++ uint64_t mc_addr; /* bank address, only valid
++ * if addr bit is set in mc_status */
++ uint64_t mc_misc;
++};
++
++
++struct mcinfo_msr {
++ uint64_t reg; /* MSR */
++ uint64_t value; /* MSR value */
++};
++
++/* contains mc information from other
++ * or additional mc MSRs */
++struct mcinfo_extended {
++ struct mcinfo_common common;
++
++ /* You can fill up to five registers.
++ * If you need more, then use this structure
++ * multiple times. */
++
++    uint32_t mc_msrs; /* Number of MSRs with valid values. */
++ struct mcinfo_msr mc_msr[5];
++};
++
++#define MCINFO_HYPERCALLSIZE 1024
++#define MCINFO_MAXSIZE 768
++
++struct mc_info {
++ /* Number of mcinfo_* entries in mi_data */
++ uint32_t mi_nentries;
++
++ uint8_t mi_data[MCINFO_MAXSIZE - sizeof(uint32_t)];
++};
++typedef struct mc_info mc_info_t;
++
++
++
++/*
++ * OSes should use these instead of writing their own lookup functions,
++ * each with its own bugs and drawbacks.
++ * We use macros instead of static inline functions to allow guests
++ * to include this header in assembly files (*.S).
++ */
++/* Prototype:
++ * uint32_t x86_mcinfo_nentries(struct mc_info *mi);
++ */
++#define x86_mcinfo_nentries(_mi) \
++ (_mi)->mi_nentries
++/* Prototype:
++ * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
++ */
++#define x86_mcinfo_first(_mi) \
++ (struct mcinfo_common *)((_mi)->mi_data)
++/* Prototype:
++ * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
++ */
++#define x86_mcinfo_next(_mic) \
++ (struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)
++
++/* Prototype:
++ * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
++ */
++#define x86_mcinfo_lookup(_ret, _mi, _type) \
++ do { \
++ uint32_t found, i; \
++ struct mcinfo_common *_mic; \
++ \
++ found = 0; \
++ (_ret) = NULL; \
++ if (_mi == NULL) break; \
++ _mic = x86_mcinfo_first(_mi); \
++ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \
++ if (_mic->type == (_type)) { \
++ found = 1; \
++ break; \
++ } \
++ _mic = x86_mcinfo_next(_mic); \
++ } \
++ (_ret) = found ? _mic : NULL; \
++ } while (0)
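++
++/*
++ * Illustrative sketch (not part of the interface): finding the global
++ * record in fetched machine-check data with the lookup macro above.
++ * mi is an assumed pointer to a filled-in struct mc_info.
++ *
++ *     struct mcinfo_common *mic;
++ *     x86_mcinfo_lookup(mic, mi, MC_TYPE_GLOBAL);
++ *     if (mic != NULL) {
++ *         struct mcinfo_global *mg = (struct mcinfo_global *)mic;
++ *         ...inspect mg->mc_gstatus, mg->mc_domid...
++ *     }
++ */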
++
++
++/* Use case 1
++ * Register machine check trap callback handler
++ * (already done via "set_trap_table" hypercall)
++ */
++
++/* Use case 2
++ * Dom0 registers machine check event callback handler
++ * done by EVTCHNOP_bind_virq
++ */
++
++/* Use case 3
++ * Fetch machine check data from hypervisor.
++ * Note: this hypercall is special because both Dom0 and DomU must use it.
++ */
++#define XEN_MC_fetch 1
++struct xen_mc_fetch {
++ /* IN/OUT variables. */
++ uint32_t flags;
++
++/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
++/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */
++
++ /* OUT variables. */
++ uint32_t fetch_idx; /* only useful for Dom0 for the notify hypercall */
++ struct mc_info mc_info;
++};
++typedef struct xen_mc_fetch xen_mc_fetch_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t);
++
++
++/* Use case 4
++ * This tells the hypervisor to notify a DomU about the machine check error
++ */
++#define XEN_MC_notifydomain 2
++struct xen_mc_notifydomain {
++ /* IN variables. */
++ uint16_t mc_domid; /* The unprivileged domain to notify. */
++ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify.
++                         * Usually the echoed value from the fetch hypercall. */
++    uint32_t fetch_idx; /* echoed value from the fetch hypercall. */
++
++ /* IN/OUT variables. */
++ uint32_t flags;
++
++/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
++/* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */
++};
++typedef struct xen_mc_notifydomain xen_mc_notifydomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t);
++
++
++struct xen_mc {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */
++ union {
++ struct xen_mc_fetch mc_fetch;
++ struct xen_mc_notifydomain mc_notifydomain;
++ uint8_t pad[MCINFO_HYPERCALLSIZE];
++ } u;
++};
++typedef struct xen_mc xen_mc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_t);
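++
++/*
++ * Illustrative sketch (not part of the interface): a Dom0 event handler
++ * fetching correctable-error data. HYPERVISOR_mca() stands for whatever
++ * wrapper the OS provides around the __HYPERVISOR_mca hypercall.
++ *
++ *     struct xen_mc mc = {
++ *         .cmd = XEN_MC_fetch,
++ *         .interface_version = XEN_MCA_INTERFACE_VERSION,
++ *     };
++ *     mc.u.mc_fetch.flags = XEN_MC_CORRECTABLE;
++ *     HYPERVISOR_mca(&mc);
++ *     if (mc.u.mc_fetch.flags == XEN_MC_OK)
++ *         ...parse mc.u.mc_fetch.mc_info with the x86_mcinfo_* macros...
++ */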
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86/xen-x86_32.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen-x86_32.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86/xen-x86_32.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen-x86_32.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,180 @@
++/******************************************************************************
++ * xen-x86_32.h
++ *
++ * Guest OS interface to x86 32-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2007, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++
++/*
++ * Hypercall interface:
++ * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
++ * Output: %eax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ * call hypercall_page + hypercall-number * 32
++ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
++ */
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ * mov $hypercall-number*32,%eax ; int $0x82
++ */
++#define TRAP_INSTR "int $0x82"
++#endif
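/*
 * To make the calling convention concrete: a sketch of a 2-argument
 * hypercall issued through the hypercall page from a 32-bit guest.
 * __HYPERVISOR_sched_op comes from xen.h and hypercall_page is the page
 * the guest loader set up -- both assumed available here. The clobbered
 * argument registers are declared as outputs, per the note above.
 */
static inline long hypercall_sched_op(int cmd, void *arg)
{
    long ret, ign1, ign2;
    asm volatile ("call hypercall_page + %c[off]"
                  : "=a" (ret), "=b" (ign1), "=c" (ign2)
                  : [off] "i" (__HYPERVISOR_sched_op * 32),
                    "1" ((long)cmd), "2" ((long)arg)
                  : "memory");
    return ret;
}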
++
++/*
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
++#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
++#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
++#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
++#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
++#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
++
++#define FLAT_KERNEL_CS FLAT_RING1_CS
++#define FLAT_KERNEL_DS FLAT_RING1_DS
++#define FLAT_KERNEL_SS FLAT_RING1_SS
++#define FLAT_USER_CS FLAT_RING3_CS
++#define FLAT_USER_DS FLAT_RING3_DS
++#define FLAT_USER_SS FLAT_RING3_SS
++
++#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
++#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
++#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
++#define HYPERVISOR_VIRT_START_PAE \
++ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
++#define MACH2PHYS_VIRT_START_PAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
++#define MACH2PHYS_VIRT_END_PAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
++
++/* Non-PAE bounds are obsolete. */
++#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
++#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
++#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
++#define HYPERVISOR_VIRT_START_NONPAE \
++ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
++#define MACH2PHYS_VIRT_START_NONPAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
++#define MACH2PHYS_VIRT_END_NONPAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
++
++#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
++#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
++#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#endif
++
++#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
++#endif
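/*
 * The ">> 2" above reflects that each machine-to-physical entry is a
 * 4-byte unsigned long on x86_32. A bounds-checked lookup might look
 * like this sketch (the ~0UL out-of-range sentinel is an assumption):
 */
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
    if (mfn >= MACH2PHYS_NR_ENTRIES)
        return ~0UL;                 /* no mapping for this MFN */
    return machine_to_phys_mapping[mfn];
}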
++
++/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#undef ___DEFINE_XEN_GUEST_HANDLE
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } \
++ __guest_handle_ ## name; \
++ typedef struct { union { type *p; uint64_aligned_t q; }; } \
++ __guest_handle_64_ ## name
++#undef set_xen_guest_handle
++#define set_xen_guest_handle(hnd, val) \
++ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
++ (hnd).p = val; \
++ } while ( 0 )
++#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
++#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
++#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
++#endif
++
++#ifndef __ASSEMBLY__
++
++struct cpu_user_regs {
++ uint32_t ebx;
++ uint32_t ecx;
++ uint32_t edx;
++ uint32_t esi;
++ uint32_t edi;
++ uint32_t ebp;
++ uint32_t eax;
++ uint16_t error_code; /* private */
++ uint16_t entry_vector; /* private */
++ uint32_t eip;
++ uint16_t cs;
++ uint8_t saved_upcall_mask;
++ uint8_t _pad0;
++ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
++ uint32_t esp;
++ uint16_t ss, _pad1;
++ uint16_t es, _pad2;
++ uint16_t ds, _pad3;
++ uint16_t fs, _pad4;
++ uint16_t gs, _pad5;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++/*
++ * Page-directory addresses above 4GB do not fit into architectural %cr3.
++ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
++ * must use the following accessor macros to pack/unpack valid MFNs.
++ */
++#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
++#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
++
++struct arch_vcpu_info {
++ unsigned long cr2;
++ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++struct xen_callback {
++ unsigned long cs;
++ unsigned long eip;
++};
++typedef struct xen_callback xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
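/*
 * The %cr3 accessors above are a 12-bit rotate: the high bits of a PAE
 * page-directory MFN above 4GB land in the low 12 (always-zero) bits of
 * the packed value, so any 32-bit MFN survives the round trip. A sketch:
 */
#include <assert.h>

static void cr3_roundtrip(unsigned int mfn)
{
    unsigned int cr3 = xen_pfn_to_cr3(mfn);
    assert(xen_cr3_to_pfn(cr3) == mfn);
}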
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86/xen-x86_64.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen-x86_64.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86/xen-x86_64.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86/xen-x86_64.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,212 @@
++/******************************************************************************
++ * xen-x86_64.h
++ *
++ * Guest OS interface to x86 64-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++
++/*
++ * Hypercall interface:
++ * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
++ * Output: %rax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ * call hypercall_page + hypercall-number * 32
++ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
++ */
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ * mov $hypercall-number*32,%eax ; syscall
++ * Clobbered: %rcx, %r11, argument registers (as above)
++ */
++#define TRAP_INSTR "syscall"
++#endif
++
++/*
++ * 64-bit segment selectors
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++
++#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
++#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
++#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
++#define FLAT_RING3_DS64 0x0000 /* NULL selector */
++#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
++#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
++
++#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
++#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
++#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
++#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
++#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
++#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
++#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
++#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
++#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
++
++#define FLAT_USER_DS64 FLAT_RING3_DS64
++#define FLAT_USER_DS32 FLAT_RING3_DS32
++#define FLAT_USER_DS FLAT_USER_DS64
++#define FLAT_USER_CS64 FLAT_RING3_CS64
++#define FLAT_USER_CS32 FLAT_RING3_CS32
++#define FLAT_USER_CS FLAT_USER_CS64
++#define FLAT_USER_SS64 FLAT_RING3_SS64
++#define FLAT_USER_SS32 FLAT_RING3_SS32
++#define FLAT_USER_SS FLAT_USER_SS64
++
++#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
++#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
++#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
++#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
++#endif
++
++#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
++#endif
++
++/*
++ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
++ * @which == SEGBASE_* ; @base == 64-bit base address
++ * Returns 0 on success.
++ */
++#define SEGBASE_FS 0
++#define SEGBASE_GS_USER 1
++#define SEGBASE_GS_KERNEL 2
++#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
++
++/*
++ * int HYPERVISOR_iret(void)
++ * All arguments are on the kernel stack, in the following format.
++ * Never returns if successful. Current kernel context is lost.
++ * The saved CS is mapped as follows:
++ * RING0 -> RING3 kernel mode.
++ * RING1 -> RING3 kernel mode.
++ * RING2 -> RING3 kernel mode.
++ * RING3 -> RING3 user mode.
++ * However RING0 indicates that the guest kernel should return to itself
++ * directly with
++ * orb $3,1*8(%rsp)
++ * iretq
++ * If flags contains VGCF_in_syscall:
++ * Restore RAX, RIP, RFLAGS, RSP.
++ * Discard R11, RCX, CS, SS.
++ * Otherwise:
++ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
++ * All other registers are saved on hypercall entry and restored to user.
++ */
++/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
++#define _VGCF_in_syscall 8
++#define VGCF_in_syscall (1<<_VGCF_in_syscall)
++#define VGCF_IN_SYSCALL VGCF_in_syscall
++
++#ifndef __ASSEMBLY__
++
++struct iret_context {
++ /* Top of stack (%rsp at point of hypercall). */
++ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++ /* Bottom of iret stack frame. */
++};
++
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
++/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
++#define __DECL_REG(name) union { \
++ uint64_t r ## name, e ## name; \
++ uint32_t _e ## name; \
++}
++#else
++/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
++#define __DECL_REG(name) uint64_t r ## name
++#endif
++
++struct cpu_user_regs {
++ uint64_t r15;
++ uint64_t r14;
++ uint64_t r13;
++ uint64_t r12;
++ __DECL_REG(bp);
++ __DECL_REG(bx);
++ uint64_t r11;
++ uint64_t r10;
++ uint64_t r9;
++ uint64_t r8;
++ __DECL_REG(ax);
++ __DECL_REG(cx);
++ __DECL_REG(dx);
++ __DECL_REG(si);
++ __DECL_REG(di);
++ uint32_t error_code; /* private */
++ uint32_t entry_vector; /* private */
++ __DECL_REG(ip);
++ uint16_t cs, _pad0[1];
++ uint8_t saved_upcall_mask;
++ uint8_t _pad1[3];
++ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
++ __DECL_REG(sp);
++ uint16_t ss, _pad2[3];
++ uint16_t es, _pad3[3];
++ uint16_t ds, _pad4[3];
++ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
++ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++#undef __DECL_REG
++
++#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
++#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
++
++struct arch_vcpu_info {
++ unsigned long cr2;
++ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++typedef unsigned long xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
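/*
 * One consequence of __DECL_REG worth spelling out: with gcc each slot
 * is an anonymous union, so the 64-bit register is reachable under
 * either name and its low half under the underscored 32-bit name. A
 * sketch (little-endian layout assumed, as on all x86):
 */
static void name_aliases(struct cpu_user_regs *regs)
{
    regs->rax = 0x1122334455667788ULL;
    /* regs->eax  == 0x1122334455667788ULL  (same uint64_t member)   */
    /* regs->_eax == 0x55667788             (uint32_t over low half) */
}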
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86_32.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86_32.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86_32.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86_32.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_32.h
++ *
++ * Guest OS interface to x86 32-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "arch-x86/xen.h"
+diff -rpuN linux-2.6.18.8/include/xen/interface/arch-x86_64.h linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86_64.h
+--- linux-2.6.18.8/include/xen/interface/arch-x86_64.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/arch-x86_64.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_64.h
++ *
++ * Guest OS interface to x86 64-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "arch-x86/xen.h"
+diff -rpuN linux-2.6.18.8/include/xen/interface/callback.h linux-2.6.18-xen-3.3.0/include/xen/interface/callback.h
+--- linux-2.6.18.8/include/xen/interface/callback.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/callback.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,121 @@
++/******************************************************************************
++ * callback.h
++ *
++ * Register guest OS callbacks with Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Ian Campbell
++ */
++
++#ifndef __XEN_PUBLIC_CALLBACK_H__
++#define __XEN_PUBLIC_CALLBACK_H__
++
++#include "xen.h"
++
++/*
++ * Prototype for this hypercall is:
++ * long callback_op(int cmd, void *extra_args)
++ * @cmd == CALLBACKOP_??? (callback operation).
++ * @extra_args == Operation-specific extra arguments (NULL if none).
++ */
++
++/* ia64, x86: Callback for event delivery. */
++#define CALLBACKTYPE_event 0
++
++/* x86: Failsafe callback when guest state cannot be restored by Xen. */
++#define CALLBACKTYPE_failsafe 1
++
++/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
++#define CALLBACKTYPE_syscall 2
++
++/*
++ * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
++ * feature is enabled. Do not use this callback type in new code.
++ */
++#define CALLBACKTYPE_sysenter_deprecated 3
++
++/* x86: Callback for NMI delivery. */
++#define CALLBACKTYPE_nmi 4
++
++/*
++ * x86: sysenter is only available as follows:
++ * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
++ * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
++ * ('32-on-32-on-64', '32-on-64-on-64')
++ * [nb. also 64-bit guest applications on Intel CPUs
++ * ('64-on-64-on-64'), but syscall is preferred]
++ */
++#define CALLBACKTYPE_sysenter 5
++
++/*
++ * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
++ * ('32-on-32-on-64', '32-on-64-on-64')
++ */
++#define CALLBACKTYPE_syscall32 7
++
++/*
++ * Disable event delivery during callback? This flag is ignored for event and
++ * NMI callbacks: event delivery is unconditionally disabled.
++ */
++#define _CALLBACKF_mask_events 0
++#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
++
++/*
++ * Register a callback.
++ */
++#define CALLBACKOP_register 0
++struct callback_register {
++ uint16_t type;
++ uint16_t flags;
++ xen_callback_t address;
++};
++typedef struct callback_register callback_register_t;
++DEFINE_XEN_GUEST_HANDLE(callback_register_t);
++
++/*
++ * Unregister a callback.
++ *
++ * Not all callbacks can be unregistered. -EINVAL will be returned if
++ * you attempt to unregister such a callback.
++ */
++#define CALLBACKOP_unregister 1
++struct callback_unregister {
++ uint16_t type;
++ uint16_t _unused;
++};
++typedef struct callback_unregister callback_unregister_t;
++DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030207
++#undef CALLBACKTYPE_sysenter
++#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
++#endif
++
++#endif /* __XEN_PUBLIC_CALLBACK_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
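/*
 * A sketch of registering the event callback on x86_64, where
 * xen_callback_t is a plain address. HYPERVISOR_callback_op() is the
 * assumed guest-side wrapper for the hypercall prototype quoted above,
 * and hypervisor_event_entry is a hypothetical entry stub.
 */
extern void hypervisor_event_entry(void);

static int register_event_callback(void)
{
    struct callback_register event = {
        .type    = CALLBACKTYPE_event,
        .flags   = 0,   /* mask_events is ignored: events always mask */
        .address = (xen_callback_t)hypervisor_event_entry,
    };

    return HYPERVISOR_callback_op(CALLBACKOP_register, &event);
}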
+diff -rpuN linux-2.6.18.8/include/xen/interface/COPYING linux-2.6.18-xen-3.3.0/include/xen/interface/COPYING
+--- linux-2.6.18.8/include/xen/interface/COPYING 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/COPYING 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,38 @@
++XEN NOTICE
++==========
++
++This copyright applies to all files within this subdirectory and its
++subdirectories:
++ include/public/*.h
++ include/public/hvm/*.h
++ include/public/io/*.h
++
++The intention is that these files can be freely copied into the source
++tree of an operating system when porting that OS to run on Xen. Doing
++so does *not* cause the OS to become subject to the terms of the GPL.
++
++All other files in the Xen source distribution are covered by version
++2 of the GNU General Public License except where explicitly stated
++otherwise within individual source files.
++
++ -- Keir Fraser (on behalf of the Xen team)
++
++=====================================================================
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to
++deal in the Software without restriction, including without limitation the
++rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++sell copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+diff -rpuN linux-2.6.18.8/include/xen/interface/dom0_ops.h linux-2.6.18-xen-3.3.0/include/xen/interface/dom0_ops.h
+--- linux-2.6.18.8/include/xen/interface/dom0_ops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/dom0_ops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,120 @@
++/******************************************************************************
++ * dom0_ops.h
++ *
++ * Process command requests from domain-0 guest OS.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_DOM0_OPS_H__
++#define __XEN_PUBLIC_DOM0_OPS_H__
++
++#include "xen.h"
++#include "platform.h"
++
++#if __XEN_INTERFACE_VERSION__ >= 0x00030204
++#error "dom0_ops.h is a compatibility interface only"
++#endif
++
++#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION
++
++#define DOM0_SETTIME XENPF_settime
++#define dom0_settime xenpf_settime
++#define dom0_settime_t xenpf_settime_t
++
++#define DOM0_ADD_MEMTYPE XENPF_add_memtype
++#define dom0_add_memtype xenpf_add_memtype
++#define dom0_add_memtype_t xenpf_add_memtype_t
++
++#define DOM0_DEL_MEMTYPE XENPF_del_memtype
++#define dom0_del_memtype xenpf_del_memtype
++#define dom0_del_memtype_t xenpf_del_memtype_t
++
++#define DOM0_READ_MEMTYPE XENPF_read_memtype
++#define dom0_read_memtype xenpf_read_memtype
++#define dom0_read_memtype_t xenpf_read_memtype_t
++
++#define DOM0_MICROCODE XENPF_microcode_update
++#define dom0_microcode xenpf_microcode_update
++#define dom0_microcode_t xenpf_microcode_update_t
++
++#define DOM0_PLATFORM_QUIRK XENPF_platform_quirk
++#define dom0_platform_quirk xenpf_platform_quirk
++#define dom0_platform_quirk_t xenpf_platform_quirk_t
++
++typedef uint64_t cpumap_t;
++
++/* Unsupported legacy operation -- defined for API compatibility. */
++#define DOM0_MSR 15
++struct dom0_msr {
++ /* IN variables. */
++ uint32_t write;
++ cpumap_t cpu_mask;
++ uint32_t msr;
++ uint32_t in1;
++ uint32_t in2;
++ /* OUT variables. */
++ uint32_t out1;
++ uint32_t out2;
++};
++typedef struct dom0_msr dom0_msr_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_msr_t);
++
++/* Unsupported legacy operation -- defined for API compatibility. */
++#define DOM0_PHYSICAL_MEMORY_MAP 40
++struct dom0_memory_map_entry {
++ uint64_t start, end;
++ uint32_t flags; /* reserved */
++ uint8_t is_ram;
++};
++typedef struct dom0_memory_map_entry dom0_memory_map_entry_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t);
++
++struct dom0_op {
++ uint32_t cmd;
++ uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
++ union {
++ struct dom0_msr msr;
++ struct dom0_settime settime;
++ struct dom0_add_memtype add_memtype;
++ struct dom0_del_memtype del_memtype;
++ struct dom0_read_memtype read_memtype;
++ struct dom0_microcode microcode;
++ struct dom0_platform_quirk platform_quirk;
++ struct dom0_memory_map_entry physical_memory_map;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct dom0_op dom0_op_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_op_t);
++
++#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/domctl.h linux-2.6.18-xen-3.3.0/include/xen/interface/domctl.h
+--- linux-2.6.18.8/include/xen/interface/domctl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/domctl.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,676 @@
++/******************************************************************************
++ * domctl.h
++ *
++ * Domain management operations. For use by the node control stack.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_DOMCTL_H__
++#define __XEN_PUBLIC_DOMCTL_H__
++
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "domctl operations are intended for use by node control tools only"
++#endif
++
++#include "xen.h"
++
++#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
++
++struct xenctl_cpumap {
++ XEN_GUEST_HANDLE_64(uint8) bitmap;
++ uint32_t nr_cpus;
++};
++
++/*
++ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
++ * If it is specified as zero, an id is auto-allocated and returned.
++ */
++#define XEN_DOMCTL_createdomain 1
++struct xen_domctl_createdomain {
++ /* IN parameters */
++ uint32_t ssidref;
++ xen_domain_handle_t handle;
++ /* Is this an HVM guest (as opposed to a PV guest)? */
++#define _XEN_DOMCTL_CDF_hvm_guest 0
++#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
++ /* Use hardware-assisted paging if available? */
++#define _XEN_DOMCTL_CDF_hap 1
++#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap)
++ uint32_t flags;
++};
++typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
++
++#define XEN_DOMCTL_destroydomain 2
++#define XEN_DOMCTL_pausedomain 3
++#define XEN_DOMCTL_unpausedomain 4
++#define XEN_DOMCTL_resumedomain 27
++
++#define XEN_DOMCTL_getdomaininfo 5
++struct xen_domctl_getdomaininfo {
++ /* OUT variables. */
++ domid_t domain; /* Also echoed in domctl.domain */
++ /* Domain is scheduled to die. */
++#define _XEN_DOMINF_dying 0
++#define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying)
++ /* Domain is an HVM guest (as opposed to a PV guest). */
++#define _XEN_DOMINF_hvm_guest 1
++#define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest)
++ /* The guest OS has shut down. */
++#define _XEN_DOMINF_shutdown 2
++#define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown)
++ /* Currently paused by control software. */
++#define _XEN_DOMINF_paused 3
++#define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused)
++ /* Currently blocked pending an event. */
++#define _XEN_DOMINF_blocked 4
++#define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked)
++ /* Domain is currently running. */
++#define _XEN_DOMINF_running 5
++#define XEN_DOMINF_running (1U<<_XEN_DOMINF_running)
++ /* Being debugged. */
++#define _XEN_DOMINF_debugged 6
++#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
++ /* CPU to which this domain is bound. */
++#define XEN_DOMINF_cpumask 255
++#define XEN_DOMINF_cpushift 8
++ /* XEN_DOMINF_shutdown guest-supplied code. */
++#define XEN_DOMINF_shutdownmask 255
++#define XEN_DOMINF_shutdownshift 16
++ uint32_t flags; /* XEN_DOMINF_* */
++ uint64_aligned_t tot_pages;
++ uint64_aligned_t max_pages;
++ uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
++ uint64_aligned_t cpu_time;
++ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
++ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
++ uint32_t ssidref;
++ xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
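/*
 * Several state bits plus the bound CPU and the guest's shutdown code
 * are packed into the single flags word above; a decoding sketch:
 */
static inline int dominfo_shutdown_code(uint32_t flags)
{
    if (!(flags & XEN_DOMINF_shutdown))
        return -1;      /* not shut down (sentinel is an assumption) */
    return (flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
}

static inline unsigned int dominfo_bound_cpu(uint32_t flags)
{
    return (flags >> XEN_DOMINF_cpushift) & XEN_DOMINF_cpumask;
}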
++
++
++#define XEN_DOMCTL_getmemlist 6
++struct xen_domctl_getmemlist {
++ /* IN variables. */
++ /* Max entries to write to output buffer. */
++ uint64_aligned_t max_pfns;
++ /* Start index in guest's page list. */
++ uint64_aligned_t start_pfn;
++ XEN_GUEST_HANDLE_64(uint64) buffer;
++ /* OUT variables. */
++ uint64_aligned_t num_pfns;
++};
++typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
++
++
++#define XEN_DOMCTL_getpageframeinfo 7
++
++#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
++#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28)
++#define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28)
++#define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28)
++#define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28)
++#define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28)
++#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
++#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
++#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
++#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
++
++struct xen_domctl_getpageframeinfo {
++ /* IN variables. */
++ uint64_aligned_t gmfn; /* GMFN to query */
++ /* OUT variables. */
++ /* Is the page PINNED to a type? */
++ uint32_t type; /* see above type defs */
++};
++typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
++
++
++#define XEN_DOMCTL_getpageframeinfo2 8
++struct xen_domctl_getpageframeinfo2 {
++ /* IN variables. */
++ uint64_aligned_t num;
++ /* IN/OUT variables. */
++ XEN_GUEST_HANDLE_64(uint32) array;
++};
++typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
++
++
++/*
++ * Control shadow pagetable operations.
++ */
++#define XEN_DOMCTL_shadow_op 10
++
++/* Disable shadow mode. */
++#define XEN_DOMCTL_SHADOW_OP_OFF 0
++
++/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE 32
++
++/* Log-dirty bitmap operations. */
++ /* Return the bitmap and clean internal copy for next round. */
++#define XEN_DOMCTL_SHADOW_OP_CLEAN 11
++ /* Return the bitmap but do not modify internal copy. */
++#define XEN_DOMCTL_SHADOW_OP_PEEK 12
++
++/* Memory allocation accessors. */
++#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30
++#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31
++
++/* Legacy enable operations. */
++ /* Equiv. to ENABLE with no mode flags. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1
++ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2
++ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3
++
++/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
++ /*
++ * Shadow pagetables are refcounted: guest does not use explicit mmu
++ * operations nor write-protect its pagetables.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1)
++ /*
++ * Log pages in a bitmap as they are dirtied.
++ * Used for live relocation to determine which pages must be re-sent.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
++ /*
++ * Automatically translate GPFNs into MFNs.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
++ /*
++ * Xen does not steal virtual address space from the guest.
++ * Requires HVM support.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4)
++
++struct xen_domctl_shadow_op_stats {
++ uint32_t fault_count;
++ uint32_t dirty_count;
++};
++typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
++
++struct xen_domctl_shadow_op {
++ /* IN variables. */
++ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */
++
++ /* OP_ENABLE */
++ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */
++
++ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
++ uint32_t mb; /* Shadow memory allocation in MB */
++
++ /* OP_PEEK / OP_CLEAN */
++ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
++ uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
++ struct xen_domctl_shadow_op_stats stats;
++};
++typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
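/*
 * A sketch of one log-dirty round as a migration tool might issue it.
 * do_domctl() is an assumed tool-side hypercall wrapper; the caller
 * supplies the domain id and a bitmap buffer covering nr_pages bits.
 */
#include <string.h>

static int logdirty_round(domid_t domid, uint8_t *bitmap, uint64_t nr_pages)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd               = XEN_DOMCTL_shadow_op;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain            = domid;
    domctl.u.shadow_op.op    = XEN_DOMCTL_SHADOW_OP_CLEAN;
    domctl.u.shadow_op.pages = nr_pages;   /* updated with actual size */
    set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, bitmap);

    /* On success the bitmap marks GFNs dirtied since the last round and
     * the hypervisor's internal copy is cleared for the next pass. */
    return do_domctl(&domctl);
}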
++
++
++#define XEN_DOMCTL_max_mem 11
++struct xen_domctl_max_mem {
++ /* IN variables. */
++ uint64_aligned_t max_memkb;
++};
++typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
++
++
++#define XEN_DOMCTL_setvcpucontext 12
++#define XEN_DOMCTL_getvcpucontext 13
++struct xen_domctl_vcpucontext {
++ uint32_t vcpu; /* IN */
++ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
++};
++typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
++
++
++#define XEN_DOMCTL_getvcpuinfo 14
++struct xen_domctl_getvcpuinfo {
++ /* IN variables. */
++ uint32_t vcpu;
++ /* OUT variables. */
++ uint8_t online; /* currently online (not hotplugged)? */
++ uint8_t blocked; /* blocked waiting for an event? */
++ uint8_t running; /* currently scheduled on its CPU? */
++ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
++ uint32_t cpu; /* current mapping */
++};
++typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
++
++
++/* Get/set which physical cpus a vcpu can execute on. */
++#define XEN_DOMCTL_setvcpuaffinity 9
++#define XEN_DOMCTL_getvcpuaffinity 25
++struct xen_domctl_vcpuaffinity {
++ uint32_t vcpu; /* IN */
++ struct xenctl_cpumap cpumap; /* IN/OUT */
++};
++typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
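/*
 * A sketch of pinning a vcpu with the affinity call above; the bitmap
 * travels by guest handle, one bit per physical CPU. do_domctl() is
 * again an assumed tool-side wrapper.
 */
#include <string.h>

static int pin_vcpu0_to_cpus0_3(domid_t domid)
{
    static uint8_t cpus = 0x0f;            /* bits 0..3 -> CPUs 0..3 */
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd               = XEN_DOMCTL_setvcpuaffinity;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain            = domid;
    domctl.u.vcpuaffinity.vcpu = 0;
    domctl.u.vcpuaffinity.cpumap.nr_cpus = 8;
    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, &cpus);

    return do_domctl(&domctl);
}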
++
++
++#define XEN_DOMCTL_max_vcpus 15
++struct xen_domctl_max_vcpus {
++ uint32_t max; /* maximum number of vcpus */
++};
++typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
++
++
++#define XEN_DOMCTL_scheduler_op 16
++/* Scheduler types. */
++#define XEN_SCHEDULER_SEDF 4
++#define XEN_SCHEDULER_CREDIT 5
++/* Set or get info? */
++#define XEN_DOMCTL_SCHEDOP_putinfo 0
++#define XEN_DOMCTL_SCHEDOP_getinfo 1
++struct xen_domctl_scheduler_op {
++ uint32_t sched_id; /* XEN_SCHEDULER_* */
++ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
++ union {
++ struct xen_domctl_sched_sedf {
++ uint64_aligned_t period;
++ uint64_aligned_t slice;
++ uint64_aligned_t latency;
++ uint32_t extratime;
++ uint32_t weight;
++ } sedf;
++ struct xen_domctl_sched_credit {
++ uint16_t weight;
++ uint16_t cap;
++ } credit;
++ } u;
++};
++typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
++
++
++#define XEN_DOMCTL_setdomainhandle 17
++struct xen_domctl_setdomainhandle {
++ xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
++
++
++#define XEN_DOMCTL_setdebugging 18
++struct xen_domctl_setdebugging {
++ uint8_t enable;
++};
++typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
++
++
++#define XEN_DOMCTL_irq_permission 19
++struct xen_domctl_irq_permission {
++ uint8_t pirq;
++ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
++};
++typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
++
++
++#define XEN_DOMCTL_iomem_permission 20
++struct xen_domctl_iomem_permission {
++ uint64_aligned_t first_mfn; /* first page (physical page number) in range */
++ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
++ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
++};
++typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
++
++
++#define XEN_DOMCTL_ioport_permission 21
++struct xen_domctl_ioport_permission {
++ uint32_t first_port; /* first port in range */
++ uint32_t nr_ports; /* size of port range */
++ uint8_t allow_access; /* allow or deny access to range? */
++};
++typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
++
++
++#define XEN_DOMCTL_hypercall_init 22
++struct xen_domctl_hypercall_init {
++ uint64_aligned_t gmfn; /* GMFN to be initialised */
++};
++typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
++
++
++#define XEN_DOMCTL_arch_setup 23
++#define _XEN_DOMAINSETUP_hvm_guest 0
++#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
++#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
++#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
++#define _XEN_DOMAINSETUP_sioemu_guest 2
++#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
++typedef struct xen_domctl_arch_setup {
++ uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
++#ifdef __ia64__
++ uint64_aligned_t bp; /* mpaddr of boot param area */
++ uint64_aligned_t maxmem; /* Highest memory address for MDT. */
++ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
++ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
++ int8_t vhpt_size_log2; /* Log2 of VHPT size. */
++#endif
++} xen_domctl_arch_setup_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
++
++
++#define XEN_DOMCTL_settimeoffset 24
++struct xen_domctl_settimeoffset {
++ int32_t time_offset_seconds; /* applied to domain wallclock time */
++};
++typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
++
++
++#define XEN_DOMCTL_gethvmcontext 33
++#define XEN_DOMCTL_sethvmcontext 34
++typedef struct xen_domctl_hvmcontext {
++ uint32_t size; /* IN/OUT: size of buffer / bytes filled */
++ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
++ * gethvmcontext with NULL
++ * buffer to get size req'd */
++} xen_domctl_hvmcontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
++
++
++#define XEN_DOMCTL_set_address_size 35
++#define XEN_DOMCTL_get_address_size 36
++typedef struct xen_domctl_address_size {
++ uint32_t size;
++} xen_domctl_address_size_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
++
++
++#define XEN_DOMCTL_real_mode_area 26
++struct xen_domctl_real_mode_area {
++ uint32_t log; /* log2 of Real Mode Area size */
++};
++typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
++
++
++#define XEN_DOMCTL_sendtrigger 28
++#define XEN_DOMCTL_SENDTRIGGER_NMI 0
++#define XEN_DOMCTL_SENDTRIGGER_RESET 1
++#define XEN_DOMCTL_SENDTRIGGER_INIT 2
++struct xen_domctl_sendtrigger {
++ uint32_t trigger; /* IN */
++ uint32_t vcpu; /* IN */
++};
++typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
++
++
++/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
++#define XEN_DOMCTL_assign_device 37
++#define XEN_DOMCTL_test_assign_device 45
++#define XEN_DOMCTL_deassign_device 47
++struct xen_domctl_assign_device {
++ uint32_t machine_bdf; /* machine PCI ID of assigned device */
++};
++typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
++
++/* Retrieve sibling device information for machine_bdf */
++#define XEN_DOMCTL_get_device_group 50
++struct xen_domctl_get_device_group {
++ uint32_t machine_bdf; /* IN */
++ uint32_t max_sdevs; /* IN */
++ uint32_t num_sdevs; /* OUT */
++ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
++};
++typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
++
++/* Pass-through interrupts: bind real irq -> hvm devfn. */
++#define XEN_DOMCTL_bind_pt_irq 38
++#define XEN_DOMCTL_unbind_pt_irq 48
++typedef enum pt_irq_type_e {
++ PT_IRQ_TYPE_PCI,
++ PT_IRQ_TYPE_ISA,
++ PT_IRQ_TYPE_MSI,
++} pt_irq_type_t;
++struct xen_domctl_bind_pt_irq {
++ uint32_t machine_irq;
++ pt_irq_type_t irq_type;
++ uint32_t hvm_domid;
++
++ union {
++ struct {
++ uint8_t isa_irq;
++ } isa;
++ struct {
++ uint8_t bus;
++ uint8_t device;
++ uint8_t intx;
++ } pci;
++ struct {
++ uint8_t gvec;
++ uint32_t gflags;
++ } msi;
++ } u;
++};
++typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
++
++
++/* Bind machine I/O address range -> HVM address range. */
++#define XEN_DOMCTL_memory_mapping 39
++#define DPCI_ADD_MAPPING 1
++#define DPCI_REMOVE_MAPPING 0
++struct xen_domctl_memory_mapping {
++ uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
++ uint64_aligned_t first_mfn; /* first page (machine page) in range */
++ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
++ uint32_t add_mapping; /* add or remove mapping */
++ uint32_t padding; /* padding for 64-bit aligned structure */
++};
++typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
++
++
++/* Bind machine I/O port range -> HVM I/O port range. */
++#define XEN_DOMCTL_ioport_mapping 40
++struct xen_domctl_ioport_mapping {
++ uint32_t first_gport; /* first guest IO port */
++ uint32_t first_mport; /* first machine IO port */
++ uint32_t nr_ports; /* size of port range */
++ uint32_t add_mapping; /* add or remove mapping */
++};
++typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
++
++
++/*
++ * Pin caching type of RAM space for x86 HVM domU.
++ */
++#define XEN_DOMCTL_pin_mem_cacheattr 41
++/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
++#define XEN_DOMCTL_MEM_CACHEATTR_UC 0
++#define XEN_DOMCTL_MEM_CACHEATTR_WC 1
++#define XEN_DOMCTL_MEM_CACHEATTR_WT 4
++#define XEN_DOMCTL_MEM_CACHEATTR_WP 5
++#define XEN_DOMCTL_MEM_CACHEATTR_WB 6
++#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
++struct xen_domctl_pin_mem_cacheattr {
++ uint64_aligned_t start, end;
++ unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
++};
++typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
++
++
++#define XEN_DOMCTL_set_ext_vcpucontext 42
++#define XEN_DOMCTL_get_ext_vcpucontext 43
++struct xen_domctl_ext_vcpucontext {
++ /* IN: VCPU that this call applies to. */
++ uint32_t vcpu;
++ /*
++ * SET: Size of struct (IN)
++ * GET: Size of struct (OUT)
++ */
++ uint32_t size;
++#if defined(__i386__) || defined(__x86_64__)
++ /* SYSCALL from 32-bit mode and SYSENTER callback information. */
++ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
++ uint64_aligned_t syscall32_callback_eip;
++ uint64_aligned_t sysenter_callback_eip;
++ uint16_t syscall32_callback_cs;
++ uint16_t sysenter_callback_cs;
++ uint8_t syscall32_disables_events;
++ uint8_t sysenter_disables_events;
++#endif
++};
++typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
++
++/*
++ * Set optimization features for a domain
++ */
++#define XEN_DOMCTL_set_opt_feature 44
++struct xen_domctl_set_opt_feature {
++#if defined(__ia64__)
++ struct xen_ia64_opt_feature optf;
++#else
++ /* Make struct non-empty: do not depend on this field name! */
++ uint64_t dummy;
++#endif
++};
++typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
++
++/*
++ * Set the target domain for a domain
++ */
++#define XEN_DOMCTL_set_target 46
++struct xen_domctl_set_target {
++ domid_t target;
++};
++typedef struct xen_domctl_set_target xen_domctl_set_target_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
++
++#if defined(__i386__) || defined(__x86_64__)
++# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
++# define XEN_DOMCTL_set_cpuid 49
++struct xen_domctl_cpuid {
++ unsigned int input[2];
++ unsigned int eax;
++ unsigned int ebx;
++ unsigned int ecx;
++ unsigned int edx;
++};
++typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
++#endif
++
++#define XEN_DOMCTL_subscribe 29
++struct xen_domctl_subscribe {
++ uint32_t port; /* IN */
++};
++typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
++
++/*
++ * Define the maximum machine address size which should be allocated
++ * to a guest.
++ */
++#define XEN_DOMCTL_set_machine_address_size 51
++#define XEN_DOMCTL_get_machine_address_size 52
++
++
++struct xen_domctl {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
++ domid_t domain;
++ union {
++ struct xen_domctl_createdomain createdomain;
++ struct xen_domctl_getdomaininfo getdomaininfo;
++ struct xen_domctl_getmemlist getmemlist;
++ struct xen_domctl_getpageframeinfo getpageframeinfo;
++ struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
++ struct xen_domctl_vcpuaffinity vcpuaffinity;
++ struct xen_domctl_shadow_op shadow_op;
++ struct xen_domctl_max_mem max_mem;
++ struct xen_domctl_vcpucontext vcpucontext;
++ struct xen_domctl_getvcpuinfo getvcpuinfo;
++ struct xen_domctl_max_vcpus max_vcpus;
++ struct xen_domctl_scheduler_op scheduler_op;
++ struct xen_domctl_setdomainhandle setdomainhandle;
++ struct xen_domctl_setdebugging setdebugging;
++ struct xen_domctl_irq_permission irq_permission;
++ struct xen_domctl_iomem_permission iomem_permission;
++ struct xen_domctl_ioport_permission ioport_permission;
++ struct xen_domctl_hypercall_init hypercall_init;
++ struct xen_domctl_arch_setup arch_setup;
++ struct xen_domctl_settimeoffset settimeoffset;
++ struct xen_domctl_real_mode_area real_mode_area;
++ struct xen_domctl_hvmcontext hvmcontext;
++ struct xen_domctl_address_size address_size;
++ struct xen_domctl_sendtrigger sendtrigger;
++ struct xen_domctl_get_device_group get_device_group;
++ struct xen_domctl_assign_device assign_device;
++ struct xen_domctl_bind_pt_irq bind_pt_irq;
++ struct xen_domctl_memory_mapping memory_mapping;
++ struct xen_domctl_ioport_mapping ioport_mapping;
++ struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
++ struct xen_domctl_ext_vcpucontext ext_vcpucontext;
++ struct xen_domctl_set_opt_feature set_opt_feature;
++ struct xen_domctl_set_target set_target;
++ struct xen_domctl_subscribe subscribe;
++#if defined(__i386__) || defined(__x86_64__)
++ struct xen_domctl_cpuid cpuid;
++#endif
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_domctl xen_domctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
++
++#endif /* __XEN_PUBLIC_DOMCTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/elfnote.h linux-2.6.18-xen-3.3.0/include/xen/interface/elfnote.h
+--- linux-2.6.18.8/include/xen/interface/elfnote.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/elfnote.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,233 @@
++/******************************************************************************
++ * elfnote.h
++ *
++ * Definitions used for the Xen ELF notes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
++ */
++
++#ifndef __XEN_PUBLIC_ELFNOTE_H__
++#define __XEN_PUBLIC_ELFNOTE_H__
++
++/*
++ * The notes should live in a PT_NOTE segment and have "Xen" in the
++ * name field.
++ *
++ * Numeric types are either 4 or 8 bytes depending on the content of
++ * the desc field.
++ *
++ * "LEGACY" notes below indicate the fields in the legacy __xen_guest
++ * string that a note of the given type replaces.
++ */
++
++/*
++ * NAME=VALUE pair (string).
++ */
++#define XEN_ELFNOTE_INFO 0
++
++/*
++ * The virtual address of the entry point (numeric).
++ *
++ * LEGACY: VIRT_ENTRY
++ */
++#define XEN_ELFNOTE_ENTRY 1
++
++/* The virtual address of the hypercall transfer page (numeric).
++ *
++ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
++ * number, not a virtual address)
++ */
++#define XEN_ELFNOTE_HYPERCALL_PAGE 2
++
++/* The virtual address where the kernel image should be mapped (numeric).
++ *
++ * Defaults to 0.
++ *
++ * LEGACY: VIRT_BASE
++ */
++#define XEN_ELFNOTE_VIRT_BASE 3
++
++/*
++ * The offset of the ELF paddr field from the actual required
++ * pseudo-physical address (numeric).
++ *
++ * This is used to maintain backwards compatibility with older kernels
++ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
++ * if not present.
++ *
++ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
++ */
++#define XEN_ELFNOTE_PADDR_OFFSET 4
++
++/*
++ * The version of Xen that we work with (string).
++ *
++ * LEGACY: XEN_VER
++ */
++#define XEN_ELFNOTE_XEN_VERSION 5
++
++/*
++ * The name of the guest operating system (string).
++ *
++ * LEGACY: GUEST_OS
++ */
++#define XEN_ELFNOTE_GUEST_OS 6
++
++/*
++ * The version of the guest operating system (string).
++ *
++ * LEGACY: GUEST_VER
++ */
++#define XEN_ELFNOTE_GUEST_VERSION 7
++
++/*
++ * The loader type (string).
++ *
++ * LEGACY: LOADER
++ */
++#define XEN_ELFNOTE_LOADER 8
++
++/*
++ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
++ * "bimodal").
++ *
++ * For compatibility with Xen 3.0.3 and earlier, the "bimodal" setting
++ * may be given as "yes,bimodal", which will cause older Xen to treat
++ * this kernel as PAE.
++ *
++ * LEGACY: PAE (n.b. The legacy interface included a provision to
++ * indicate 'extended-cr3' support allowing L3 page tables to be
++ * placed above 4G. It is assumed that any kernel new enough to use
++ * these ELF notes will include this and therefore "yes" here is
++ * equivalent to "yes[extended-cr3]" in the __xen_guest interface.)
++ */
++#define XEN_ELFNOTE_PAE_MODE 9
++
++/*
++ * The features supported/required by this kernel (string).
++ *
++ * The string must consist of a list of feature names (as given in
++ * features.h, without the "XENFEAT_" prefix) separated by '|'
++ * characters. If a feature is required for the kernel to function
++ * then the feature name must be preceded by a '!' character.
++ *
++ * LEGACY: FEATURES
++ */
++#define XEN_ELFNOTE_FEATURES 10
++
++/*
++ * The kernel requires the symbol table to be loaded (string = "yes" or "no")
++ * LEGACY: BSD_SYMTAB (n.b. The legacy interface treated the presence or
++ * absence of this string as a boolean flag rather than requiring "yes"
++ * or "no".)
++ */
++#define XEN_ELFNOTE_BSD_SYMTAB 11
++
++/*
++ * The lowest address the hypervisor hole can begin at (numeric).
++ *
++ * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
++ * also indicates to the hypervisor that the kernel can deal with the
++ * hole starting at a higher address.
++ */
++#define XEN_ELFNOTE_HV_START_LOW 12
++
++/*
++ * List of maddr_t-sized mask/value pairs describing how to recognize
++ * (non-present) L1 page table entries carrying valid MFNs (numeric).
++ */
++#define XEN_ELFNOTE_L1_MFN_VALID 13
++
++/*
++ * Whether or not the guest supports cooperative suspend cancellation.
++ */
++#define XEN_ELFNOTE_SUSPEND_CANCEL 14
++
++/*
++ * The number of the highest elfnote defined.
++ */
++#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
++
++/*
++ * System information exported through crash notes.
++ *
++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
++ * note in case of a system crash. This note will contain various
++ * information about the system, see xen/include/xen/elfcore.h.
++ */
++#define XEN_ELFNOTE_CRASH_INFO 0x1000001
++
++/*
++ * System registers exported through crash notes.
++ *
++ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
++ * note per cpu in case of a system crash. This note is architecture
++ * specific and will contain registers not saved in the "CORE" note.
++ * See xen/include/xen/elfcore.h for more information.
++ */
++#define XEN_ELFNOTE_CRASH_REGS 0x1000002
++
++
++/*
++ * xen dump-core none note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
++ * in its dump file to indicate that the file is a xen dump-core
++ * file. This note carries no other information.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
++
++/*
++ * xen dump-core header note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
++ * in its dump file.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
++
++/*
++ * xen dump-core xen version note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
++ * in its dump file. It contains the xen version obtained via the
++ * XENVER hypercall.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
++
++/*
++ * xen dump-core format version note.
++ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
++ * in its dump file. It contains a format version identifier.
++ * See tools/libxc/xc_core.h for more information.
++ */
++#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
++
++#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/elfstructs.h linux-2.6.18-xen-3.3.0/include/xen/interface/elfstructs.h
+--- linux-2.6.18.8/include/xen/interface/elfstructs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/elfstructs.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,527 @@
++#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
++#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
++/*
++ * Copyright (c) 1995, 1996 Erik Theisen. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * 3. The name of the author may not be used to endorse or promote products
++ * derived from this software without specific prior written permission
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++typedef uint8_t Elf_Byte;
++
++typedef uint32_t Elf32_Addr; /* Unsigned program address */
++typedef uint32_t Elf32_Off; /* Unsigned file offset */
++typedef int32_t Elf32_Sword; /* Signed large integer */
++typedef uint32_t Elf32_Word; /* Unsigned large integer */
++typedef uint16_t Elf32_Half; /* Unsigned medium integer */
++
++typedef uint64_t Elf64_Addr;
++typedef uint64_t Elf64_Off;
++typedef int32_t Elf64_Shalf;
++
++typedef int32_t Elf64_Sword;
++typedef uint32_t Elf64_Word;
++
++typedef int64_t Elf64_Sxword;
++typedef uint64_t Elf64_Xword;
++
++typedef uint32_t Elf64_Half;
++typedef uint16_t Elf64_Quarter;
++
++/*
++ * e_ident[] identification indexes
++ * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
++ */
++#define EI_MAG0 0 /* file ID */
++#define EI_MAG1 1 /* file ID */
++#define EI_MAG2 2 /* file ID */
++#define EI_MAG3 3 /* file ID */
++#define EI_CLASS 4 /* file class */
++#define EI_DATA 5 /* data encoding */
++#define EI_VERSION 6 /* ELF header version */
++#define EI_OSABI 7 /* OS/ABI ID */
++#define EI_ABIVERSION 8 /* ABI version */
++#define EI_PAD 9 /* start of pad bytes */
++#define EI_NIDENT 16 /* Size of e_ident[] */
++
++/* e_ident[] magic number */
++#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
++#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
++#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
++#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
++#define ELFMAG "\177ELF" /* magic */
++#define SELFMAG 4 /* size of magic */
++
++/* e_ident[] file class */
++#define ELFCLASSNONE 0 /* invalid */
++#define ELFCLASS32 1 /* 32-bit objs */
++#define ELFCLASS64 2 /* 64-bit objs */
++#define ELFCLASSNUM 3 /* number of classes */
++
++/* e_ident[] data encoding */
++#define ELFDATANONE 0 /* invalid */
++#define ELFDATA2LSB 1 /* Little-Endian */
++#define ELFDATA2MSB 2 /* Big-Endian */
++#define ELFDATANUM 3 /* number of data encode defines */
++
++/* e_ident[] Operating System/ABI */
++#define ELFOSABI_SYSV 0 /* UNIX System V ABI */
++#define ELFOSABI_HPUX 1 /* HP-UX operating system */
++#define ELFOSABI_NETBSD 2 /* NetBSD */
++#define ELFOSABI_LINUX 3 /* GNU/Linux */
++#define ELFOSABI_HURD 4 /* GNU/Hurd */
++#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
++#define ELFOSABI_SOLARIS 6 /* Solaris */
++#define ELFOSABI_MONTEREY 7 /* Monterey */
++#define ELFOSABI_IRIX 8 /* IRIX */
++#define ELFOSABI_FREEBSD 9 /* FreeBSD */
++#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
++#define ELFOSABI_MODESTO 11 /* Novell Modesto */
++#define ELFOSABI_OPENBSD 12 /* OpenBSD */
++#define ELFOSABI_ARM 97 /* ARM */
++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
++
++/* e_ident */
++#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
++ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
++ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
++ (ehdr).e_ident[EI_MAG3] == ELFMAG3)
++
++/* ELF Header */
++typedef struct elfhdr {
++ unsigned char e_ident[EI_NIDENT]; /* ELF Identification */
++ Elf32_Half e_type; /* object file type */
++ Elf32_Half e_machine; /* machine */
++ Elf32_Word e_version; /* object file version */
++ Elf32_Addr e_entry; /* virtual entry point */
++ Elf32_Off e_phoff; /* program header table offset */
++ Elf32_Off e_shoff; /* section header table offset */
++ Elf32_Word e_flags; /* processor-specific flags */
++ Elf32_Half e_ehsize; /* ELF header size */
++ Elf32_Half e_phentsize; /* program header entry size */
++ Elf32_Half e_phnum; /* number of program header entries */
++ Elf32_Half e_shentsize; /* section header entry size */
++ Elf32_Half e_shnum; /* number of section header entries */
++ Elf32_Half e_shstrndx; /* section header table's "section
++ header string table" entry offset */
++} Elf32_Ehdr;
++
++typedef struct {
++ unsigned char e_ident[EI_NIDENT]; /* Id bytes */
++ Elf64_Quarter e_type; /* file type */
++ Elf64_Quarter e_machine; /* machine type */
++ Elf64_Half e_version; /* version number */
++ Elf64_Addr e_entry; /* entry point */
++ Elf64_Off e_phoff; /* Program hdr offset */
++ Elf64_Off e_shoff; /* Section hdr offset */
++ Elf64_Half e_flags; /* Processor flags */
++ Elf64_Quarter e_ehsize; /* sizeof ehdr */
++ Elf64_Quarter e_phentsize; /* Program header entry size */
++ Elf64_Quarter e_phnum; /* Number of program headers */
++ Elf64_Quarter e_shentsize; /* Section header entry size */
++ Elf64_Quarter e_shnum; /* Number of section headers */
++ Elf64_Quarter e_shstrndx; /* String table index */
++} Elf64_Ehdr;
++
++/* e_type */
++#define ET_NONE 0 /* No file type */
++#define ET_REL 1 /* relocatable file */
++#define ET_EXEC 2 /* executable file */
++#define ET_DYN 3 /* shared object file */
++#define ET_CORE 4 /* core file */
++#define ET_NUM 5 /* number of types */
++#define ET_LOPROC 0xff00 /* reserved range for processor */
++#define ET_HIPROC 0xffff /* specific e_type */
++
++/* e_machine */
++#define EM_NONE 0 /* No Machine */
++#define EM_M32 1 /* AT&T WE 32100 */
++#define EM_SPARC 2 /* SPARC */
++#define EM_386 3 /* Intel 80386 */
++#define EM_68K 4 /* Motorola 68000 */
++#define EM_88K 5 /* Motorola 88000 */
++#define EM_486 6 /* Intel 80486 - unused? */
++#define EM_860 7 /* Intel 80860 */
++#define EM_MIPS 8 /* MIPS R3000 Big-Endian only */
++/*
++ * Don't know if EM_MIPS_RS4_BE,
++ * EM_SPARC64, EM_PARISC,
++ * or EM_PPC are ABI compliant
++ */
++#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
++#define EM_SPARC64 11 /* SPARC v9 64-bit unofficial */
++#define EM_PARISC 15 /* HPPA */
++#define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */
++#define EM_PPC 20 /* PowerPC */
++#define EM_PPC64 21 /* PowerPC 64-bit */
++#define EM_ARM 40 /* Advanced RISC Machines ARM */
++#define EM_ALPHA 41 /* DEC ALPHA */
++#define EM_SPARCV9 43 /* SPARC version 9 */
++#define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */
++#define EM_IA_64 50 /* Intel Merced */
++#define EM_X86_64 62 /* AMD x86-64 architecture */
++#define EM_VAX 75 /* DEC VAX */
++
++/* Version */
++#define EV_NONE 0 /* Invalid */
++#define EV_CURRENT 1 /* Current */
++#define EV_NUM 2 /* number of versions */
++
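++/*
++ * Example (illustrative sketch, not part of this interface): a loader
++ * would typically combine IS_ELF() with the e_ident class and encoding
++ * checks above before trusting any other header field.
++ */
++static inline int elf32_header_ok(const Elf32_Ehdr *ehdr)
++{
++    if (!IS_ELF(*ehdr))
++        return 0;                               /* bad magic */
++    if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
++        return 0;                               /* wrong word size */
++    if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
++        ehdr->e_ident[EI_DATA] != ELFDATA2MSB)
++        return 0;                               /* unknown byte order */
++    return ehdr->e_ident[EI_VERSION] == EV_CURRENT;
++}
++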
++/* Section Header */
++typedef struct {
++ Elf32_Word sh_name; /* name - index into section header
++ string table section */
++ Elf32_Word sh_type; /* type */
++ Elf32_Word sh_flags; /* flags */
++ Elf32_Addr sh_addr; /* address */
++ Elf32_Off sh_offset; /* file offset */
++ Elf32_Word sh_size; /* section size */
++ Elf32_Word sh_link; /* section header table index link */
++ Elf32_Word sh_info; /* extra information */
++ Elf32_Word sh_addralign; /* address alignment */
++ Elf32_Word sh_entsize; /* section entry size */
++} Elf32_Shdr;
++
++typedef struct {
++ Elf64_Half sh_name; /* section name */
++ Elf64_Half sh_type; /* section type */
++ Elf64_Xword sh_flags; /* section flags */
++ Elf64_Addr sh_addr; /* virtual address */
++ Elf64_Off sh_offset; /* file offset */
++ Elf64_Xword sh_size; /* section size */
++ Elf64_Half sh_link; /* link to another */
++ Elf64_Half sh_info; /* misc info */
++ Elf64_Xword sh_addralign; /* memory alignment */
++ Elf64_Xword sh_entsize; /* table entry size */
++} Elf64_Shdr;
++
++/* Special Section Indexes */
++#define SHN_UNDEF 0 /* undefined */
++#define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */
++#define SHN_LOPROC 0xff00 /* reserved range for processor */
++#define SHN_HIPROC 0xff1f /* specific section indexes */
++#define SHN_ABS 0xfff1 /* absolute value */
++#define SHN_COMMON 0xfff2 /* common symbol */
++#define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */
++
++/* sh_type */
++#define SHT_NULL 0 /* inactive */
++#define SHT_PROGBITS 1 /* program defined information */
++#define SHT_SYMTAB 2 /* symbol table section */
++#define SHT_STRTAB 3 /* string table section */
++#define SHT_RELA 4 /* relocation section with addends */
++#define SHT_HASH 5 /* symbol hash table section */
++#define SHT_DYNAMIC 6 /* dynamic section */
++#define SHT_NOTE 7 /* note section */
++#define SHT_NOBITS 8 /* no space section */
++#define SHT_REL 9 /* relocation section without addends */
++#define SHT_SHLIB 10 /* reserved - purpose unknown */
++#define SHT_DYNSYM 11 /* dynamic symbol table section */
++#define SHT_NUM 12 /* number of section types */
++#define SHT_LOPROC 0x70000000 /* reserved range for processor */
++#define SHT_HIPROC 0x7fffffff /* specific section header types */
++#define SHT_LOUSER 0x80000000 /* reserved range for application */
++#define SHT_HIUSER 0xffffffff /* specific indexes */
++
++/* Section names */
++#define ELF_BSS ".bss" /* uninitialized data */
++#define ELF_DATA ".data" /* initialized data */
++#define ELF_DEBUG ".debug" /* debug */
++#define ELF_DYNAMIC ".dynamic" /* dynamic linking information */
++#define ELF_DYNSTR ".dynstr" /* dynamic string table */
++#define ELF_DYNSYM ".dynsym" /* dynamic symbol table */
++#define ELF_FINI ".fini" /* termination code */
++#define ELF_GOT ".got" /* global offset table */
++#define ELF_HASH ".hash" /* symbol hash table */
++#define ELF_INIT ".init" /* initialization code */
++#define ELF_REL_DATA ".rel.data" /* relocation data */
++#define ELF_REL_FINI ".rel.fini" /* relocation termination code */
++#define ELF_REL_INIT ".rel.init" /* relocation initialization code */
++#define ELF_REL_DYN ".rel.dyn" /* relocation dynamic link info */
++#define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */
++#define ELF_REL_TEXT ".rel.text" /* relocation code */
++#define ELF_RODATA ".rodata" /* read-only data */
++#define ELF_SHSTRTAB ".shstrtab" /* section header string table */
++#define ELF_STRTAB ".strtab" /* string table */
++#define ELF_SYMTAB ".symtab" /* symbol table */
++#define ELF_TEXT ".text" /* code */
++
++
++/* Section Attribute Flags - sh_flags */
++#define SHF_WRITE 0x1 /* Writable */
++#define SHF_ALLOC 0x2 /* occupies memory */
++#define SHF_EXECINSTR 0x4 /* executable */
++#define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */
++ /* specific section attributes */
++
++/* Symbol Table Entry */
++typedef struct elf32_sym {
++ Elf32_Word st_name; /* name - index into string table */
++ Elf32_Addr st_value; /* symbol value */
++ Elf32_Word st_size; /* symbol size */
++ unsigned char st_info; /* type and binding */
++ unsigned char st_other; /* 0 - no defined meaning */
++ Elf32_Half st_shndx; /* section header index */
++} Elf32_Sym;
++
++typedef struct {
++ Elf64_Half st_name; /* Symbol name index in str table */
++ Elf_Byte st_info; /* type / binding attrs */
++ Elf_Byte st_other; /* unused */
++ Elf64_Quarter st_shndx; /* section index of symbol */
++ Elf64_Xword st_value; /* value of symbol */
++ Elf64_Xword st_size; /* size of symbol */
++} Elf64_Sym;
++
++/* Symbol table index */
++#define STN_UNDEF 0 /* undefined */
++
++/* Extract symbol info - st_info */
++#define ELF32_ST_BIND(x) ((x) >> 4)
++#define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf)
++#define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
++
++#define ELF64_ST_BIND(x) ((x) >> 4)
++#define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf)
++#define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
++
++/* Symbol Binding - ELF32_ST_BIND - st_info */
++#define STB_LOCAL 0 /* Local symbol */
++#define STB_GLOBAL 1 /* Global symbol */
++#define STB_WEAK 2 /* like global - lower precedence */
++#define STB_NUM 3 /* number of symbol bindings */
++#define STB_LOPROC 13 /* reserved range for processor */
++#define STB_HIPROC 15 /* specific symbol bindings */
++
++/* Symbol type - ELF32_ST_TYPE - st_info */
++#define STT_NOTYPE 0 /* not specified */
++#define STT_OBJECT 1 /* data object */
++#define STT_FUNC 2 /* function */
++#define STT_SECTION 3 /* section */
++#define STT_FILE 4 /* file */
++#define STT_NUM 5 /* number of symbol types */
++#define STT_LOPROC 13 /* reserved range for processor */
++#define STT_HIPROC 15 /* specific symbol types */
++
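++/*
++ * Example (illustrative sketch, not part of this interface): decoding
++ * st_info with the accessor macros above.
++ */
++static inline int elf32_sym_is_global_func(const Elf32_Sym *sym)
++{
++    return ELF32_ST_BIND(sym->st_info) == STB_GLOBAL &&
++           ELF32_ST_TYPE(sym->st_info) == STT_FUNC;
++}
++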
++/* Relocation entry with implicit addend */
++typedef struct {
++ Elf32_Addr r_offset; /* offset of relocation */
++ Elf32_Word r_info; /* symbol table index and type */
++} Elf32_Rel;
++
++/* Relocation entry with explicit addend */
++typedef struct {
++ Elf32_Addr r_offset; /* offset of relocation */
++ Elf32_Word r_info; /* symbol table index and type */
++ Elf32_Sword r_addend;
++} Elf32_Rela;
++
++/* Extract relocation info - r_info */
++#define ELF32_R_SYM(i) ((i) >> 8)
++#define ELF32_R_TYPE(i) ((unsigned char) (i))
++#define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t))
++
++typedef struct {
++ Elf64_Xword r_offset; /* where to do it */
++ Elf64_Xword r_info; /* index & type of relocation */
++} Elf64_Rel;
++
++typedef struct {
++ Elf64_Xword r_offset; /* where to do it */
++ Elf64_Xword r_info; /* index & type of relocation */
++ Elf64_Sxword r_addend; /* adjustment value */
++} Elf64_Rela;
++
++#define ELF64_R_SYM(info) ((info) >> 32)
++#define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF)
++#define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t))
++
++/* Program Header */
++typedef struct {
++ Elf32_Word p_type; /* segment type */
++ Elf32_Off p_offset; /* segment offset */
++ Elf32_Addr p_vaddr; /* virtual address of segment */
++ Elf32_Addr p_paddr; /* physical address - ignored? */
++ Elf32_Word p_filesz; /* number of bytes in file for seg. */
++ Elf32_Word p_memsz; /* number of bytes in mem. for seg. */
++ Elf32_Word p_flags; /* flags */
++ Elf32_Word p_align; /* memory alignment */
++} Elf32_Phdr;
++
++typedef struct {
++ Elf64_Half p_type; /* entry type */
++ Elf64_Half p_flags; /* flags */
++ Elf64_Off p_offset; /* offset */
++ Elf64_Addr p_vaddr; /* virtual address */
++ Elf64_Addr p_paddr; /* physical address */
++ Elf64_Xword p_filesz; /* file size */
++ Elf64_Xword p_memsz; /* memory size */
++ Elf64_Xword p_align; /* memory & file alignment */
++} Elf64_Phdr;
++
++/* Segment types - p_type */
++#define PT_NULL 0 /* unused */
++#define PT_LOAD 1 /* loadable segment */
++#define PT_DYNAMIC 2 /* dynamic linking section */
++#define PT_INTERP 3 /* the RTLD */
++#define PT_NOTE 4 /* auxiliary information */
++#define PT_SHLIB 5 /* reserved - purpose undefined */
++#define PT_PHDR 6 /* program header */
++#define PT_NUM 7 /* Number of segment types */
++#define PT_LOPROC 0x70000000 /* reserved range for processor */
++#define PT_HIPROC 0x7fffffff /* specific segment types */
++
++/* Segment flags - p_flags */
++#define PF_X 0x1 /* Executable */
++#define PF_W 0x2 /* Writable */
++#define PF_R 0x4 /* Readable */
++#define PF_MASKPROC 0xf0000000 /* reserved bits for processor */
++ /* specific segment flags */
++
++/* Dynamic structure */
++typedef struct {
++ Elf32_Sword d_tag; /* controls meaning of d_val */
++ union {
++ Elf32_Word d_val; /* Multiple meanings - see d_tag */
++ Elf32_Addr d_ptr; /* program virtual address */
++ } d_un;
++} Elf32_Dyn;
++
++typedef struct {
++ Elf64_Xword d_tag; /* controls meaning of d_val */
++ union {
++ Elf64_Addr d_ptr;
++ Elf64_Xword d_val;
++ } d_un;
++} Elf64_Dyn;
++
++/* Dynamic Array Tags - d_tag */
++#define DT_NULL 0 /* marks end of _DYNAMIC array */
++#define DT_NEEDED 1 /* string table offset of needed lib */
++#define DT_PLTRELSZ 2 /* size of relocation entries in PLT */
++#define DT_PLTGOT 3 /* address PLT/GOT */
++#define DT_HASH 4 /* address of symbol hash table */
++#define DT_STRTAB 5 /* address of string table */
++#define DT_SYMTAB 6 /* address of symbol table */
++#define DT_RELA 7 /* address of relocation table */
++#define DT_RELASZ 8 /* size of relocation table */
++#define DT_RELAENT 9 /* size of relocation entry */
++#define DT_STRSZ 10 /* size of string table */
++#define DT_SYMENT 11 /* size of symbol table entry */
++#define DT_INIT 12 /* address of initialization func. */
++#define DT_FINI 13 /* address of termination function */
++#define DT_SONAME 14 /* string table offset of shared obj */
++#define DT_RPATH 15 /* string table offset of library
++ search path */
++#define DT_SYMBOLIC 16 /* start sym search in shared obj. */
++#define DT_REL 17 /* address of rel. tbl. w addends */
++#define DT_RELSZ 18 /* size of DT_REL relocation table */
++#define DT_RELENT 19 /* size of DT_REL relocation entry */
++#define DT_PLTREL 20 /* PLT referenced relocation entry */
++#define DT_DEBUG 21 /* debugger */
++#define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */
++#define DT_JMPREL 23 /* add. of PLT's relocation entries */
++#define DT_BIND_NOW 24 /* Bind now regardless of env setting */
++#define DT_NUM 25 /* Number used. */
++#define DT_LOPROC 0x70000000 /* reserved range for processor */
++#define DT_HIPROC 0x7fffffff /* specific dynamic array tags */
++
++/* Standard ELF hashing function */
++unsigned int elf_hash(const unsigned char *name);
++
++/*
++ * Note Definitions
++ */
++typedef struct {
++ Elf32_Word namesz;
++ Elf32_Word descsz;
++ Elf32_Word type;
++} Elf32_Note;
++
++typedef struct {
++ Elf64_Half namesz;
++ Elf64_Half descsz;
++ Elf64_Half type;
++} Elf64_Note;
++
++
++#if defined(ELFSIZE)
++#define CONCAT(x,y) __CONCAT(x,y)
++#define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
++#define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
++#define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE))
++#define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
++#endif
++
++#if defined(ELFSIZE) && (ELFSIZE == 32)
++#define Elf_Ehdr Elf32_Ehdr
++#define Elf_Phdr Elf32_Phdr
++#define Elf_Shdr Elf32_Shdr
++#define Elf_Sym Elf32_Sym
++#define Elf_Rel Elf32_Rel
++#define Elf_RelA Elf32_Rela
++#define Elf_Dyn Elf32_Dyn
++#define Elf_Word Elf32_Word
++#define Elf_Sword Elf32_Sword
++#define Elf_Addr Elf32_Addr
++#define Elf_Off Elf32_Off
++#define Elf_Nhdr Elf32_Nhdr
++#define Elf_Note Elf32_Note
++
++#define ELF_R_SYM ELF32_R_SYM
++#define ELF_R_TYPE ELF32_R_TYPE
++#define ELF_R_INFO ELF32_R_INFO
++#define ELFCLASS ELFCLASS32
++
++#define ELF_ST_BIND ELF32_ST_BIND
++#define ELF_ST_TYPE ELF32_ST_TYPE
++#define ELF_ST_INFO ELF32_ST_INFO
++
++#define AuxInfo Aux32Info
++#elif defined(ELFSIZE) && (ELFSIZE == 64)
++#define Elf_Ehdr Elf64_Ehdr
++#define Elf_Phdr Elf64_Phdr
++#define Elf_Shdr Elf64_Shdr
++#define Elf_Sym Elf64_Sym
++#define Elf_Rel Elf64_Rel
++#define Elf_RelA Elf64_Rela
++#define Elf_Dyn Elf64_Dyn
++#define Elf_Word Elf64_Word
++#define Elf_Sword Elf64_Sword
++#define Elf_Addr Elf64_Addr
++#define Elf_Off Elf64_Off
++#define Elf_Nhdr Elf64_Nhdr
++#define Elf_Note Elf64_Note
++
++#define ELF_R_SYM ELF64_R_SYM
++#define ELF_R_TYPE ELF64_R_TYPE
++#define ELF_R_INFO ELF64_R_INFO
++#define ELFCLASS ELFCLASS64
++
++#define ELF_ST_BIND ELF64_ST_BIND
++#define ELF_ST_TYPE ELF64_ST_TYPE
++#define ELF_ST_INFO ELF64_ST_INFO
++
++#define AuxInfo Aux64Info
++#endif
++
++#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/event_channel.h linux-2.6.18-xen-3.3.0/include/xen/interface/event_channel.h
+--- linux-2.6.18.8/include/xen/interface/event_channel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/event_channel.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,264 @@
++/******************************************************************************
++ * event_channel.h
++ *
++ * Event channels between domains.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, K A Fraser.
++ */
++
++#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
++#define __XEN_PUBLIC_EVENT_CHANNEL_H__
++
++/*
++ * Prototype for this hypercall is:
++ * int event_channel_op(int cmd, void *args)
++ * @cmd == EVTCHNOP_??? (event-channel operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++typedef uint32_t evtchn_port_t;
++DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
++
++/*
++ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
++ * accepting interdomain bindings from domain <remote_dom>. A fresh port
++ * is allocated in <dom> and returned as <port>.
++ * NOTES:
++ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
++ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
++ */
++#define EVTCHNOP_alloc_unbound 6
++struct evtchn_alloc_unbound {
++ /* IN parameters */
++ domid_t dom, remote_dom;
++ /* OUT parameters */
++ evtchn_port_t port;
++};
++typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
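++
++/*
++ * Example (illustrative sketch, not part of this interface): allocating
++ * an unbound port, assuming the kernel's HYPERVISOR_event_channel_op()
++ * hypercall wrapper.
++ */
++static inline int evtchn_example_alloc_unbound(domid_t remote_dom,
++                                               evtchn_port_t *port)
++{
++    struct evtchn_alloc_unbound op = {
++        .dom        = DOMID_SELF, /* unprivileged callers must use this */
++        .remote_dom = remote_dom,
++    };
++    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
++    if (rc == 0)
++        *port = op.port;          /* OUT parameter: the fresh port */
++    return rc;
++}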
++
++/*
++ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
++ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
++ * a port that is unbound and marked as accepting bindings from the calling
++ * domain. A fresh port is allocated in the calling domain and returned as
++ * <local_port>.
++ * NOTES:
++ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
++ */
++#define EVTCHNOP_bind_interdomain 0
++struct evtchn_bind_interdomain {
++ /* IN parameters. */
++ domid_t remote_dom;
++ evtchn_port_t remote_port;
++ /* OUT parameters. */
++ evtchn_port_t local_port;
++};
++typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
++
++/*
++ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
++ * vcpu.
++ * NOTES:
++ * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
++ * in xen.h for the classification of each VIRQ.
++ * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
++ * re-bound via EVTCHNOP_bind_vcpu.
++ * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
++ * The allocated event channel is bound to the specified vcpu and the
++ * binding cannot be changed.
++ */
++#define EVTCHNOP_bind_virq 1
++struct evtchn_bind_virq {
++ /* IN parameters. */
++ uint32_t virq;
++ uint32_t vcpu;
++ /* OUT parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_bind_virq evtchn_bind_virq_t;
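++
++/*
++ * Example (illustrative sketch, not part of this interface): binding a
++ * per-vcpu VIRQ; VIRQ_TIMER comes from xen.h and the hypercall wrapper
++ * is assumed from the kernel.
++ */
++static inline int evtchn_example_bind_timer_virq(evtchn_port_t *port)
++{
++    struct evtchn_bind_virq bind = {
++        .virq = VIRQ_TIMER,
++        .vcpu = 0,     /* per-vcpu VIRQs fire on the vcpu given here */
++    };
++    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind);
++    if (rc == 0)
++        *port = bind.port;
++    return rc;
++}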
++
++/*
++ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
++ * NOTES:
++ * 1. A physical IRQ may be bound to at most one event channel per domain.
++ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
++ */
++#define EVTCHNOP_bind_pirq 2
++struct evtchn_bind_pirq {
++ /* IN parameters. */
++ uint32_t pirq;
++#define BIND_PIRQ__WILL_SHARE 1
++ uint32_t flags; /* BIND_PIRQ__* */
++ /* OUT parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
++
++/*
++ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
++ * NOTES:
++ * 1. The allocated event channel is bound to the specified vcpu. The binding
++ * may not be changed.
++ */
++#define EVTCHNOP_bind_ipi 7
++struct evtchn_bind_ipi {
++ uint32_t vcpu;
++ /* OUT parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
++
++/*
++ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
++ * interdomain then the remote end is placed in the unbound state
++ * (EVTCHNSTAT_unbound), awaiting a new connection.
++ */
++#define EVTCHNOP_close 3
++struct evtchn_close {
++ /* IN parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_close evtchn_close_t;
++
++/*
++ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
++ * endpoint is <port>.
++ */
++#define EVTCHNOP_send 4
++struct evtchn_send {
++ /* IN parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_send evtchn_send_t;
++
++/*
++ * EVTCHNOP_status: Get the current status of the communication channel which
++ * has an endpoint at <dom, port>.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may obtain the status of an event
++ * channel for which <dom> is not DOMID_SELF.
++ */
++#define EVTCHNOP_status 5
++struct evtchn_status {
++ /* IN parameters */
++ domid_t dom;
++ evtchn_port_t port;
++ /* OUT parameters */
++#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
++#define EVTCHNSTAT_unbound 1 /* Channel is awaiting interdom connection. */
++#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
++#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
++#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
++#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
++ uint32_t status;
++ uint32_t vcpu; /* VCPU to which this channel is bound. */
++ union {
++ struct {
++ domid_t dom;
++ } unbound; /* EVTCHNSTAT_unbound */
++ struct {
++ domid_t dom;
++ evtchn_port_t port;
++ } interdomain; /* EVTCHNSTAT_interdomain */
++ uint32_t pirq; /* EVTCHNSTAT_pirq */
++ uint32_t virq; /* EVTCHNSTAT_virq */
++ } u;
++};
++typedef struct evtchn_status evtchn_status_t;
++
++/*
++ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
++ * event is pending.
++ * NOTES:
++ * 1. IPI-bound channels always notify the vcpu specified at bind time.
++ * This binding cannot be changed.
++ * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
++ * This binding cannot be changed.
++ * 3. All other channels notify vcpu0 by default. This default is set when
++ * the channel is allocated (a port that is freed and subsequently reused
++ * has its binding reset to vcpu0).
++ */
++#define EVTCHNOP_bind_vcpu 8
++struct evtchn_bind_vcpu {
++ /* IN parameters. */
++ evtchn_port_t port;
++ uint32_t vcpu;
++};
++typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
++
++/*
++ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
++ * a notification to the appropriate VCPU if an event is pending.
++ */
++#define EVTCHNOP_unmask 9
++struct evtchn_unmask {
++ /* IN parameters. */
++ evtchn_port_t port;
++};
++typedef struct evtchn_unmask evtchn_unmask_t;
++
++/*
++ * EVTCHNOP_reset: Close all event channels associated with the specified domain.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
++ */
++#define EVTCHNOP_reset 10
++struct evtchn_reset {
++ /* IN parameters. */
++ domid_t dom;
++};
++typedef struct evtchn_reset evtchn_reset_t;
++
++/*
++ * Argument to event_channel_op_compat() hypercall. Superseded by new
++ * event_channel_op() hypercall since 0x00030202.
++ */
++struct evtchn_op {
++ uint32_t cmd; /* EVTCHNOP_* */
++ union {
++ struct evtchn_alloc_unbound alloc_unbound;
++ struct evtchn_bind_interdomain bind_interdomain;
++ struct evtchn_bind_virq bind_virq;
++ struct evtchn_bind_pirq bind_pirq;
++ struct evtchn_bind_ipi bind_ipi;
++ struct evtchn_close close;
++ struct evtchn_send send;
++ struct evtchn_status status;
++ struct evtchn_bind_vcpu bind_vcpu;
++ struct evtchn_unmask unmask;
++ } u;
++};
++typedef struct evtchn_op evtchn_op_t;
++DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
++
++#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/features.h linux-2.6.18-xen-3.3.0/include/xen/interface/features.h
+--- linux-2.6.18.8/include/xen/interface/features.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/features.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,74 @@
++/******************************************************************************
++ * features.h
++ *
++ * Feature flags, reported by XENVER_get_features.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_FEATURES_H__
++#define __XEN_PUBLIC_FEATURES_H__
++
++/*
++ * If set, the guest does not need to write-protect its pagetables, and can
++ * update them via direct writes.
++ */
++#define XENFEAT_writable_page_tables 0
++
++/*
++ * If set, the guest does not need to write-protect its segment descriptor
++ * tables, and can update them via direct writes.
++ */
++#define XENFEAT_writable_descriptor_tables 1
++
++/*
++ * If set, translation between the guest's 'pseudo-physical' address space
++ * and the host's machine address space is handled by the hypervisor. In this
++ * mode the guest does not need to perform phys-to/from-machine translations
++ * when performing page table operations.
++ */
++#define XENFEAT_auto_translated_physmap 2
++
++/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
++#define XENFEAT_supervisor_mode_kernel 3
++
++/*
++ * If set, the guest does not need to allocate x86 PAE page directories
++ * below 4GB. This flag is usually implied by auto_translated_physmap.
++ */
++#define XENFEAT_pae_pgdir_above_4gb 4
++
++/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
++#define XENFEAT_mmu_pt_update_preserve_ad 5
++
++#define XENFEAT_NR_SUBMAPS 1
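++
++/*
++ * Example (illustrative sketch, not part of this interface): testing a
++ * feature flag, assuming struct xen_feature_info and the
++ * HYPERVISOR_xen_version() wrapper declared alongside version.h.
++ */
++static inline int xen_example_has_feature(unsigned int feature)
++{
++    struct xen_feature_info fi = { .submap_idx = feature / 32 };
++
++    /* Each submap carries 32 feature flags; submap 0 holds the above. */
++    if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
++        return 0;
++    return (fi.submap >> (feature % 32)) & 1;
++}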
++
++#endif /* __XEN_PUBLIC_FEATURES_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/foreign/Makefile linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/Makefile
+--- linux-2.6.18.8/include/xen/interface/foreign/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,37 @@
++XEN_ROOT=../../../..
++include $(XEN_ROOT)/Config.mk
++
++architectures := x86_32 x86_64 ia64
++headers := $(patsubst %, %.h, $(architectures))
++scripts := $(wildcard *.py)
++
++.PHONY: all clean check-headers
++all: $(headers) check-headers
++
++clean:
++ rm -f $(headers)
++ rm -f checker checker.c $(XEN_TARGET_ARCH).size
++ rm -f *.pyc *.o *~
++
++ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
++check-headers: checker
++ ./checker > $(XEN_TARGET_ARCH).size
++ diff -u reference.size $(XEN_TARGET_ARCH).size
++checker: checker.c $(headers)
++ $(HOSTCC) $(HOSTCFLAGS) -o $@ $<
++else
++check-headers:
++ @echo "cross build: skipping check"
++endif
++
++x86_32.h: ../arch-x86/xen-x86_32.h ../arch-x86/xen.h ../xen.h $(scripts)
++ python mkheader.py $* $@ $(filter %.h,$^)
++
++x86_64.h: ../arch-x86/xen-x86_64.h ../arch-x86/xen.h ../xen.h $(scripts)
++ python mkheader.py $* $@ $(filter %.h,$^)
++
++ia64.h: ../arch-ia64.h ../xen.h $(scripts)
++ python mkheader.py $* $@ $(filter %.h,$^)
++
++checker.c: $(scripts)
++ python mkchecker.py $(XEN_TARGET_ARCH) $@ $(architectures)
+diff -rpuN linux-2.6.18.8/include/xen/interface/foreign/mkchecker.py linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/mkchecker.py
+--- linux-2.6.18.8/include/xen/interface/foreign/mkchecker.py 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/mkchecker.py 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++#!/usr/bin/python
++
++import sys;
++from structs import structs;
++
++# command line arguments
++arch = sys.argv[1];
++outfile = sys.argv[2];
++archs = sys.argv[3:];
++
++f = open(outfile, "w");
++f.write('''
++/*
++ * sanity checks for generated foreign headers:
++ * - verify struct sizes
++ *
++ * generated by %s -- DO NOT EDIT
++ */
++#include <stdio.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <inttypes.h>
++#include "../xen.h"
++''' % sys.argv[0]);
++
++for a in archs:
++ f.write('#include "%s.h"\n' % a);
++
++f.write('int main(int argc, char *argv[])\n{\n');
++
++f.write('\tprintf("\\n");');
++f.write('printf("%-25s |", "structs");\n');
++for a in archs:
++ f.write('\tprintf("%%8s", "%s");\n' % a);
++f.write('\tprintf("\\n");');
++
++f.write('\tprintf("\\n");');
++for struct in structs:
++ f.write('\tprintf("%%-25s |", "%s");\n' % struct);
++ for a in archs:
++ if a == arch:
++ s = struct; # native
++ else:
++ s = struct + "_" + a;
++ f.write('#ifdef %s_has_no_%s\n' % (a, struct));
++ f.write('\tprintf("%8s", "-");\n');
++ f.write("#else\n");
++ f.write('\tprintf("%%8zd", sizeof(struct %s));\n' % s);
++ f.write("#endif\n");
++
++ f.write('\tprintf("\\n");\n\n');
++
++f.write('\tprintf("\\n");\n');
++f.write('\texit(0);\n');
++f.write('}\n');
++
++f.close();
++
+diff -rpuN linux-2.6.18.8/include/xen/interface/foreign/mkheader.py linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/mkheader.py
+--- linux-2.6.18.8/include/xen/interface/foreign/mkheader.py 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/mkheader.py 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,167 @@
++#!/usr/bin/python
++
++import sys, re;
++from structs import unions, structs, defines;
++
++# command line arguments
++arch = sys.argv[1];
++outfile = sys.argv[2];
++infiles = sys.argv[3:];
++
++
++###########################################################################
++# configuration #2: architecture information
++
++inttypes = {};
++header = {};
++footer = {};
++
++# x86_32
++inttypes["x86_32"] = {
++ "unsigned long" : "uint32_t",
++ "long" : "uint32_t",
++ "xen_pfn_t" : "uint32_t",
++};
++header["x86_32"] = """
++#define __i386___X86_32 1
++#pragma pack(4)
++""";
++footer["x86_32"] = """
++#pragma pack()
++""";
++
++# x86_64
++inttypes["x86_64"] = {
++ "unsigned long" : "__align8__ uint64_t",
++ "long" : "__align8__ uint64_t",
++ "xen_pfn_t" : "__align8__ uint64_t",
++};
++header["x86_64"] = """
++#ifdef __GNUC__
++# define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
++# define __align8__ __attribute__((aligned (8)))
++#else
++# define __DECL_REG(name) uint64_t r ## name
++# define __align8__ FIXME
++#endif
++#define __x86_64___X86_64 1
++""";
++
++# ia64
++inttypes["ia64"] = {
++ "unsigned long" : "__align8__ uint64_t",
++ "long" : "__align8__ uint64_t",
++ "xen_pfn_t" : "__align8__ uint64_t",
++ "long double" : "__align16__ ldouble_t",
++};
++header["ia64"] = """
++#define __align8__ __attribute__((aligned (8)))
++#define __align16__ __attribute__((aligned (16)))
++typedef unsigned char ldouble_t[16];
++""";
++
++
++###########################################################################
++# main
++
++input = "";
++output = "";
++fileid = re.sub("[-.]", "_", "__FOREIGN_%s__" % outfile.upper());
++
++# read input header files
++for name in infiles:
++ f = open(name, "r");
++ input += f.read();
++ f.close();
++
++# add header
++output += """
++/*
++ * public xen defines and struct for %s
++ * generated by %s -- DO NOT EDIT
++ */
++
++#ifndef %s
++#define %s 1
++
++""" % (arch, sys.argv[0], fileid, fileid)
++
++if arch in header:
++ output += header[arch];
++ output += "\n";
++
++# add defines to output
++for line in re.findall("#define[^\n]+", input):
++ for define in defines:
++ regex = "#define\s+%s\\b" % define;
++ match = re.search(regex, line);
++ if None == match:
++ continue;
++ if define.upper()[0] == define[0]:
++ replace = define + "_" + arch.upper();
++ else:
++ replace = define + "_" + arch;
++ regex = "\\b%s\\b" % define;
++ output += re.sub(regex, replace, line) + "\n";
++output += "\n";
++
++# delete defines, comments, empty lines
++input = re.sub("#define[^\n]+\n", "", input);
++input = re.compile("/\*(.*?)\*/", re.S).sub("", input)
++input = re.compile("\n\s*\n", re.S).sub("\n", input);
++
++# add unions to output
++for union in unions:
++ regex = "union\s+%s\s*\{(.*?)\n\};" % union;
++ match = re.search(regex, input, re.S)
++ if None == match:
++ output += "#define %s_has_no_%s 1\n" % (arch, union);
++ else:
++ output += "union %s_%s {%s\n};\n" % (union, arch, match.group(1));
++ output += "\n";
++
++# add structs to output
++for struct in structs:
++ regex = "struct\s+%s\s*\{(.*?)\n\};" % struct;
++ match = re.search(regex, input, re.S)
++ if None == match:
++ output += "#define %s_has_no_%s 1\n" % (arch, struct);
++ else:
++ output += "struct %s_%s {%s\n};\n" % (struct, arch, match.group(1));
++ output += "typedef struct %s_%s %s_%s_t;\n" % (struct, arch, struct, arch);
++ output += "\n";
++
++# add footer
++if arch in footer:
++ output += footer[arch];
++ output += "\n";
++output += "#endif /* %s */\n" % fileid;
++
++# replace: defines
++for define in defines:
++ if define.upper()[0] == define[0]:
++ replace = define + "_" + arch.upper();
++ else:
++ replace = define + "_" + arch;
++ output = re.sub("\\b%s\\b" % define, replace, output);
++
++# replace: unions
++for union in unions:
++ output = re.sub("\\b(union\s+%s)\\b" % union, "\\1_%s" % arch, output);
++
++# replace: structs + struct typedefs
++for struct in structs:
++ output = re.sub("\\b(struct\s+%s)\\b" % struct, "\\1_%s" % arch, output);
++ output = re.sub("\\b(%s)_t\\b" % struct, "\\1_%s_t" % arch, output);
++
++# replace: integer types
++integers = inttypes[arch].keys();
++integers.sort(lambda a, b: cmp(len(b),len(a)));
++for type in integers:
++ output = re.sub("\\b%s\\b" % type, inttypes[arch][type], output);
++
++# print results
++f = open(outfile, "w");
++f.write(output);
++f.close();
++
+diff -rpuN linux-2.6.18.8/include/xen/interface/foreign/reference.size linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/reference.size
+--- linux-2.6.18.8/include/xen/interface/foreign/reference.size 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/reference.size 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,18 @@
++
++structs | x86_32 x86_64 ia64
++
++start_info | 1104 1152 1152
++trap_info | 8 16 -
++pt_fpreg | - - 16
++cpu_user_regs | 68 200 -
++xen_ia64_boot_param | - - 96
++ia64_tr_entry | - - 32
++vcpu_tr_regs | - - 768
++vcpu_guest_context_regs | - - 22176
++vcpu_guest_context | 2800 5168 22208
++arch_vcpu_info | 24 16 0
++vcpu_time_info | 32 32 32
++vcpu_info | 64 64 48
++arch_shared_info | 268 280 272
++shared_info | 2584 3368 4384
++
+diff -rpuN linux-2.6.18.8/include/xen/interface/foreign/structs.py linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/structs.py
+--- linux-2.6.18.8/include/xen/interface/foreign/structs.py 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/foreign/structs.py 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,58 @@
++# configuration: what needs translation
++
++unions = [ "vcpu_cr_regs",
++ "vcpu_ar_regs" ];
++
++structs = [ "start_info",
++ "trap_info",
++ "pt_fpreg",
++ "cpu_user_regs",
++ "xen_ia64_boot_param",
++ "ia64_tr_entry",
++ "vcpu_tr_regs",
++ "vcpu_guest_context_regs",
++ "vcpu_guest_context",
++ "arch_vcpu_info",
++ "vcpu_time_info",
++ "vcpu_info",
++ "arch_shared_info",
++ "shared_info" ];
++
++defines = [ "__i386__",
++ "__x86_64__",
++
++ "FLAT_RING1_CS",
++ "FLAT_RING1_DS",
++ "FLAT_RING1_SS",
++
++ "FLAT_RING3_CS64",
++ "FLAT_RING3_DS64",
++ "FLAT_RING3_SS64",
++ "FLAT_KERNEL_CS64",
++ "FLAT_KERNEL_DS64",
++ "FLAT_KERNEL_SS64",
++
++ "FLAT_KERNEL_CS",
++ "FLAT_KERNEL_DS",
++ "FLAT_KERNEL_SS",
++
++ # x86_{32,64}
++ "_VGCF_i387_valid",
++ "VGCF_i387_valid",
++ "_VGCF_in_kernel",
++ "VGCF_in_kernel",
++ "_VGCF_failsafe_disables_events",
++ "VGCF_failsafe_disables_events",
++ "_VGCF_syscall_disables_events",
++ "VGCF_syscall_disables_events",
++ "_VGCF_online",
++ "VGCF_online",
++
++ # ia64
++ "VGCF_EXTRA_REGS",
++
++ # all archs
++ "xen_pfn_to_cr3",
++ "MAX_VIRT_CPUS",
++ "MAX_GUEST_CMDLINE" ];
++
+diff -rpuN linux-2.6.18.8/include/xen/interface/grant_table.h linux-2.6.18-xen-3.3.0/include/xen/interface/grant_table.h
+--- linux-2.6.18.8/include/xen/interface/grant_table.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/grant_table.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,431 @@
++/******************************************************************************
++ * grant_table.h
++ *
++ * Interface for granting foreign access to page frames, and receiving
++ * page-ownership transfers.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
++#define __XEN_PUBLIC_GRANT_TABLE_H__
++
++
++/***********************************
++ * GRANT TABLE REPRESENTATION
++ */
++
++/* Some rough guidelines on accessing and updating grant-table entries
++ * in a concurrency-safe manner. For more information, Linux contains a
++ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
++ *
++ * NB. WMB is a no-op on current-generation x86 processors. However, a
++ * compiler barrier will still be required.
++ *
++ * Introducing a valid entry into the grant table:
++ * 1. Write ent->domid.
++ * 2. Write ent->frame:
++ * GTF_permit_access: Frame to which access is permitted.
++ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
++ * frame, or zero if none.
++ * 3. Write memory barrier (WMB).
++ * 4. Write ent->flags, inc. valid type.
++ *
++ * Invalidating an unused GTF_permit_access entry:
++ * 1. flags = ent->flags.
++ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
++ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ * NB. No need for WMB as reuse of entry is control-dependent on success of
++ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ *
++ * Invalidating an in-use GTF_permit_access entry:
++ * This cannot be done directly. Request assistance from the domain controller
++ * which can set a timeout on the use of a grant entry and take necessary
++ * action. (NB. This is not yet implemented!).
++ *
++ * Invalidating an unused GTF_accept_transfer entry:
++ * 1. flags = ent->flags.
++ * 2. Observe that !(flags & GTF_transfer_committed). [*]
++ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ * NB. No need for WMB as reuse of entry is control-dependent on success of
++ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
++ * The guest must /not/ modify the grant entry until the address of the
++ * transferred frame is written. It is safe for the guest to spin waiting
++ * for this to occur (detect by observing GTF_transfer_completed in
++ * ent->flags).
++ *
++ * Invalidating a committed GTF_accept_transfer entry:
++ * 1. Wait for (ent->flags & GTF_transfer_completed).
++ *
++ * Changing a GTF_permit_access from writable to read-only:
++ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
++ *
++ * Changing a GTF_permit_access from read-only to writable:
++ * Use SMP-safe bit-setting instruction.
++ */
++
++/*
++ * A grant table comprises a packed array of grant entries in one or more
++ * page frames shared between Xen and a guest.
++ * [XEN]: This field is written by Xen and read by the sharing guest.
++ * [GST]: This field is written by the guest and read by Xen.
++ */
++struct grant_entry {
++ /* GTF_xxx: various type and flag information. [XEN,GST] */
++ uint16_t flags;
++ /* The domain being granted foreign privileges. [GST] */
++ domid_t domid;
++ /*
++ * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
++ * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
++ */
++ uint32_t frame;
++};
++typedef struct grant_entry grant_entry_t;
++
++/*
++ * Type of grant entry.
++ * GTF_invalid: This grant entry grants no privileges.
++ * GTF_permit_access: Allow @domid to map/access @frame.
++ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
++ * to this guest. Xen writes the page number to @frame.
++ */
++#define GTF_invalid (0U<<0)
++#define GTF_permit_access (1U<<0)
++#define GTF_accept_transfer (2U<<0)
++#define GTF_type_mask (3U<<0)
++
++/*
++ * Subflags for GTF_permit_access.
++ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
++ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
++ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
++ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
++ */
++#define _GTF_readonly (2)
++#define GTF_readonly (1U<<_GTF_readonly)
++#define _GTF_reading (3)
++#define GTF_reading (1U<<_GTF_reading)
++#define _GTF_writing (4)
++#define GTF_writing (1U<<_GTF_writing)
++#define _GTF_PWT (5)
++#define GTF_PWT (1U<<_GTF_PWT)
++#define _GTF_PCD (6)
++#define GTF_PCD (1U<<_GTF_PCD)
++#define _GTF_PAT (7)
++#define GTF_PAT (1U<<_GTF_PAT)
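++
++/*
++ * Example (illustrative sketch, not part of this interface): the
++ * "introducing a valid entry" sequence described at the top of this
++ * section, assuming the kernel's wmb() write barrier.
++ */
++static inline void gnttab_example_grant_access(grant_entry_t *ent,
++                                               domid_t domid,
++                                               uint32_t frame,
++                                               int readonly)
++{
++    ent->domid = domid;    /* step 1 */
++    ent->frame = frame;    /* step 2 */
++    wmb();                 /* step 3: order the writes above ... */
++    /* step 4: ... before the flags write that makes the entry live */
++    ent->flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
++}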
++
++/*
++ * Subflags for GTF_accept_transfer:
++ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
++ * to transferring ownership of a page frame. When a guest sees this flag
++ * it must /not/ modify the grant entry until GTF_transfer_completed is
++ * set by Xen.
++ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
++ * after reading GTF_transfer_committed. Xen will always write the frame
++ * address, followed by ORing this flag, in a timely manner.
++ */
++#define _GTF_transfer_committed (2)
++#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
++#define _GTF_transfer_completed (3)
++#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
++
++
++/***********************************
++ * GRANT TABLE QUERIES AND USES
++ */
++
++/*
++ * Reference to a grant entry in a specified domain's grant table.
++ */
++typedef uint32_t grant_ref_t;
++
++/*
++ * Handle to track a mapping created via a grant reference.
++ */
++typedef uint32_t grant_handle_t;
++
++/*
++ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
++ * by devices and/or host CPUs. If successful, <handle> is a tracking number
++ * that must be presented later to destroy the mapping(s). On error, <handle>
++ * is a negative status code.
++ * NOTES:
++ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
++ * via which I/O devices may access the granted frame.
++ * 2. If GNTMAP_host_map is specified then a mapping will be added at
++ * either a host virtual address in the current address space, or at
++ * a PTE at the specified machine address. The type of mapping to
++ * perform is selected through the GNTMAP_contains_pte flag, and the
++ * address is specified in <host_addr>.
++ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
++ * host mapping is destroyed by other means then it is *NOT* guaranteed
++ * to be accounted to the correct grant reference!
++ */
++#define GNTTABOP_map_grant_ref 0
++struct gnttab_map_grant_ref {
++ /* IN parameters. */
++ uint64_t host_addr;
++ uint32_t flags; /* GNTMAP_* */
++ grant_ref_t ref;
++ domid_t dom;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++ grant_handle_t handle;
++ uint64_t dev_bus_addr;
++};
++typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
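++
++/*
++ * Example (illustrative sketch, not part of this interface): mapping a
++ * granted frame at a host virtual address, assuming the kernel's
++ * HYPERVISOR_grant_table_op() wrapper; the GNTMAP_* flags and GNTST_*
++ * status codes are defined further down in this header.
++ */
++static inline int gnttab_example_map(domid_t dom, grant_ref_t ref,
++                                     uint64_t vaddr, grant_handle_t *handle)
++{
++    struct gnttab_map_grant_ref op = {
++        .host_addr = vaddr,           /* where to place the CPU mapping */
++        .flags     = GNTMAP_host_map, /* host CPU mapping, read/write */
++        .ref       = ref,
++        .dom       = dom,
++    };
++
++    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++        return -1;                    /* the hypercall itself failed */
++    if (op.status != GNTST_okay)
++        return op.status;             /* per-op error, see GNTST_* */
++    *handle = op.handle;              /* required later for the unmap */
++    return 0;
++}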
++
++/*
++ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
++ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
++ * field is ignored. If non-zero, each must refer to a device/host mapping
++ * that is tracked by <handle>.
++ * NOTES:
++ * 1. The call may fail in an undefined manner if either mapping is not
++ * tracked by <handle>.
++ * 2. After executing a batch of unmaps, it is guaranteed that no stale
++ * mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_grant_ref 1
++struct gnttab_unmap_grant_ref {
++ /* IN parameters. */
++ uint64_t host_addr;
++ uint64_t dev_bus_addr;
++ grant_handle_t handle;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
++
++/*
++ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
++ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
++ * Only <nr_frames> addresses are written, even if the table is larger.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ * 3. Xen may not support more than a single grant-table page per domain.
++ */
++#define GNTTABOP_setup_table 2
++struct gnttab_setup_table {
++ /* IN parameters. */
++ domid_t dom;
++ uint32_t nr_frames;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++ XEN_GUEST_HANDLE(ulong) frame_list;
++};
++typedef struct gnttab_setup_table gnttab_setup_table_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
++
++/*
++ * GNTTABOP_dump_table: Dump the contents of the grant table to the
++ * xen console. Debugging use only.
++ */
++#define GNTTABOP_dump_table 3
++struct gnttab_dump_table {
++ /* IN parameters. */
++ domid_t dom;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_dump_table gnttab_dump_table_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
++
++/*
++ * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
++ * foreign domain has previously registered its interest in the transfer via
++ * <domid, ref>.
++ *
++ * Note that, even if the transfer fails, the specified page no longer belongs
++ * to the calling domain *unless* the error is GNTST_bad_page.
++ */
++#define GNTTABOP_transfer 4
++struct gnttab_transfer {
++ /* IN parameters. */
++ xen_pfn_t mfn;
++ domid_t domid;
++ grant_ref_t ref;
++ /* OUT parameters. */
++ int16_t status;
++};
++typedef struct gnttab_transfer gnttab_transfer_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
++
++
++/*
++ * GNTTABOP_copy: Hypervisor-based copy.
++ * The source and destination can each be either an MFN or, for foreign
++ * domains, a grant reference; the foreign domain has to grant read/write
++ * access in its grant table.
++ *
++ * The flags specify whether the source and destination are MFNs or
++ * grant references.
++ *
++ * Note that this can also be used to copy data between two domains
++ * via a third party if the source and destination domains had previously
++ * granted appropriate access to their pages to the third party.
++ *
++ * source_offset specifies an offset in the source frame, dest_offset
++ * the offset in the target frame and len specifies the number of
++ * bytes to be copied.
++ */
++
++#define _GNTCOPY_source_gref (0)
++#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
++#define _GNTCOPY_dest_gref (1)
++#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
++
++#define GNTTABOP_copy 5
++typedef struct gnttab_copy {
++ /* IN parameters. */
++ struct {
++ union {
++ grant_ref_t ref;
++ xen_pfn_t gmfn;
++ } u;
++ domid_t domid;
++ uint16_t offset;
++ } source, dest;
++ uint16_t len;
++ uint16_t flags; /* GNTCOPY_* */
++ /* OUT parameters. */
++ int16_t status;
++} gnttab_copy_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
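++
++/*
++ * Example (illustrative sketch, not part of this interface): copying
++ * local data into a page the remote domain has granted us write access
++ * to, again assuming the HYPERVISOR_grant_table_op() wrapper; GNTST_okay
++ * is defined at the end of this header.
++ */
++static inline int gnttab_example_copy_out(domid_t dom, grant_ref_t ref,
++                                          xen_pfn_t local_gmfn, uint16_t len)
++{
++    gnttab_copy_t op = {
++        .source.u.gmfn = local_gmfn,
++        .source.domid  = DOMID_SELF,
++        .dest.u.ref    = ref,
++        .dest.domid    = dom,
++        .len           = len,               /* must not cross a page */
++        .flags         = GNTCOPY_dest_gref, /* destination is a grant ref */
++    };
++
++    if (HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1))
++        return -1;
++    return op.status == GNTST_okay ? 0 : op.status;
++}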
++
++/*
++ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
++ * grant table.
++ * NOTES:
++ * 1. <dom> may be specified as DOMID_SELF.
++ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ */
++#define GNTTABOP_query_size 6
++struct gnttab_query_size {
++ /* IN parameters. */
++ domid_t dom;
++ /* OUT parameters. */
++ uint32_t nr_frames;
++ uint32_t max_nr_frames;
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_query_size gnttab_query_size_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
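
A sketch of querying the table size, again assuming the kernel-side hypercall
wrapper; the printk is purely illustrative:

    struct gnttab_query_size query = { .dom = DOMID_SELF };

    if (HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1) == 0 &&
        query.status == GNTST_okay)
        printk("grant table: %u of %u frames in use\n",
               query.nr_frames, query.max_nr_frames);
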
++
++/*
++ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
++ * tracked by <handle> but atomically replace the page table entry with one
++ * pointing to the machine address under <new_addr>. <new_addr> will be
++ * redirected to the null entry.
++ * NOTES:
++ * 1. The call may fail in an undefined manner if either mapping is not
++ * tracked by <handle>.
++ * 2. After executing a batch of unmaps, it is guaranteed that no stale
++ * mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_and_replace 7
++struct gnttab_unmap_and_replace {
++ /* IN parameters. */
++ uint64_t host_addr;
++ uint64_t new_addr;
++ grant_handle_t handle;
++ /* OUT parameters. */
++ int16_t status; /* GNTST_* */
++};
++typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
++DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
++
++
++/*
++ * Bitfield values for update_pin_status.flags.
++ */
++ /* Map the grant entry for access by I/O devices. */
++#define _GNTMAP_device_map (0)
++#define GNTMAP_device_map (1<<_GNTMAP_device_map)
++ /* Map the grant entry for access by host CPUs. */
++#define _GNTMAP_host_map (1)
++#define GNTMAP_host_map (1<<_GNTMAP_host_map)
++ /* Accesses to the granted frame will be restricted to read-only access. */
++#define _GNTMAP_readonly (2)
++#define GNTMAP_readonly (1<<_GNTMAP_readonly)
++ /*
++ * GNTMAP_host_map subflag:
++ * 0 => The host mapping is usable only by the guest OS.
++ * 1 => The host mapping is usable by guest OS + current application.
++ */
++#define _GNTMAP_application_map (3)
++#define GNTMAP_application_map (1<<_GNTMAP_application_map)
++
++ /*
++ * GNTMAP_contains_pte subflag:
++ * 0 => This map request contains a host virtual address.
++ * 1 => This map request contains the machine address of the PTE to update.
++ */
++#define _GNTMAP_contains_pte (4)
++#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
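
Typical flag combinations, for illustration; the variable names are hypothetical:

    uint32_t ro_cpu_map = GNTMAP_host_map | GNTMAP_readonly;  /* CPU, read-only */
    uint32_t dev_map    = GNTMAP_device_map;                  /* I/O device, r/w */
    uint32_t pte_map    = GNTMAP_host_map | GNTMAP_contains_pte;
                          /* host_addr holds the PTE's machine address */
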
++
++/*
++ * Values for error status returns. All errors are -ve.
++ */
++#define GNTST_okay (0) /* Normal return. */
++#define GNTST_general_error (-1) /* General undefined error. */
++#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
++#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
++#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
++#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
++#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
++#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
++#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
++#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
++#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
++#define GNTST_address_too_big (-11) /* transfer page address too large. */
++
++#define GNTTABOP_error_msgs { \
++ "okay", \
++ "undefined error", \
++ "unrecognised domain id", \
++ "invalid grant reference", \
++ "invalid mapping handle", \
++ "invalid virtual address", \
++ "invalid device address", \
++ "no spare translation slot in the I/O MMU", \
++ "permission denied", \
++ "bad page", \
++ "copy arguments cross page boundary", \
++ "page address size too large" \
++}
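
A small helper, for illustration, that maps a status code onto this table;
errors are negative, so the message index is simply the negated status
(bounds-checked first):

    static const char *gnttab_error_msgs[] = GNTTABOP_error_msgs;

    static const char *gnttab_strerror(int16_t status)
    {
        if (status > GNTST_okay || status < GNTST_address_too_big)
            return "bad status code";
        return gnttab_error_msgs[-status];
    }
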
++
++#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/e820.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/e820.h
+--- linux-2.6.18.8/include/xen/interface/hvm/e820.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/e820.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,34 @@
++
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_E820_H__
++#define __XEN_PUBLIC_HVM_E820_H__
++
++/* E820 location in HVM virtual address space. */
++#define HVM_E820_PAGE 0x00090000
++#define HVM_E820_NR_OFFSET 0x000001E8
++#define HVM_E820_OFFSET 0x000002D0
++
++#define HVM_BELOW_4G_RAM_END 0xF0000000
++#define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END
++#define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
++
++#endif /* __XEN_PUBLIC_HVM_E820_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/hvm_info_table.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/hvm_info_table.h
+--- linux-2.6.18.8/include/xen/interface/hvm/hvm_info_table.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/hvm_info_table.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,41 @@
++/******************************************************************************
++ * hvm/hvm_info_table.h
++ *
++ * HVM parameter and information table, written into guest memory map.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++
++#define HVM_INFO_PFN 0x09F
++#define HVM_INFO_OFFSET 0x800
++#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
++
++struct hvm_info_table {
++ char signature[8]; /* "HVM INFO" */
++ uint32_t length;
++ uint8_t checksum;
++ uint8_t acpi_enabled;
++ uint8_t apic_mode;
++ uint32_t nr_vcpus;
++};
++
++#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/hvm_op.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/hvm_op.h
+--- linux-2.6.18.8/include/xen/interface/hvm/hvm_op.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/hvm_op.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,131 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
++#define __XEN_PUBLIC_HVM_HVM_OP_H__
++
++/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
++#define HVMOP_set_param 0
++#define HVMOP_get_param 1
++struct xen_hvm_param {
++ domid_t domid; /* IN */
++ uint32_t index; /* IN */
++ uint64_t value; /* IN/OUT */
++};
++typedef struct xen_hvm_param xen_hvm_param_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
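
A sketch of fetching one parameter, assuming the HYPERVISOR_hvm_op() wrapper
this patch defines elsewhere; HVM_PARAM_STORE_EVTCHN comes from params.h later
in this patch:

    struct xen_hvm_param p;

    p.domid = DOMID_SELF;
    p.index = HVM_PARAM_STORE_EVTCHN;
    if (HYPERVISOR_hvm_op(HVMOP_get_param, &p) == 0)
        ;  /* p.value now holds the xenstore event channel */
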
++
++/* Set the logical level of one of a domain's PCI INTx wires. */
++#define HVMOP_set_pci_intx_level 2
++struct xen_hvm_set_pci_intx_level {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
++ uint8_t domain, bus, device, intx;
++ /* Assertion level (0 = unasserted, 1 = asserted). */
++ uint8_t level;
++};
++typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
++
++/* Set the logical level of one of a domain's ISA IRQ wires. */
++#define HVMOP_set_isa_irq_level 3
++struct xen_hvm_set_isa_irq_level {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* ISA device identification, by ISA IRQ (0-15). */
++ uint8_t isa_irq;
++ /* Assertion level (0 = unasserted, 1 = asserted). */
++ uint8_t level;
++};
++typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
++
++#define HVMOP_set_pci_link_route 4
++struct xen_hvm_set_pci_link_route {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* PCI link identifier (0-3). */
++ uint8_t link;
++ /* ISA IRQ (1-15), or 0 (disable link). */
++ uint8_t isa_irq;
++};
++typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
++
++/* Flushes all VCPU TLBs: @arg must be NULL. */
++#define HVMOP_flush_tlbs 5
++
++/* Following tools-only interfaces may change in future. */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++
++/* Track dirty VRAM. */
++#define HVMOP_track_dirty_vram 6
++struct xen_hvm_track_dirty_vram {
++ /* Domain to be tracked. */
++ domid_t domid;
++ /* First pfn to track. */
++ uint64_aligned_t first_pfn;
++ /* Number of pages to track. */
++ uint64_aligned_t nr;
++ /* OUT variable. */
++ /* Dirty bitmap buffer. */
++ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
++};
++typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
++
++/* Notify that some pages got modified by the Device Model. */
++#define HVMOP_modified_memory 7
++struct xen_hvm_modified_memory {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* First pfn. */
++ uint64_aligned_t first_pfn;
++ /* Number of pages. */
++ uint64_aligned_t nr;
++};
++typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
++
++#define HVMOP_set_mem_type 8
++typedef enum {
++ HVMMEM_ram_rw, /* Normal read/write guest RAM */
++ HVMMEM_ram_ro, /* Read-only; writes are discarded */
++ HVMMEM_mmio_dm, /* Reads and writes go to the device model */
++} hvmmem_type_t;
++/* Notify that a region of memory is to be treated in a specific way. */
++struct xen_hvm_set_mem_type {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* Memory type */
++ hvmmem_type_t hvmmem_type;
++ /* First pfn. */
++ uint64_aligned_t first_pfn;
++ /* Number of pages. */
++ uint64_aligned_t nr;
++};
++typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
++
++
++#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
++
++#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/ioreq.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/ioreq.h
+--- linux-2.6.18.8/include/xen/interface/hvm/ioreq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/ioreq.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,127 @@
++/*
++ * ioreq.h: I/O request definitions for device models
++ * Copyright (c) 2004, Intel Corporation.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _IOREQ_H_
++#define _IOREQ_H_
++
++#define IOREQ_READ 1
++#define IOREQ_WRITE 0
++
++#define STATE_IOREQ_NONE 0
++#define STATE_IOREQ_READY 1
++#define STATE_IOREQ_INPROCESS 2
++#define STATE_IORESP_READY 3
++
++#define IOREQ_TYPE_PIO 0 /* pio */
++#define IOREQ_TYPE_COPY 1 /* mmio ops */
++#define IOREQ_TYPE_TIMEOFFSET 7
++#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
++
++/*
++ * The VMExit dispatcher should cooperate with the instruction decoder
++ * to prepare this structure and notify the service OS and DM by
++ * sending a virq.
++ */
++struct ioreq {
++ uint64_t addr; /* physical address */
++ uint64_t size; /* size in bytes */
++ uint64_t count; /* for rep prefixes */
++ uint64_t data; /* data (or paddr of data) */
++ uint8_t state:4;
++ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
++ * of the real data to use. */
++ uint8_t dir:1; /* 1=read, 0=write */
++ uint8_t df:1;
++ uint8_t pad:1;
++ uint8_t type; /* I/O type */
++ uint8_t _pad0[6];
++ uint64_t io_count; /* How many IO done on a vcpu */
++};
++typedef struct ioreq ioreq_t;
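
For illustration, the shape of a device-model service routine;
emulate_pio_read() is hypothetical and event-channel notification is omitted:

    void handle_ioreq(ioreq_t *req)
    {
        if (req->state != STATE_IOREQ_READY)
            return;                       /* nothing for us yet */
        req->state = STATE_IOREQ_INPROCESS;

        if (req->type == IOREQ_TYPE_PIO && req->dir == IOREQ_READ)
            req->data = emulate_pio_read(req->addr, req->size);

        req->state = STATE_IORESP_READY;  /* then notify via vp_eport */
    }
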
++
++struct vcpu_iodata {
++ struct ioreq vp_ioreq;
++ /* Event channel port, used for notifications to/from the device model. */
++ uint32_t vp_eport;
++ uint32_t _pad0;
++};
++typedef struct vcpu_iodata vcpu_iodata_t;
++
++struct shared_iopage {
++ struct vcpu_iodata vcpu_iodata[1];
++};
++typedef struct shared_iopage shared_iopage_t;
++
++struct buf_ioreq {
++ uint8_t type; /* I/O type */
++ uint8_t pad:1;
++ uint8_t dir:1; /* 1=read, 0=write */
++ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */
++ uint32_t addr:20;/* physical address */
++ uint32_t data; /* data */
++};
++typedef struct buf_ioreq buf_ioreq_t;
++
++#define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */
++struct buffered_iopage {
++ unsigned int read_pointer;
++ unsigned int write_pointer;
++ buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
++}; /* NB. Size of this structure must be no greater than one page. */
++typedef struct buffered_iopage buffered_iopage_t;
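
The pointers are free-running and reduced modulo IOREQ_BUFFER_SLOT_NUM on
access. A consumer sketch, with memory barriers omitted for brevity;
process_buf_ioreq() is hypothetical:

    void drain_buffered(buffered_iopage_t *pg)
    {
        while (pg->read_pointer != pg->write_pointer) {
            buf_ioreq_t *b =
                &pg->buf_ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM];
            process_buf_ioreq(b);
            pg->read_pointer++;
        }
    }
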
++
++#if defined(__ia64__)
++struct pio_buffer {
++ uint32_t page_offset;
++ uint32_t pointer;
++ uint32_t data_end;
++ uint32_t buf_size;
++ void *opaque;
++};
++
++#define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */
++#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
++#define PIO_BUFFER_ENTRY_NUM 2
++struct buffered_piopage {
++ struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
++ uint8_t buffer[1];
++};
++#endif /* defined(__ia64__) */
++
++#define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40
++#define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
++#define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
++#define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20)
++#define ACPI_GPE0_BLK_LEN 0x08
++
++#endif /* _IOREQ_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/params.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/params.h
+--- linux-2.6.18.8/include/xen/interface/hvm/params.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/params.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,98 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
++#define __XEN_PUBLIC_HVM_PARAMS_H__
++
++#include "hvm_op.h"
++
++/*
++ * Parameter space for HVMOP_{set,get}_param.
++ */
++
++/*
++ * How should CPU0 event-channel notifications be delivered?
++ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
++ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
++ * Domain = val[47:32], Bus = val[31:16],
++ * DevFn = val[15: 8], IntX = val[ 1: 0]
++ * If val == 0 then CPU0 event-channel notifications are not delivered.
++ */
++#define HVM_PARAM_CALLBACK_IRQ 0
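
For illustration, the two encodings described above; the GSI number and PCI
coordinates are hypothetical:

    /* Scheme 0: deliver on GSI 9. */
    uint64_t via_gsi  = ((uint64_t)0 << 56) | 9;

    /* Scheme 1: deliver on INTA of device 3, function 0, bus 0, domain 0. */
    uint64_t via_intx = ((uint64_t)1 << 56)
                      | ((uint64_t)0 << 32)              /* PCI domain */
                      | ((uint64_t)0 << 16)              /* bus        */
                      | ((uint64_t)((3 << 3) | 0) << 8)  /* devfn      */
                      | 0;                               /* INTx line A */
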
++
++/*
++ * These are not used by Xen. They are here for convenience of HVM-guest
++ * xenbus implementations.
++ */
++#define HVM_PARAM_STORE_PFN 1
++#define HVM_PARAM_STORE_EVTCHN 2
++
++#define HVM_PARAM_PAE_ENABLED 4
++
++#define HVM_PARAM_IOREQ_PFN 5
++
++#define HVM_PARAM_BUFIOREQ_PFN 6
++
++#ifdef __ia64__
++#define HVM_PARAM_NVRAM_FD 7
++#define HVM_PARAM_VHPT_SIZE 8
++#define HVM_PARAM_BUFPIOREQ_PFN 9
++#endif
++
++/*
++ * Set mode for virtual timers (currently x86 only):
++ * delay_for_missed_ticks (default):
++ * Do not advance a vcpu's time beyond the correct delivery time for
++ * interrupts that have been missed due to preemption. Deliver missed
++ * interrupts when the vcpu is rescheduled and advance the vcpu's virtual
++ * time stepwise for each one.
++ * no_delay_for_missed_ticks:
++ * As above, missed interrupts are delivered, but guest time always tracks
++ * wallclock (i.e., real) time while doing so.
++ * no_missed_ticks_pending:
++ * No missed interrupts are held pending. Instead, to ensure ticks are
++ * delivered at some non-zero rate, if we detect missed ticks then the
++ * internal tick alarm is not disabled if the VCPU is preempted during the
++ * next tick period.
++ * one_missed_tick_pending:
++ * Missed interrupts are collapsed together and delivered as one 'late tick'.
++ * Guest time always tracks wallclock (i.e., real) time.
++ */
++#define HVM_PARAM_TIMER_MODE 10
++#define HVMPTM_delay_for_missed_ticks 0
++#define HVMPTM_no_delay_for_missed_ticks 1
++#define HVMPTM_no_missed_ticks_pending 2
++#define HVMPTM_one_missed_tick_pending 3
++
++/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
++#define HVM_PARAM_HPET_ENABLED 11
++
++/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
++#define HVM_PARAM_IDENT_PT 12
++
++/* Device Model domain, defaults to 0. */
++#define HVM_PARAM_DM_DOMAIN 13
++
++/* ACPI S state: currently support S0 and S3 on x86. */
++#define HVM_PARAM_ACPI_S_STATE 14
++
++#define HVM_NR_PARAMS 15
++
++#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/save.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/save.h
+--- linux-2.6.18.8/include/xen/interface/hvm/save.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/save.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,88 @@
++/*
++ * hvm/save.h
++ *
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
++ *
++ * Copyright (c) 2007 XenSource Ltd.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_H__
++#define __XEN_PUBLIC_HVM_SAVE_H__
++
++/*
++ * Structures in this header *must* have the same layout in 32bit
++ * and 64bit environments: this means that all fields must be explicitly
++ * sized types and aligned to their sizes, and the structs must be
++ * a multiple of eight bytes long.
++ *
++ * Only the state necessary for saving and restoring (i.e. fields
++ * that are analogous to actual hardware state) should go in this file.
++ * Internal mechanisms should be kept in Xen-private headers.
++ */
++
++#if !defined(__GNUC__) || defined(__STRICT_ANSI__)
++#error "Anonymous structs/unions are a GNU extension."
++#endif
++
++/*
++ * Each entry is preceded by a descriptor giving its type and length
++ */
++struct hvm_save_descriptor {
++ uint16_t typecode; /* Used to demux the various types below */
++ uint16_t instance; /* Further demux within a type */
++ uint32_t length; /* In bytes, *not* including this descriptor */
++};
++
++
++/*
++ * Each entry has a datatype associated with it: for example, the CPU state
++ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU),
++ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
++ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
++ * ugliness.
++ */
++
++#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
++ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
++
++#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
++#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
++#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
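
For illustration, declaring a hypothetical record type with typecode 42 and
reading it back out of the type system:

    struct hvm_hw_foo { uint64_t bar; };          /* hypothetical state */
    DECLARE_HVM_SAVE_TYPE(FOO, 42, struct hvm_hw_foo);

    /* HVM_SAVE_CODE(FOO)   == 42, the size of the char array */
    /* HVM_SAVE_LENGTH(FOO) == sizeof(struct hvm_hw_foo)      */
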
++
++
++/*
++ * The series of save records is terminated by a zero-type, zero-length
++ * descriptor.
++ */
++
++struct hvm_save_end {};
++DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
++
++#if defined(__i386__) || defined(__x86_64__)
++#include "../arch-x86/hvm/save.h"
++#elif defined(__ia64__)
++#include "../arch-ia64/hvm/save.h"
++#else
++#error "unsupported architecture"
++#endif
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/hvm/vmx_assist.h linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/vmx_assist.h
+--- linux-2.6.18.8/include/xen/interface/hvm/vmx_assist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/hvm/vmx_assist.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,122 @@
++/*
++ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Leendert van Doorn, leendert@watson.ibm.com
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _VMX_ASSIST_H_
++#define _VMX_ASSIST_H_
++
++#define VMXASSIST_BASE 0xD0000
++#define VMXASSIST_MAGIC 0x17101966
++#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
++
++#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
++#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
++
++#ifndef __ASSEMBLY__
++
++#define NR_EXCEPTION_HANDLER 32
++#define NR_INTERRUPT_HANDLERS 16
++#define NR_TRAPS (NR_EXCEPTION_HANDLER+NR_INTERRUPT_HANDLERS)
++
++union vmcs_arbytes {
++ struct arbyte_fields {
++ unsigned int seg_type : 4,
++ s : 1,
++ dpl : 2,
++ p : 1,
++ reserved0 : 4,
++ avl : 1,
++ reserved1 : 1,
++ default_ops_size: 1,
++ g : 1,
++ null_bit : 1,
++ reserved2 : 15;
++ } fields;
++ unsigned int bytes;
++};
++
++/*
++ * World switch state
++ */
++struct vmx_assist_context {
++ uint32_t eip; /* execution pointer */
++ uint32_t esp; /* stack pointer */
++ uint32_t eflags; /* flags register */
++ uint32_t cr0;
++ uint32_t cr3; /* page table directory */
++ uint32_t cr4;
++ uint32_t idtr_limit; /* idt */
++ uint32_t idtr_base;
++ uint32_t gdtr_limit; /* gdt */
++ uint32_t gdtr_base;
++ uint32_t cs_sel; /* cs selector */
++ uint32_t cs_limit;
++ uint32_t cs_base;
++ union vmcs_arbytes cs_arbytes;
++ uint32_t ds_sel; /* ds selector */
++ uint32_t ds_limit;
++ uint32_t ds_base;
++ union vmcs_arbytes ds_arbytes;
++ uint32_t es_sel; /* es selector */
++ uint32_t es_limit;
++ uint32_t es_base;
++ union vmcs_arbytes es_arbytes;
++ uint32_t ss_sel; /* ss selector */
++ uint32_t ss_limit;
++ uint32_t ss_base;
++ union vmcs_arbytes ss_arbytes;
++ uint32_t fs_sel; /* fs selector */
++ uint32_t fs_limit;
++ uint32_t fs_base;
++ union vmcs_arbytes fs_arbytes;
++ uint32_t gs_sel; /* gs selector */
++ uint32_t gs_limit;
++ uint32_t gs_base;
++ union vmcs_arbytes gs_arbytes;
++ uint32_t tr_sel; /* task selector */
++ uint32_t tr_limit;
++ uint32_t tr_base;
++ union vmcs_arbytes tr_arbytes;
++ uint32_t ldtr_sel; /* ldtr selector */
++ uint32_t ldtr_limit;
++ uint32_t ldtr_base;
++ union vmcs_arbytes ldtr_arbytes;
++
++ unsigned char rm_irqbase[2];
++};
++typedef struct vmx_assist_context vmx_assist_context_t;
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _VMX_ASSIST_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/blkif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/blkif.h
+--- linux-2.6.18.8/include/xen/interface/io/blkif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/blkif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,141 @@
++/******************************************************************************
++ * blkif.h
++ *
++ * Unified block-device I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_BLKIF_H__
++#define __XEN_PUBLIC_IO_BLKIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Front->back notifications: When enqueuing a new request, sending a
++ * notification can be made conditional on req_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Backends must set
++ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
++ *
++ * Back->front notifications: When enqueuing a new response, sending a
++ * notification can be made conditional on rsp_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Frontends must set
++ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
++ */
++
++#ifndef blkif_vdev_t
++#define blkif_vdev_t uint16_t
++#endif
++#define blkif_sector_t uint64_t
++
++/*
++ * REQUEST CODES.
++ */
++#define BLKIF_OP_READ 0
++#define BLKIF_OP_WRITE 1
++/*
++ * Recognised only if "feature-barrier" is present in backend xenbus info.
++ * The "feature-barrier" node contains a boolean indicating whether barrier
++ * requests are likely to succeed or fail. Either way, a barrier request
++ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
++ * the underlying block-device hardware. The boolean simply indicates whether
++ * or not it is worthwhile for the frontend to attempt barrier requests.
++ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
++ * create the "feature-barrier" node!
++ */
++#define BLKIF_OP_WRITE_BARRIER 2
++/*
++ * Recognised if "feature-flush-cache" is present in backend xenbus
++ * info. A flush will ask the underlying storage hardware to flush its
++ * non-volatile caches as appropriate. The "feature-flush-cache" node
++ * contains a boolean indicating whether flush requests are likely to
++ * succeed or fail. Either way, a flush request may fail at any time
++ * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
++ * block-device hardware. The boolean simply indicates whether or not it
++ * is worthwhile for the frontend to attempt flushes. If a backend does
++ * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
++ * "feature-flush-cache" node!
++ */
++#define BLKIF_OP_FLUSH_DISKCACHE 3
++
++/*
++ * Maximum scatter/gather segments per request.
++ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
++ * NB. This could be 12 if the ring indexes weren't stored in the same page.
++ */
++#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
++
++struct blkif_request_segment {
++ grant_ref_t gref; /* reference to I/O buffer frame */
++ /* @first_sect: first sector in frame to transfer (inclusive). */
++ /* @last_sect: last sector in frame to transfer (inclusive). */
++ uint8_t first_sect, last_sect;
++};
++
++struct blkif_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t id; /* private guest value, echoed in resp */
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++typedef struct blkif_request blkif_request_t;
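
A sketch of a single-segment read request; vdev, cookie, start and buf_gref
are hypothetical, and sectors are the usual 512-byte units:

    blkif_request_t req;

    req.operation     = BLKIF_OP_READ;
    req.nr_segments   = 1;
    req.handle        = vdev;          /* virtual device to read from */
    req.id            = cookie;        /* echoed back in the response */
    req.sector_number = start;
    req.seg[0].gref       = buf_gref;  /* granted I/O buffer frame */
    req.seg[0].first_sect = 0;
    req.seg[0].last_sect  = 7;         /* inclusive: eight sectors */
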
++
++struct blkif_response {
++ uint64_t id; /* copied from request */
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_response blkif_response_t;
++
++/*
++ * STATUS RETURN CODES.
++ */
++ /* Operation not supported (only happens on barrier writes). */
++#define BLKIF_RSP_EOPNOTSUPP -2
++ /* Operation failed for some unspecified reason (-EIO). */
++#define BLKIF_RSP_ERROR -1
++ /* Operation completed successfully. */
++#define BLKIF_RSP_OKAY 0
++
++/*
++ * Generate blkif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
++
++#define VDISK_CDROM 0x1
++#define VDISK_REMOVABLE 0x2
++#define VDISK_READONLY 0x4
++
++#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/console.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/console.h
+--- linux-2.6.18.8/include/xen/interface/io/console.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/console.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,51 @@
++/******************************************************************************
++ * console.h
++ *
++ * Console I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
++#define __XEN_PUBLIC_IO_CONSOLE_H__
++
++typedef uint32_t XENCONS_RING_IDX;
++
++#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
++
++struct xencons_interface {
++ char in[1024];
++ char out[2048];
++ XENCONS_RING_IDX in_cons, in_prod;
++ XENCONS_RING_IDX out_cons, out_prod;
++};
++
++#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/fbif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/fbif.h
+--- linux-2.6.18.8/include/xen/interface/io/fbif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/fbif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,176 @@
++/*
++ * fbif.h -- Xen virtual frame buffer device
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ */
++
++#ifndef __XEN_PUBLIC_IO_FBIF_H__
++#define __XEN_PUBLIC_IO_FBIF_H__
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ */
++
++/* Event type 1 currently not used */
++/*
++ * Framebuffer update notification event
++ * Capable frontend sets feature-update in xenstore.
++ * Backend requests it by setting request-update in xenstore.
++ */
++#define XENFB_TYPE_UPDATE 2
++
++struct xenfb_update
++{
++ uint8_t type; /* XENFB_TYPE_UPDATE */
++ int32_t x; /* source x */
++ int32_t y; /* source y */
++ int32_t width; /* rect width */
++ int32_t height; /* rect height */
++};
++
++/*
++ * Framebuffer resize notification event
++ * Capable backend sets feature-resize in xenstore.
++ */
++#define XENFB_TYPE_RESIZE 3
++
++struct xenfb_resize
++{
++ uint8_t type; /* XENFB_TYPE_RESIZE */
++ int32_t width; /* width in pixels */
++ int32_t height; /* height in pixels */
++ int32_t stride; /* stride in bytes */
++ int32_t depth; /* depth in bits */
++ int32_t offset; /* offset of the framebuffer in bytes */
++};
++
++#define XENFB_OUT_EVENT_SIZE 40
++
++union xenfb_out_event
++{
++ uint8_t type;
++ struct xenfb_update update;
++ struct xenfb_resize resize;
++ char pad[XENFB_OUT_EVENT_SIZE];
++};
++
++/* In events (backend -> frontend) */
++
++/*
++ * Frontends should ignore unknown in events.
++ */
++
++/*
++ * Framebuffer refresh period advice
++ * Backend sends it to advise the frontend their preferred period of
++ * refresh. Frontends that keep the framebuffer constantly up-to-date
++ * just ignore it. Frontends that use the advice should immediately
++ * refresh the framebuffer (and send an update notification event if
++ * those have been requested), then use the update frequency to guide
++ * their periodical refreshs.
++ */
++#define XENFB_TYPE_REFRESH_PERIOD 1
++#define XENFB_NO_REFRESH 0
++
++struct xenfb_refresh_period
++{
++ uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */
++ uint32_t period; /* period of refresh, in ms,
++ * XENFB_NO_REFRESH if no refresh is needed */
++};
++
++#define XENFB_IN_EVENT_SIZE 40
++
++union xenfb_in_event
++{
++ uint8_t type;
++ struct xenfb_refresh_period refresh_period;
++ char pad[XENFB_IN_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENFB_IN_RING_SIZE 1024
++#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
++#define XENFB_IN_RING_OFFS 1024
++#define XENFB_IN_RING(page) \
++ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
++#define XENFB_IN_RING_REF(page, idx) \
++ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
++
++#define XENFB_OUT_RING_SIZE 2048
++#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
++#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
++#define XENFB_OUT_RING(page) \
++ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
++#define XENFB_OUT_RING_REF(page, idx) \
++ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
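
A producer-side sketch for the out ring; memory barriers and the
event-channel kick are omitted:

    void xenfb_send_update(struct xenfb_page *pg, int x, int y, int w, int h)
    {
        union xenfb_out_event *ev = &XENFB_OUT_RING_REF(pg, pg->out_prod);

        ev->update.type   = XENFB_TYPE_UPDATE;
        ev->update.x      = x;
        ev->update.y      = y;
        ev->update.width  = w;
        ev->update.height = h;
        pg->out_prod++;    /* then notify the backend */
    }
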
++
++struct xenfb_page
++{
++ uint32_t in_cons, in_prod;
++ uint32_t out_cons, out_prod;
++
++ int32_t width; /* the width of the framebuffer (in pixels) */
++ int32_t height; /* the height of the framebuffer (in pixels) */
++ uint32_t line_length; /* the length of a row of pixels (in bytes) */
++ uint32_t mem_length; /* the length of the framebuffer (in bytes) */
++ uint8_t depth; /* the depth of a pixel (in bits) */
++
++ /*
++ * Framebuffer page directory
++ *
++ * Each directory page holds PAGE_SIZE / sizeof(*pd)
++ * framebuffer pages, and can thus map up to PAGE_SIZE *
++ * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
++ * sizeof(unsigned long) == 4/8, that's 4 MB on 32-bit and 2 MB on
++ * 64-bit. 256 directories give enough room for a 512 MB framebuffer
++ * with a maximum resolution of 12,800x10,240. That should be enough
++ * for a while, with room left over for expansion.
++ */
++ unsigned long pd[256];
++};
++
++/*
++ * Wart: xenkbd needs to know default resolution. Put it here until a
++ * better solution is found, but don't leak it to the backend.
++ */
++#ifdef __KERNEL__
++#define XENFB_WIDTH 800
++#define XENFB_HEIGHT 600
++#define XENFB_DEPTH 32
++#endif
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/fsif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/fsif.h
+--- linux-2.6.18.8/include/xen/interface/io/fsif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/fsif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,191 @@
++/******************************************************************************
++ * fsif.h
++ *
++ * Interface to FS level split device drivers.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
++ */
++
++#ifndef __XEN_PUBLIC_IO_FSIF_H__
++#define __XEN_PUBLIC_IO_FSIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++#define REQ_FILE_OPEN 1
++#define REQ_FILE_CLOSE 2
++#define REQ_FILE_READ 3
++#define REQ_FILE_WRITE 4
++#define REQ_STAT 5
++#define REQ_FILE_TRUNCATE 6
++#define REQ_REMOVE 7
++#define REQ_RENAME 8
++#define REQ_CREATE 9
++#define REQ_DIR_LIST 10
++#define REQ_CHMOD 11
++#define REQ_FS_SPACE 12
++#define REQ_FILE_SYNC 13
++
++struct fsif_open_request {
++ grant_ref_t gref;
++};
++
++struct fsif_close_request {
++ uint32_t fd;
++};
++
++struct fsif_read_request {
++ uint32_t fd;
++ int32_t pad;
++ uint64_t len;
++ uint64_t offset;
++ grant_ref_t grefs[1]; /* Variable length */
++};
++
++struct fsif_write_request {
++ uint32_t fd;
++ int32_t pad;
++ uint64_t len;
++ uint64_t offset;
++ grant_ref_t grefs[1]; /* Variable length */
++};
++
++struct fsif_stat_request {
++ uint32_t fd;
++};
++
++/* This structure is a copy of some fields from the stat structure,
++ * returned via the ring. */
++struct fsif_stat_response {
++ int32_t stat_mode;
++ uint32_t stat_uid;
++ uint32_t stat_gid;
++ int32_t stat_ret;
++ int64_t stat_size;
++ int64_t stat_atime;
++ int64_t stat_mtime;
++ int64_t stat_ctime;
++};
++
++struct fsif_truncate_request {
++ uint32_t fd;
++ int32_t pad;
++ int64_t length;
++};
++
++struct fsif_remove_request {
++ grant_ref_t gref;
++};
++
++struct fsif_rename_request {
++ uint16_t old_name_offset;
++ uint16_t new_name_offset;
++ grant_ref_t gref;
++};
++
++struct fsif_create_request {
++ int8_t directory;
++ int8_t pad;
++ int16_t pad2;
++ int32_t mode;
++ grant_ref_t gref;
++};
++
++struct fsif_list_request {
++ uint32_t offset;
++ grant_ref_t gref;
++};
++
++#define NR_FILES_SHIFT 0
++#define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */
++#define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
++#define ERROR_SIZE 32 /* 32 bits for the error mask */
++#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT)
++#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
++#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE)
++#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT)
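
For illustration, unpacking a REQ_DIR_LIST result (the fsif_response's
ret_val field, defined below) into its three packed fields; rsp is a
hypothetical response pointer:

    uint64_t rv       = rsp->ret_val;
    uint32_t nr_files = (rv & NR_FILES_MASK) >> NR_FILES_SHIFT;
    uint32_t error    = (rv & ERROR_MASK)    >> ERROR_SHIFT;
    int      has_more = (rv & HAS_MORE_FLAG) != 0;
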
++
++struct fsif_chmod_request {
++ uint32_t fd;
++ int32_t mode;
++};
++
++struct fsif_space_request {
++ grant_ref_t gref;
++};
++
++struct fsif_sync_request {
++ uint32_t fd;
++};
++
++
++/* FS operation request */
++struct fsif_request {
++ uint8_t type; /* Type of the request */
++ uint8_t pad;
++ uint16_t id; /* Request ID, copied to the response */
++ uint32_t pad2;
++ union {
++ struct fsif_open_request fopen;
++ struct fsif_close_request fclose;
++ struct fsif_read_request fread;
++ struct fsif_write_request fwrite;
++ struct fsif_stat_request fstat;
++ struct fsif_truncate_request ftruncate;
++ struct fsif_remove_request fremove;
++ struct fsif_rename_request frename;
++ struct fsif_create_request fcreate;
++ struct fsif_list_request flist;
++ struct fsif_chmod_request fchmod;
++ struct fsif_space_request fspace;
++ struct fsif_sync_request fsync;
++ } u;
++};
++typedef struct fsif_request fsif_request_t;
++
++/* FS operation response */
++struct fsif_response {
++ uint16_t id;
++ uint16_t pad1;
++ uint32_t pad2;
++ union {
++ uint64_t ret_val;
++ struct fsif_stat_response fstat;
++ };
++};
++
++typedef struct fsif_response fsif_response_t;
++
++#define FSIF_RING_ENTRY_SIZE 64
++
++#define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \
++ sizeof(grant_ref_t) + 1)
++#define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \
++ sizeof(grant_ref_t) + 1)
++
++DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
++
++#define STATE_INITIALISED "init"
++#define STATE_READY "ready"
++
++
++
++#endif
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/kbdif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/kbdif.h
+--- linux-2.6.18.8/include/xen/interface/io/kbdif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/kbdif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,132 @@
++/*
++ * kbdif.h -- Xen virtual keyboard/mouse
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ */
++
++#ifndef __XEN_PUBLIC_IO_KBDIF_H__
++#define __XEN_PUBLIC_IO_KBDIF_H__
++
++/* In events (backend -> frontend) */
++
++/*
++ * Frontends should ignore unknown in events.
++ */
++
++/* Pointer movement event */
++#define XENKBD_TYPE_MOTION 1
++/* Event type 2 currently not used */
++/* Key event (includes pointer buttons) */
++#define XENKBD_TYPE_KEY 3
++/*
++ * Pointer position event
++ * Capable backend sets feature-abs-pointer in xenstore.
++ * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting
++ * request-abs-update in xenstore.
++ */
++#define XENKBD_TYPE_POS 4
++
++struct xenkbd_motion
++{
++ uint8_t type; /* XENKBD_TYPE_MOTION */
++ int32_t rel_x; /* relative X motion */
++ int32_t rel_y; /* relative Y motion */
++ int32_t rel_z; /* relative Z motion (wheel) */
++};
++
++struct xenkbd_key
++{
++ uint8_t type; /* XENKBD_TYPE_KEY */
++ uint8_t pressed; /* 1 if pressed; 0 otherwise */
++ uint32_t keycode; /* KEY_* from linux/input.h */
++};
++
++struct xenkbd_position
++{
++ uint8_t type; /* XENKBD_TYPE_POS */
++ int32_t abs_x; /* absolute X position (in FB pixels) */
++ int32_t abs_y; /* absolute Y position (in FB pixels) */
++ int32_t rel_z; /* relative Z motion (wheel) */
++};
++
++#define XENKBD_IN_EVENT_SIZE 40
++
++union xenkbd_in_event
++{
++ uint8_t type;
++ struct xenkbd_motion motion;
++ struct xenkbd_key key;
++ struct xenkbd_position pos;
++ char pad[XENKBD_IN_EVENT_SIZE];
++};
++
++/* Out events (frontend -> backend) */
++
++/*
++ * Out events may be sent only when requested by backend, and receipt
++ * of an unknown out event is an error.
++ * No out events currently defined.
++ */
++
++#define XENKBD_OUT_EVENT_SIZE 40
++
++union xenkbd_out_event
++{
++ uint8_t type;
++ char pad[XENKBD_OUT_EVENT_SIZE];
++};
++
++/* shared page */
++
++#define XENKBD_IN_RING_SIZE 2048
++#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
++#define XENKBD_IN_RING_OFFS 1024
++#define XENKBD_IN_RING(page) \
++ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
++#define XENKBD_IN_RING_REF(page, idx) \
++ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
++
++#define XENKBD_OUT_RING_SIZE 1024
++#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
++#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
++#define XENKBD_OUT_RING(page) \
++ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
++#define XENKBD_OUT_RING_REF(page, idx) \
++ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
++
++struct xenkbd_page
++{
++ uint32_t in_cons, in_prod;
++ uint32_t out_cons, out_prod;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/netif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/netif.h
+--- linux-2.6.18.8/include/xen/interface/io/netif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/netif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,205 @@
++/******************************************************************************
++ * netif.h
++ *
++ * Unified network-device I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_NETIF_H__
++#define __XEN_PUBLIC_IO_NETIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Notifications after enqueuing any type of message should be conditional on
++ * the appropriate req_event or rsp_event field in the shared ring.
++ * If the client sends notification for rx requests then it should specify
++ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
++ * that it cannot safely queue packets (as it may not be kicked to send them).
++ */
++
++/*
++ * This is the 'wire' format for packets:
++ * Request 1: netif_tx_request -- NETTXF_* (any flags)
++ * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
++ * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE)
++ * Request 4: netif_tx_request -- NETTXF_more_data
++ * Request 5: netif_tx_request -- NETTXF_more_data
++ * ...
++ * Request N: netif_tx_request -- 0
++ */
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETTXF_csum_blank (0)
++#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
++
++/* Packet data has been validated against protocol checksum. */
++#define _NETTXF_data_validated (1)
++#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
++
++/* Packet continues in the next request descriptor. */
++#define _NETTXF_more_data (2)
++#define NETTXF_more_data (1U<<_NETTXF_more_data)
++
++/* Packet to be followed by extra descriptor(s). */
++#define _NETTXF_extra_info (3)
++#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
++
++struct netif_tx_request {
++ grant_ref_t gref; /* Reference to buffer page */
++ uint16_t offset; /* Offset within buffer page */
++ uint16_t flags; /* NETTXF_* */
++ uint16_t id; /* Echoed in response message. */
++ uint16_t size; /* Packet size in bytes. */
++};
++typedef struct netif_tx_request netif_tx_request_t;
++
++/* Types of netif_extra_info descriptors. */
++#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
++#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
++#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
++#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
++#define XEN_NETIF_EXTRA_TYPE_MAX (4)
++
++/* netif_extra_info flags. */
++#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
++#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
++
++/* GSO types - only TCPv4 currently supported. */
++#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
++
++/*
++ * This structure needs to fit within both netif_tx_request and
++ * netif_rx_response for compatibility.
++ */
++struct netif_extra_info {
++ uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
++ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
++
++ union {
++ /*
++ * XEN_NETIF_EXTRA_TYPE_GSO:
++ */
++ struct {
++ /*
++ * Maximum payload size of each segment. For example, for TCP this
++ * is just the path MSS.
++ */
++ uint16_t size;
++
++ /*
++ * GSO type. This determines the protocol of the packet and any
++ * extra features required to segment the packet properly.
++ */
++ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
++
++ /* Future expansion. */
++ uint8_t pad;
++
++ /*
++ * GSO features. This specifies any extra GSO features required
++ * to process this packet, such as ECN support for TCPv4.
++ */
++ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
++ } gso;
++
++ /*
++ * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
++ * Backend advertises availability via 'feature-multicast-control'
++ * xenbus node containing value '1'.
++ * Frontend requests this feature by advertising
++ * 'request-multicast-control' xenbus node containing value '1'.
++ * If multicast control is requested then multicast flooding is
++ * disabled and the frontend must explicitly register its interest
++ * in multicast groups using dummy transmit requests containing
++ * MCAST_{ADD,DEL} extra-info fragments.
++ */
++ struct {
++ uint8_t addr[6]; /* Address to add/remove. */
++ } mcast;
++
++ uint16_t pad[3];
++ } u;
++};
++typedef struct netif_extra_info netif_extra_info_t;
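++
++/*
++ * Illustrative sketch: a frontend sending a TSO packet sets
++ * NETTXF_extra_info on the first tx request and follows it with a
++ * descriptor cast to this type (tx_ring, idx and mss are assumptions):
++ *
++ *  struct netif_extra_info *gso = (struct netif_extra_info *)
++ *      RING_GET_REQUEST(&tx_ring, idx++);
++ *  gso->type           = XEN_NETIF_EXTRA_TYPE_GSO;
++ *  gso->flags          = 0;
++ *  gso->u.gso.size     = mss;
++ *  gso->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
++ *  gso->u.gso.pad      = 0;
++ *  gso->u.gso.features = 0;
++ */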
++
++struct netif_tx_response {
++ uint16_t id;
++ int16_t status; /* NETIF_RSP_* */
++};
++typedef struct netif_tx_response netif_tx_response_t;
++
++struct netif_rx_request {
++ uint16_t id; /* Echoed in response message. */
++ grant_ref_t gref; /* Reference to incoming granted frame */
++};
++typedef struct netif_rx_request netif_rx_request_t;
++
++/* Packet data has been validated against protocol checksum. */
++#define _NETRXF_data_validated (0)
++#define NETRXF_data_validated (1U<<_NETRXF_data_validated)
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETRXF_csum_blank (1)
++#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank)
++
++/* Packet continues in the next request descriptor. */
++#define _NETRXF_more_data (2)
++#define NETRXF_more_data (1U<<_NETRXF_more_data)
++
++/* Packet to be followed by extra descriptor(s). */
++#define _NETRXF_extra_info (3)
++#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
++
++struct netif_rx_response {
++ uint16_t id;
++ uint16_t offset; /* Offset in page of start of received packet */
++ uint16_t flags; /* NETRXF_* */
++ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
++};
++typedef struct netif_rx_response netif_rx_response_t;
++
++/*
++ * Generate netif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
++DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
++
++#define NETIF_RSP_DROPPED -2
++#define NETIF_RSP_ERROR -1
++#define NETIF_RSP_OKAY 0
++/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
++#define NETIF_RSP_NULL 1
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/pciif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/pciif.h
+--- linux-2.6.18.8/include/xen/interface/io/pciif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/pciif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,101 @@
++/*
++ * PCI Backend/Frontend Common Data Structures & Macros
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCI_COMMON_H__
++#define __XEN_PCI_COMMON_H__
++
++/* Be sure to bump this number if you change this file */
++#define XEN_PCI_MAGIC "7"
++
++/* xen_pci_sharedinfo flags */
++#define _XEN_PCIF_active (0)
++#define XEN_PCIF_active (1<<_XEN_PCIF_active)
++
++/* xen_pci_op commands */
++#define XEN_PCI_OP_conf_read (0)
++#define XEN_PCI_OP_conf_write (1)
++#define XEN_PCI_OP_enable_msi (2)
++#define XEN_PCI_OP_disable_msi (3)
++#define XEN_PCI_OP_enable_msix (4)
++#define XEN_PCI_OP_disable_msix (5)
++
++/* xen_pci_op error numbers */
++#define XEN_PCI_ERR_success (0)
++#define XEN_PCI_ERR_dev_not_found (-1)
++#define XEN_PCI_ERR_invalid_offset (-2)
++#define XEN_PCI_ERR_access_denied (-3)
++#define XEN_PCI_ERR_not_implemented (-4)
++/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
++#define XEN_PCI_ERR_op_failed (-5)
++
++/*
++ * This should be (PAGE_SIZE - sizeof(struct xen_pci_op)) / sizeof(struct xen_msix_entry)
++ * and must not exceed 128.
++ */
++#define SH_INFO_MAX_VEC 128
++
++struct xen_msix_entry {
++ uint16_t vector;
++ uint16_t entry;
++};
++struct xen_pci_op {
++ /* IN: what action to perform: XEN_PCI_OP_* */
++ uint32_t cmd;
++
++ /* OUT: will contain an error number (if any) from errno.h */
++ int32_t err;
++
++ /* IN: which device to touch */
++ uint32_t domain; /* PCI Domain/Segment */
++ uint32_t bus;
++ uint32_t devfn;
++
++ /* IN: which configuration registers to touch */
++ int32_t offset;
++ int32_t size;
++
++ /* IN/OUT: Contains the result after a READ or the value to WRITE */
++ uint32_t value;
++ /* IN: Contains extra info for this operation */
++ uint32_t info;
++ /* IN: parameters for MSI-X */
++ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC];
++};
++
++struct xen_pci_sharedinfo {
++ /* flags - XEN_PCIF_* */
++ uint32_t flags;
++ struct xen_pci_op op;
++};
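++
++/*
++ * Illustrative config-space read through the shared page (a sketch:
++ * the shinfo pointer, the bit operation and the backend notification
++ * mechanism are guest-specific and assumed here):
++ *
++ *  shinfo->op.cmd    = XEN_PCI_OP_conf_read;
++ *  shinfo->op.domain = 0;
++ *  shinfo->op.bus    = bus;
++ *  shinfo->op.devfn  = devfn;
++ *  shinfo->op.offset = 0;              (vendor ID)
++ *  shinfo->op.size   = 2;
++ *  set the _XEN_PCIF_active bit in shinfo->flags, notify the backend,
++ *  then wait for the bit to clear; the result is in shinfo->op.value
++ *  and any error code in shinfo->op.err.
++ */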
++
++#endif /* __XEN_PCI_COMMON_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/protocols.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/protocols.h
+--- linux-2.6.18.8/include/xen/interface/io/protocols.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/protocols.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,40 @@
++/******************************************************************************
++ * protocols.h
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PROTOCOLS_H__
++#define __XEN_PROTOCOLS_H__
++
++#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
++#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
++#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
++
++#if defined(__i386__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
++#elif defined(__x86_64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
++#elif defined(__ia64__)
++# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
++#else
++# error arch fixup needed here
++#endif
++
++#endif
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/ring.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/ring.h
+--- linux-2.6.18.8/include/xen/interface/io/ring.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/ring.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,307 @@
++/******************************************************************************
++ * ring.h
++ *
++ * Shared producer-consumer ring macros.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Tim Deegan and Andrew Warfield November 2004.
++ */
++
++#ifndef __XEN_PUBLIC_IO_RING_H__
++#define __XEN_PUBLIC_IO_RING_H__
++
++#include "../xen-compat.h"
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030208
++#define xen_mb() mb()
++#define xen_rmb() rmb()
++#define xen_wmb() wmb()
++#endif
++
++typedef unsigned int RING_IDX;
++
++/* Round a 32-bit unsigned constant down to the nearest power of two. */
++#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
++#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
++#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
++#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
++#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
++
++/*
++ * Calculate size of a shared ring, given the total available space for the
++ * ring and indexes (_sz), and the name tag of the request/response structure.
++ * A ring contains as many entries as will fit, rounded down to the nearest
++ * power of two (so we can mask with (size-1) to loop around).
++ */
++#define __RING_SIZE(_s, _sz) \
++ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
++
++/*
++ * Macros to make the correct C datatypes for a new kind of ring.
++ *
++ * To make a new ring datatype, you need to have two message structures,
++ * let's say request_t, and response_t already defined.
++ *
++ * In a header where you want the ring datatype declared, you then do:
++ *
++ * DEFINE_RING_TYPES(mytag, request_t, response_t);
++ *
++ * These expand out to give you a set of types, as you can see below.
++ * The most important of these are:
++ *
++ * mytag_sring_t - The shared ring.
++ * mytag_front_ring_t - The 'front' half of the ring.
++ * mytag_back_ring_t - The 'back' half of the ring.
++ *
++ * To initialize a ring in your code you need to know the location and size
++ * of the shared memory area (PAGE_SIZE, for instance). To initialise
++ * the front half:
++ *
++ * mytag_front_ring_t front_ring;
++ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
++ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ *
++ * Initializing the back follows similarly (note that only the front
++ * initializes the shared ring):
++ *
++ * mytag_back_ring_t back_ring;
++ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ */
++
++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
++ \
++/* Shared ring entry */ \
++union __name##_sring_entry { \
++ __req_t req; \
++ __rsp_t rsp; \
++}; \
++ \
++/* Shared ring page */ \
++struct __name##_sring { \
++ RING_IDX req_prod, req_event; \
++ RING_IDX rsp_prod, rsp_event; \
++ uint8_t pad[48]; \
++ union __name##_sring_entry ring[1]; /* variable-length */ \
++}; \
++ \
++/* "Front" end's private variables */ \
++struct __name##_front_ring { \
++ RING_IDX req_prod_pvt; \
++ RING_IDX rsp_cons; \
++ unsigned int nr_ents; \
++ struct __name##_sring *sring; \
++}; \
++ \
++/* "Back" end's private variables */ \
++struct __name##_back_ring { \
++ RING_IDX rsp_prod_pvt; \
++ RING_IDX req_cons; \
++ unsigned int nr_ents; \
++ struct __name##_sring *sring; \
++}; \
++ \
++/* Syntactic sugar */ \
++typedef struct __name##_sring __name##_sring_t; \
++typedef struct __name##_front_ring __name##_front_ring_t; \
++typedef struct __name##_back_ring __name##_back_ring_t
++
++/*
++ * Macros for manipulating rings.
++ *
++ * FRONT_RING_whatever works on the "front end" of a ring: here
++ * requests are pushed on to the ring and responses taken off it.
++ *
++ * BACK_RING_whatever works on the "back end" of a ring: here
++ * requests are taken off the ring and responses put on.
++ *
++ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
++ * This is OK in 1-for-1 request-response situations where the
++ * requestor (front end) never has more than RING_SIZE()-1
++ * outstanding requests.
++ */
++
++/* Initialising empty rings */
++#define SHARED_RING_INIT(_s) do { \
++ (_s)->req_prod = (_s)->rsp_prod = 0; \
++ (_s)->req_event = (_s)->rsp_event = 1; \
++ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \
++} while(0)
++
++#define FRONT_RING_INIT(_r, _s, __size) do { \
++ (_r)->req_prod_pvt = 0; \
++ (_r)->rsp_cons = 0; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++ (_r)->sring = (_s); \
++} while (0)
++
++#define BACK_RING_INIT(_r, _s, __size) do { \
++ (_r)->rsp_prod_pvt = 0; \
++ (_r)->req_cons = 0; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++ (_r)->sring = (_s); \
++} while (0)
++
++/* Initialize to existing shared indexes -- for recovery */
++#define FRONT_RING_ATTACH(_r, _s, __size) do { \
++ (_r)->sring = (_s); \
++ (_r)->req_prod_pvt = (_s)->req_prod; \
++ (_r)->rsp_cons = (_s)->rsp_prod; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++} while (0)
++
++#define BACK_RING_ATTACH(_r, _s, __size) do { \
++ (_r)->sring = (_s); \
++ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
++ (_r)->req_cons = (_s)->req_prod; \
++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
++} while (0)
++
++/* How big is this ring? */
++#define RING_SIZE(_r) \
++ ((_r)->nr_ents)
++
++/* Number of free requests (for use on front side only). */
++#define RING_FREE_REQUESTS(_r) \
++ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
++
++/* Test if there is an empty slot available on the front ring.
++ * (This is only meaningful from the front.)
++ */
++#define RING_FULL(_r) \
++ (RING_FREE_REQUESTS(_r) == 0)
++
++/* Test if there are outstanding messages to be processed on a ring. */
++#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
++ ((_r)->sring->rsp_prod - (_r)->rsp_cons)
++
++#ifdef __GNUC__
++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
++ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
++ unsigned int rsp = RING_SIZE(_r) - \
++ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
++ req < rsp ? req : rsp; \
++})
++#else
++/* Same as above, but without the nice GCC ({ ... }) syntax. */
++#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
++ ((((_r)->sring->req_prod - (_r)->req_cons) < \
++ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
++ ((_r)->sring->req_prod - (_r)->req_cons) : \
++ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
++#endif
++
++/* Direct access to individual ring elements, by index. */
++#define RING_GET_REQUEST(_r, _idx) \
++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
++
++#define RING_GET_RESPONSE(_r, _idx) \
++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
++
++/* Loop termination condition: Would the specified index overflow the ring? */
++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
++ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
++
++#define RING_PUSH_REQUESTS(_r) do { \
++ xen_wmb(); /* back sees requests /before/ updated producer index */ \
++ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
++} while (0)
++
++#define RING_PUSH_RESPONSES(_r) do { \
++ xen_wmb(); /* front sees resps /before/ updated producer index */ \
++ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
++} while (0)
++
++/*
++ * Notification hold-off (req_event and rsp_event):
++ *
++ * When queueing requests or responses on a shared ring, it may not always be
++ * necessary to notify the remote end. For example, if requests are in flight
++ * in a backend, the front may be able to queue further requests without
++ * notifying the back (if the back checks for new requests when it queues
++ * responses).
++ *
++ * When enqueuing requests or responses:
++ *
++ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
++ * is a boolean return value. True indicates that the receiver requires an
++ * asynchronous notification.
++ *
++ * After dequeuing requests or responses (before sleeping the connection):
++ *
++ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
++ * The second argument is a boolean return value. True indicates that there
++ * are pending messages on the ring (i.e., the connection should not be put
++ * to sleep).
++ *
++ * These macros will set the req_event/rsp_event field to trigger a
++ * notification on the very next message that is enqueued. If you want to
++ * create batches of work (i.e., only receive a notification after several
++ * messages have been enqueued) then you will need to create a customised
++ * version of the FINAL_CHECK macro in your own code, which sets the event
++ * field appropriately.
++ */
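++
++/*
++ * Illustrative backend service loop (a sketch; the request handling and
++ * the notify_remote() event-channel kick are assumptions):
++ *
++ *  int notify, work_to_do;
++ *  do {
++ *      while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
++ *          req = RING_GET_REQUEST(&back_ring, back_ring.req_cons);
++ *          back_ring.req_cons++;
++ *          rsp = RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt);
++ *          fill in rsp from req, then back_ring.rsp_prod_pvt++;
++ *      }
++ *      RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&back_ring, notify);
++ *      if (notify)
++ *          notify_remote();
++ *      RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
++ *  } while (work_to_do);
++ */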
++
++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
++ RING_IDX __old = (_r)->sring->req_prod; \
++ RING_IDX __new = (_r)->req_prod_pvt; \
++ xen_wmb(); /* back sees requests /before/ updated producer index */ \
++ (_r)->sring->req_prod = __new; \
++ xen_mb(); /* back sees new requests /before/ we check req_event */ \
++ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
++ (RING_IDX)(__new - __old)); \
++} while (0)
++
++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
++ RING_IDX __old = (_r)->sring->rsp_prod; \
++ RING_IDX __new = (_r)->rsp_prod_pvt; \
++ xen_wmb(); /* front sees resps /before/ updated producer index */ \
++ (_r)->sring->rsp_prod = __new; \
++ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
++ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
++ (RING_IDX)(__new - __old)); \
++} while (0)
++
++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
++ if (_work_to_do) break; \
++ (_r)->sring->req_event = (_r)->req_cons + 1; \
++ xen_mb(); \
++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
++} while (0)
++
++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
++ if (_work_to_do) break; \
++ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
++ xen_mb(); \
++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
++} while (0)
++
++#endif /* __XEN_PUBLIC_IO_RING_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/tpmif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/tpmif.h
+--- linux-2.6.18.8/include/xen/interface/io/tpmif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/tpmif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,77 @@
++/******************************************************************************
++ * tpmif.h
++ *
++ * TPM I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from tools/libxc/xen/io/netif.h
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_TPMIF_H__
++#define __XEN_PUBLIC_IO_TPMIF_H__
++
++#include "../grant_table.h"
++
++struct tpmif_tx_request {
++ unsigned long addr; /* Machine address of packet. */
++ grant_ref_t ref; /* grant table access reference */
++ uint16_t unused;
++ uint16_t size; /* Packet size in bytes. */
++};
++typedef struct tpmif_tx_request tpmif_tx_request_t;
++
++/*
++ * TPMIF_TX_RING_SIZE defines the number of pages that the front-end
++ * and the backend can exchange (i.e., the size of the ring array).
++ */
++typedef uint32_t TPMIF_RING_IDX;
++
++#define TPMIF_TX_RING_SIZE 1
++
++/* This structure must fit in a memory page. */
++
++struct tpmif_ring {
++ struct tpmif_tx_request req;
++};
++typedef struct tpmif_ring tpmif_ring_t;
++
++struct tpmif_tx_interface {
++ struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
++};
++typedef struct tpmif_tx_interface tpmif_tx_interface_t;
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/vscsiif.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/vscsiif.h
+--- linux-2.6.18.8/include/xen/interface/io/vscsiif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/vscsiif.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,105 @@
++/******************************************************************************
++ * vscsiif.h
++ *
++ * Based on the blkif.h code.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright(c) FUJITSU Limited 2008.
++ */
++
++#ifndef __XEN__PUBLIC_IO_SCSI_H__
++#define __XEN__PUBLIC_IO_SCSI_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/* command between backend and frontend */
++#define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */
++#define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort */
++#define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset */
++
++
++#define VSCSIIF_BACK_MAX_PENDING_REQS 128
++
++/*
++ * Maximum scatter/gather segments per request.
++ *
++ * Balancing the need to fit at least 16 "vscsiif_request" structures on
++ * one page (4096 bytes) against the number of scatter-gather segments
++ * required, 26 was chosen as the table size.
++ */
++#define VSCSIIF_SG_TABLESIZE 26
++
++/*
++ * Based on Linux kernel 2.6.18.
++ */
++#define VSCSIIF_MAX_COMMAND_SIZE 16
++#define VSCSIIF_SENSE_BUFFERSIZE 96
++
++
++struct vscsiif_request {
++ uint16_t rqid; /* private guest value, echoed in resp */
++ uint8_t act; /* command between backend and frontend */
++ uint8_t cmd_len;
++
++ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
++ uint16_t timeout_per_command; /* The backend issues the command
++ with twice this timeout value. */
++ uint16_t channel, id, lun;
++ uint16_t padding;
++ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
++ DMA_FROM_DEVICE(2)
++ DMA_NONE(3) requests */
++ uint8_t nr_segments; /* Number of pieces of scatter-gather */
++
++ struct scsiif_request_segment {
++ grant_ref_t gref;
++ uint16_t offset;
++ uint16_t length;
++ } seg[VSCSIIF_SG_TABLESIZE];
++ uint32_t reserved[3];
++};
++typedef struct vscsiif_request vscsiif_request_t;
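++
++/*
++ * Illustrative CDB request (a sketch; the ring plumbing, rqid allocation
++ * and grant reference are assumptions; 0x12 is the SCSI INQUIRY opcode):
++ *
++ *  vscsiif_request_t *req = RING_GET_REQUEST(&front_ring, idx);
++ *  req->rqid    = next_rqid++;
++ *  req->act     = VSCSIIF_ACT_SCSI_CDB;
++ *  req->cmd_len = 6;
++ *  memset(req->cmnd, 0, sizeof(req->cmnd));
++ *  req->cmnd[0] = 0x12;
++ *  req->cmnd[4] = 96;                  (allocation length)
++ *  req->channel = 0;  req->id = target;  req->lun = lun;
++ *  req->sc_data_direction = 2;         (DMA_FROM_DEVICE)
++ *  req->nr_segments       = 1;
++ *  req->seg[0].gref   = gref;
++ *  req->seg[0].offset = 0;
++ *  req->seg[0].length = 96;
++ */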
++
++struct vscsiif_response {
++ uint16_t rqid;
++ uint8_t padding;
++ uint8_t sense_len;
++ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
++ int32_t rslt;
++ uint32_t residual_len; /* request bufflen minus the number of
++ bytes actually transferred by the physical device */
++ uint32_t reserved[36];
++};
++typedef struct vscsiif_response vscsiif_response_t;
++
++DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
++
++
++#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/xenbus.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/xenbus.h
+--- linux-2.6.18.8/include/xen/interface/io/xenbus.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/xenbus.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,80 @@
++/*****************************************************************************
++ * xenbus.h
++ *
++ * Xenbus protocol details.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 XenSource Ltd.
++ */
++
++#ifndef _XEN_PUBLIC_IO_XENBUS_H
++#define _XEN_PUBLIC_IO_XENBUS_H
++
++/*
++ * The state of either end of the Xenbus, i.e. the current communication
++ * status of initialisation across the bus. States here imply nothing about
++ * the state of the connection between the driver and the kernel's device
++ * layers.
++ */
++enum xenbus_state {
++ XenbusStateUnknown = 0,
++
++ XenbusStateInitialising = 1,
++
++ /*
++ * InitWait: Finished early initialisation but waiting for information
++ * from the peer or hotplug scripts.
++ */
++ XenbusStateInitWait = 2,
++
++ /*
++ * Initialised: Waiting for a connection from the peer.
++ */
++ XenbusStateInitialised = 3,
++
++ XenbusStateConnected = 4,
++
++ /*
++ * Closing: The device is being closed due to an error or an unplug event.
++ */
++ XenbusStateClosing = 5,
++
++ XenbusStateClosed = 6,
++
++ /*
++ * Reconfiguring: The device is being reconfigured.
++ */
++ XenbusStateReconfiguring = 7,
++
++ XenbusStateReconfigured = 8
++};
++typedef enum xenbus_state XenbusState;
++
++#endif /* _XEN_PUBLIC_IO_XENBUS_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/io/xs_wire.h linux-2.6.18-xen-3.3.0/include/xen/interface/io/xs_wire.h
+--- linux-2.6.18.8/include/xen/interface/io/xs_wire.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/io/xs_wire.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,130 @@
++/*
++ * Details of the "wire" protocol between Xen Store Daemon and client
++ * library or guest kernel.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Rusty Russell IBM Corporation
++ */
++
++#ifndef _XS_WIRE_H
++#define _XS_WIRE_H
++
++enum xsd_sockmsg_type
++{
++ XS_DEBUG,
++ XS_DIRECTORY,
++ XS_READ,
++ XS_GET_PERMS,
++ XS_WATCH,
++ XS_UNWATCH,
++ XS_TRANSACTION_START,
++ XS_TRANSACTION_END,
++ XS_INTRODUCE,
++ XS_RELEASE,
++ XS_GET_DOMAIN_PATH,
++ XS_WRITE,
++ XS_MKDIR,
++ XS_RM,
++ XS_SET_PERMS,
++ XS_WATCH_EVENT,
++ XS_ERROR,
++ XS_IS_DOMAIN_INTRODUCED,
++ XS_RESUME,
++ XS_SET_TARGET
++};
++
++#define XS_WRITE_NONE "NONE"
++#define XS_WRITE_CREATE "CREATE"
++#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
++
++/* We hand errors as strings, for portability. */
++struct xsd_errors
++{
++ int errnum;
++ const char *errstring;
++};
++#define XSD_ERROR(x) { x, #x }
++/* LINTED: static unused */
++static struct xsd_errors xsd_errors[]
++#if defined(__GNUC__)
++__attribute__((unused))
++#endif
++ = {
++ XSD_ERROR(EINVAL),
++ XSD_ERROR(EACCES),
++ XSD_ERROR(EEXIST),
++ XSD_ERROR(EISDIR),
++ XSD_ERROR(ENOENT),
++ XSD_ERROR(ENOMEM),
++ XSD_ERROR(ENOSPC),
++ XSD_ERROR(EIO),
++ XSD_ERROR(ENOTEMPTY),
++ XSD_ERROR(ENOSYS),
++ XSD_ERROR(EROFS),
++ XSD_ERROR(EBUSY),
++ XSD_ERROR(EAGAIN),
++ XSD_ERROR(EISCONN)
++};
++
++struct xsd_sockmsg
++{
++ uint32_t type; /* XS_??? */
++ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
++ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
++ uint32_t len; /* Length of data following this. */
++
++ /* Generally followed by nul-terminated string(s). */
++};
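++
++/*
++ * Illustrative XS_READ request (a sketch): the header is immediately
++ * followed by its payload, here a nul-terminated path:
++ *
++ *  const char *path = "/local/domain/0/name";
++ *  struct xsd_sockmsg msg = {
++ *      .type   = XS_READ,
++ *      .req_id = 1,
++ *      .tx_id  = 0,                    (not in a transaction)
++ *      .len    = strlen(path) + 1,
++ *  };
++ *  write msg, then the path including its terminating nul, to the ring.
++ */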
++
++enum xs_watch_type
++{
++ XS_WATCH_PATH = 0,
++ XS_WATCH_TOKEN
++};
++
++/* Inter-domain shared memory communications. */
++#define XENSTORE_RING_SIZE 1024
++typedef uint32_t XENSTORE_RING_IDX;
++#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
++struct xenstore_domain_interface {
++ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
++ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
++ XENSTORE_RING_IDX req_cons, req_prod;
++ XENSTORE_RING_IDX rsp_cons, rsp_prod;
++};
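++
++/*
++ * Illustrative producer-side copy into the request ring (a sketch;
++ * the memory barrier and the event-channel kick are elided):
++ *
++ *  XENSTORE_RING_IDX prod = intf->req_prod;
++ *  while (len != 0 && (prod - intf->req_cons) < XENSTORE_RING_SIZE) {
++ *      intf->req[MASK_XENSTORE_IDX(prod++)] = *src++;
++ *      len--;
++ *  }
++ *  barrier, then intf->req_prod = prod, then notify the daemon.
++ */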
++
++/* Violating this is very bad. See docs/misc/xenstore.txt. */
++#define XENSTORE_PAYLOAD_MAX 4096
++
++/* Violating these just gets you an error back */
++#define XENSTORE_ABS_PATH_MAX 3072
++#define XENSTORE_REL_PATH_MAX 2048
++
++#endif /* _XS_WIRE_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/kexec.h linux-2.6.18-xen-3.3.0/include/xen/interface/kexec.h
+--- linux-2.6.18.8/include/xen/interface/kexec.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/kexec.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,189 @@
++/******************************************************************************
++ * kexec.h - Public portion
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Xen port written by:
++ * - Simon 'Horms' Horman <horms@verge.net.au>
++ * - Magnus Damm <magnus@valinux.co.jp>
++ */
++
++#ifndef _XEN_PUBLIC_KEXEC_H
++#define _XEN_PUBLIC_KEXEC_H
++
++
++/* This file describes the Kexec / Kdump hypercall interface for Xen.
++ *
++ * Kexec under vanilla Linux allows a user to reboot the physical machine
++ * into a new user-specified kernel. The Xen port extends this idea
++ * to allow rebooting of the machine from dom0. When kexec for dom0
++ * is used to reboot, both the hypervisor and the domains get replaced
++ * with some other kernel. It is possible to kexec between vanilla
++ * Linux and Xen and back again. Xen to Xen works well too.
++ *
++ * The hypercall interface for kexec can be divided into three main
++ * types of hypercall operations:
++ *
++ * 1) Range information:
++ * This is used by the dom0 kernel to ask the hypervisor about various
++ * address information. This information is needed to allow kexec-tools
++ * to fill in the ELF headers for /proc/vmcore properly.
++ *
++ * 2) Load and unload of images:
++ * There are no big surprises here: the kexec binary from kexec-tools
++ * runs in userspace in dom0. The tool loads/unloads data such as the
++ * new kernel, initramfs and hypervisor into the dom0 kernel. When
++ * loaded the dom0 kernel performs a load hypercall operation, and
++ * before releasing all page references the dom0 kernel calls unload.
++ *
++ * 3) Kexec operation:
++ * This is used to start a previously loaded kernel.
++ */
++
++#include "xen.h"
++
++#if defined(__i386__) || defined(__x86_64__)
++#define KEXEC_XEN_NO_PAGES 17
++#endif
++
++/*
++ * Prototype for this hypercall is:
++ * int kexec_op(int cmd, void *args)
++ * @cmd == KEXEC_CMD_...
++ * KEXEC operation to perform
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Kexec supports two types of operation:
++ * - kexec into a regular kernel, very similar to a standard reboot
++ * - KEXEC_TYPE_DEFAULT is used to specify this type
++ * - kexec into a special "crash kernel", aka kexec-on-panic
++ * - KEXEC_TYPE_CRASH is used to specify this type
++ * - parts of our system may be broken at kexec-on-panic time
++ * - the code should be kept as simple and self-contained as possible
++ */
++
++#define KEXEC_TYPE_DEFAULT 0
++#define KEXEC_TYPE_CRASH 1
++
++
++/* The kexec implementation for Xen allows the user to load two
++ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
++ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
++ * per "instance". The data mainly consists of machine address lists to pages
++ * together with destination addresses. The data in xen_kexec_image_t
++ * is passed to the "code page" which is one page of code that performs
++ * the final relocations before jumping to the new kernel.
++ */
++
++typedef struct xen_kexec_image {
++#if defined(__i386__) || defined(__x86_64__)
++ unsigned long page_list[KEXEC_XEN_NO_PAGES];
++#endif
++#if defined(__ia64__)
++ unsigned long reboot_code_buffer;
++#endif
++ unsigned long indirection_page;
++ unsigned long start_address;
++} xen_kexec_image_t;
++
++/*
++ * Perform kexec having previously loaded a kexec or kdump kernel
++ * as appropriate.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ */
++#define KEXEC_CMD_kexec 0
++typedef struct xen_kexec_exec {
++ int type;
++} xen_kexec_exec_t;
++
++/*
++ * Load/Unload kernel image for kexec or kdump.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ * image == relocation information for kexec (ignored for unload) [in]
++ */
++#define KEXEC_CMD_kexec_load 1
++#define KEXEC_CMD_kexec_unload 2
++typedef struct xen_kexec_load {
++ int type;
++ xen_kexec_image_t image;
++} xen_kexec_load_t;
++
++#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
++#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
++#define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */
++#define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap
++ * Note that although this is adjacent
++ * to Xen it exists in a separate EFI
++ * region on ia64, and thus needs to be
++ * inserted into iomem_machine separately */
++#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of
++ * the ia64_boot_param */
++#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of
++ * the EFI Memory Map */
++#define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */
++
++/*
++ * Find the address and size of certain memory areas
++ * range == KEXEC_RANGE_... [in]
++ * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
++ * size == number of bytes reserved in window [out]
++ * start == address of the first byte in the window [out]
++ */
++#define KEXEC_CMD_kexec_get_range 3
++typedef struct xen_kexec_range {
++ int range;
++ int nr;
++ unsigned long size;
++ unsigned long start;
++} xen_kexec_range_t;
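++
++/*
++ * Illustrative range query from dom0 (a sketch; kexec_op() stands in
++ * for the guest's hypercall wrapper and is an assumption here):
++ *
++ *  xen_kexec_range_t range = { .range = KEXEC_RANGE_MA_CRASH, .nr = 0 };
++ *  if (kexec_op(KEXEC_CMD_kexec_get_range, &range) == 0)
++ *      the crash area is [range.start, range.start + range.size)
++ */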
++
++/* vmcoreinfo stuff */
++#define VMCOREINFO_BYTES (4096)
++#define VMCOREINFO_NOTE_NAME "VMCOREINFO_XEN"
++void arch_crash_save_vmcoreinfo(void);
++void vmcoreinfo_append_str(const char *fmt, ...)
++ __attribute__ ((format (printf, 1, 2)));
++#define VMCOREINFO_PAGESIZE(value) \
++ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
++#define VMCOREINFO_SYMBOL(name) \
++ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
++#define VMCOREINFO_SYMBOL_ALIAS(alias, name) \
++ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #alias, (unsigned long)&name)
++#define VMCOREINFO_STRUCT_SIZE(name) \
++ vmcoreinfo_append_str("SIZE(%s)=%zu\n", #name, sizeof(struct name))
++#define VMCOREINFO_OFFSET(name, field) \
++ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
++ (unsigned long)offsetof(struct name, field))
++#define VMCOREINFO_OFFSET_ALIAS(name, field, alias) \
++ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #alias, \
++ (unsigned long)offsetof(struct name, field))
++
++#endif /* _XEN_PUBLIC_KEXEC_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/libelf.h linux-2.6.18-xen-3.3.0/include/xen/interface/libelf.h
+--- linux-2.6.18.8/include/xen/interface/libelf.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/libelf.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,265 @@
++/******************************************************************************
++ * libelf.h
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XC_LIBELF__
++#define __XC_LIBELF__ 1
++
++#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
++#define XEN_ELF_LITTLE_ENDIAN
++#else
++#error define architectural endianness
++#endif
++
++#undef ELFSIZE
++#include "elfnote.h"
++#include "elfstructs.h"
++#include "features.h"
++
++/* ------------------------------------------------------------------------ */
++
++typedef union {
++ Elf32_Ehdr e32;
++ Elf64_Ehdr e64;
++} elf_ehdr;
++
++typedef union {
++ Elf32_Phdr e32;
++ Elf64_Phdr e64;
++} elf_phdr;
++
++typedef union {
++ Elf32_Shdr e32;
++ Elf64_Shdr e64;
++} elf_shdr;
++
++typedef union {
++ Elf32_Sym e32;
++ Elf64_Sym e64;
++} elf_sym;
++
++typedef union {
++ Elf32_Rel e32;
++ Elf64_Rel e64;
++} elf_rel;
++
++typedef union {
++ Elf32_Rela e32;
++ Elf64_Rela e64;
++} elf_rela;
++
++typedef union {
++ Elf32_Note e32;
++ Elf64_Note e64;
++} elf_note;
++
++struct elf_binary {
++ /* elf binary */
++ const char *image;
++ size_t size;
++ char class;
++ char data;
++
++ const elf_ehdr *ehdr;
++ const char *sec_strtab;
++ const elf_shdr *sym_tab;
++ const char *sym_strtab;
++
++ /* loaded to */
++ char *dest;
++ uint64_t pstart;
++ uint64_t pend;
++ uint64_t reloc_offset;
++
++ uint64_t bsd_symtab_pstart;
++ uint64_t bsd_symtab_pend;
++
++#ifndef __XEN__
++ /* misc */
++ FILE *log;
++#endif
++ int verbose;
++};
++
++/* ------------------------------------------------------------------------ */
++/* accessing elf header fields */
++
++#ifdef XEN_ELF_BIG_ENDIAN
++# define NATIVE_ELFDATA ELFDATA2MSB
++#else
++# define NATIVE_ELFDATA ELFDATA2LSB
++#endif
++
++#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
++#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
++#define elf_msb(elf) (ELFDATA2MSB == (elf)->data)
++#define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
++#define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)
++
++#define elf_uval(elf, str, elem) \
++ ((ELFCLASS64 == (elf)->class) \
++ ? elf_access_unsigned((elf), (str), \
++ offsetof(typeof(*(str)),e64.elem), \
++ sizeof((str)->e64.elem)) \
++ : elf_access_unsigned((elf), (str), \
++ offsetof(typeof(*(str)),e32.elem), \
++ sizeof((str)->e32.elem)))
++
++#define elf_sval(elf, str, elem) \
++ ((ELFCLASS64 == (elf)->class) \
++ ? elf_access_signed((elf), (str), \
++ offsetof(typeof(*(str)),e64.elem), \
++ sizeof((str)->e64.elem)) \
++ : elf_access_signed((elf), (str), \
++ offsetof(typeof(*(str)),e32.elem), \
++ sizeof((str)->e32.elem)))
++
++#define elf_size(elf, str) \
++ ((ELFCLASS64 == (elf)->class) \
++ ? sizeof((str)->e64) : sizeof((str)->e32))
++
++uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
++ uint64_t offset, size_t size);
++int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
++ uint64_t offset, size_t size);
++
++uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_tools.c */
++
++int elf_shdr_count(struct elf_binary *elf);
++int elf_phdr_count(struct elf_binary *elf);
++
++const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
++const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
++const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
++
++const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
++const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
++const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
++
++const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
++const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
++
++const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
++const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
++
++const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
++const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
++uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
++const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
++
++int elf_is_elfbinary(const void *image);
++int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_loader.c */
++
++int elf_init(struct elf_binary *elf, const char *image, size_t size);
++#ifdef __XEN__
++void elf_set_verbose(struct elf_binary *elf);
++#else
++void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
++#endif
++
++void elf_parse_binary(struct elf_binary *elf);
++void elf_load_binary(struct elf_binary *elf);
++
++void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
++uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
++
++void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart); /* private */
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_relocate.c */
++
++int elf_reloc(struct elf_binary *elf);
++
++/* ------------------------------------------------------------------------ */
++/* xc_libelf_dominfo.c */
++
++#define UNSET_ADDR ((uint64_t)-1)
++
++enum xen_elfnote_type {
++ XEN_ENT_NONE = 0,
++ XEN_ENT_LONG = 1,
++ XEN_ENT_STR = 2
++};
++
++struct xen_elfnote {
++ enum xen_elfnote_type type;
++ const char *name;
++ union {
++ const char *str;
++ uint64_t num;
++ } data;
++};
++
++struct elf_dom_parms {
++ /* raw */
++ const char *guest_info;
++ const void *elf_note_start;
++ const void *elf_note_end;
++ struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
++
++ /* parsed */
++ char guest_os[16];
++ char guest_ver[16];
++ char xen_ver[16];
++ char loader[16];
++ int pae;
++ int bsd_symtab;
++ uint64_t virt_base;
++ uint64_t virt_entry;
++ uint64_t virt_hypercall;
++ uint64_t virt_hv_start_low;
++ uint64_t elf_paddr_offset;
++ uint32_t f_supported[XENFEAT_NR_SUBMAPS];
++ uint32_t f_required[XENFEAT_NR_SUBMAPS];
++
++ /* calculated */
++ uint64_t virt_offset;
++ uint64_t virt_kstart;
++ uint64_t virt_kend;
++};
++
++static inline void elf_xen_feature_set(int nr, uint32_t * addr)
++{
++ addr[nr >> 5] |= 1 << (nr & 31);
++}
++static inline int elf_xen_feature_get(int nr, uint32_t * addr)
++{
++ return !!(addr[nr >> 5] & (1 << (nr & 31)));
++}
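++
++/*
++ * Example (a sketch): mark a feature as supported and test for it.
++ * XENFEAT_writable_page_tables comes from the features.h included above.
++ *
++ *  uint32_t supported[XENFEAT_NR_SUBMAPS] = { 0 };
++ *  elf_xen_feature_set(XENFEAT_writable_page_tables, supported);
++ *  if (elf_xen_feature_get(XENFEAT_writable_page_tables, supported))
++ *      the bit is set
++ */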
++
++int elf_xen_parse_features(const char *features,
++ uint32_t *supported,
++ uint32_t *required);
++int elf_xen_parse_note(struct elf_binary *elf,
++ struct elf_dom_parms *parms,
++ const elf_note *note);
++int elf_xen_parse_guest_info(struct elf_binary *elf,
++ struct elf_dom_parms *parms);
++int elf_xen_parse(struct elf_binary *elf,
++ struct elf_dom_parms *parms);
++
++#endif /* __XC_LIBELF__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/memory.h linux-2.6.18-xen-3.3.0/include/xen/interface/memory.h
+--- linux-2.6.18.8/include/xen/interface/memory.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/memory.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,295 @@
++/******************************************************************************
++ * memory.h
++ *
++ * Memory reservation and information.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_MEMORY_H__
++#define __XEN_PUBLIC_MEMORY_H__
++
++/*
++ * Increase or decrease the specified domain's memory reservation. Returns the
++ * number of extents successfully allocated or freed.
++ * arg == addr of struct xen_memory_reservation.
++ */
++#define XENMEM_increase_reservation 0
++#define XENMEM_decrease_reservation 1
++#define XENMEM_populate_physmap 6
++
++#if __XEN_INTERFACE_VERSION__ >= 0x00030209
++/*
++ * Maximum # bits addressable by the user of the allocated region (e.g., I/O
++ * devices often have a 32-bit limitation even in 64-bit systems). If zero
++ * then the user has no addressing restriction. This field is not used by
++ * XENMEM_decrease_reservation.
++ */
++#define XENMEMF_address_bits(x) (x)
++#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
++/* NUMA node to allocate from. */
++#define XENMEMF_node(x) (((x) + 1) << 8)
++#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
++#endif
++
++struct xen_memory_reservation {
++
++ /*
++ * XENMEM_increase_reservation:
++ * OUT: MFN (*not* GMFN) bases of extents that were allocated
++ * XENMEM_decrease_reservation:
++ * IN: GMFN bases of extents to free
++ * XENMEM_populate_physmap:
++ * IN: GPFN bases of extents to populate with memory
++ * OUT: GMFN bases of extents that were allocated
++ * (NB. This command also updates the mach_to_phys translation table)
++ */
++ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
++
++ /* Number of extents, and size/alignment of each (2^extent_order pages). */
++ xen_ulong_t nr_extents;
++ unsigned int extent_order;
++
++#if __XEN_INTERFACE_VERSION__ >= 0x00030209
++ /* XENMEMF flags. */
++ unsigned int mem_flags;
++#else
++ unsigned int address_bits;
++#endif
++
++ /*
++ * Domain whose reservation is being changed.
++ * Unprivileged domains can specify only DOMID_SELF.
++ */
++ domid_t domid;
++};
++typedef struct xen_memory_reservation xen_memory_reservation_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
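++
++/*
++ * Illustrative balloon-down call (a sketch; frame_list holds the GMFNs
++ * to free and memory_op() stands in for the guest's hypercall wrapper):
++ *
++ *  struct xen_memory_reservation res = {
++ *      .nr_extents   = nr_pages,
++ *      .extent_order = 0,              (single-page extents)
++ *      .domid        = DOMID_SELF,
++ *  };
++ *  set_xen_guest_handle(res.extent_start, frame_list);
++ *  freed = memory_op(XENMEM_decrease_reservation, &res);
++ */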
++
++/*
++ * An atomic exchange of memory pages. If return code is zero then
++ * @out.extent_list provides GMFNs of the newly-allocated memory.
++ * Returns zero on complete success, otherwise a negative error code.
++ * On complete success then always @nr_exchanged == @in.nr_extents.
++ * On partial success @nr_exchanged indicates how much work was done.
++ */
++#define XENMEM_exchange 11
++struct xen_memory_exchange {
++ /*
++ * [IN] Details of memory extents to be exchanged (GMFN bases).
++ * Note that @in.address_bits is ignored and unused.
++ */
++ struct xen_memory_reservation in;
++
++ /*
++ * [IN/OUT] Details of new memory extents.
++ * We require that:
++ * 1. @in.domid == @out.domid
++ * 2. @in.nr_extents << @in.extent_order ==
++ * @out.nr_extents << @out.extent_order
++ * 3. @in.extent_start and @out.extent_start lists must not overlap
++ * 4. @out.extent_start lists GPFN bases to be populated
++ * 5. @out.extent_start is overwritten with allocated GMFN bases
++ */
++ struct xen_memory_reservation out;
++
++ /*
++ * [OUT] Number of input extents that were successfully exchanged:
++ * 1. The first @nr_exchanged input extents were successfully
++ * deallocated.
++ * 2. The corresponding first entries in the output extent list correctly
++ * indicate the GMFNs that were successfully exchanged.
++ * 3. All other input and output extents are untouched.
++ * 4. If not all input extents are exchanged then the return code of this
++ * command will be non-zero.
++ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
++ */
++ xen_ulong_t nr_exchanged;
++};
++typedef struct xen_memory_exchange xen_memory_exchange_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
++
++/*
++ * Returns the maximum machine frame number of mapped RAM in this system.
++ * This command always succeeds (it never returns an error code).
++ * arg == NULL.
++ */
++#define XENMEM_maximum_ram_page 2
++
++/*
++ * Returns the current or maximum memory reservation, in pages, of the
++ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
++ * arg == addr of domid_t.
++ */
++#define XENMEM_current_reservation 3
++#define XENMEM_maximum_reservation 4
++
++/*
++ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
++ */
++#define XENMEM_maximum_gpfn 14
++
++/*
++ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
++ * mapping table. Architectures which do not have a m2p table do not implement
++ * this command.
++ * arg == addr of xen_machphys_mfn_list_t.
++ */
++#define XENMEM_machphys_mfn_list 5
++struct xen_machphys_mfn_list {
++ /*
++ * Size of the 'extent_start' array. Fewer entries will be filled if the
++ * machphys table is smaller than max_extents * 2MB.
++ */
++ unsigned int max_extents;
++
++ /*
++ * Pointer to buffer to fill with list of extent starts. If there are
++ * any large discontiguities in the machine address space, 2MB gaps in
++ * the machphys table will be represented by an MFN base of zero.
++ */
++ XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
++
++ /*
++ * Number of extents written to the above array. This will be smaller
++ * than 'max_extents' if the machphys table is smaller than
++ * max_extents * 2MB.
++ */
++ unsigned int nr_extents;
++};
++typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
++DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
++
++/*
++ * Returns the location in virtual address space of the machine_to_phys
++ * mapping table. Architectures which do not have a m2p table, or which do not
++ * map it by default into guest address space, do not implement this command.
++ * arg == addr of xen_machphys_mapping_t.
++ */
++#define XENMEM_machphys_mapping 12
++struct xen_machphys_mapping {
++ xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
++ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
++};
++typedef struct xen_machphys_mapping xen_machphys_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
++
++/*
++ * Sets the GPFN at which a particular page appears in the specified guest's
++ * pseudophysical address space.
++ * arg == addr of xen_add_to_physmap_t.
++ */
++#define XENMEM_add_to_physmap 7
++struct xen_add_to_physmap {
++ /* Which domain to change the mapping for. */
++ domid_t domid;
++
++ /* Source mapping space. */
++#define XENMAPSPACE_shared_info 0 /* shared info page */
++#define XENMAPSPACE_grant_table 1 /* grant table page */
++ unsigned int space;
++
++ /* Index into source mapping space. */
++ xen_ulong_t idx;
++
++ /* GPFN where the source mapping page should appear. */
++ xen_pfn_t gpfn;
++};
++typedef struct xen_add_to_physmap xen_add_to_physmap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
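++
++/*
++ * Illustrative sketch (not part of the interface): mapping the calling
++ * domain's shared-info page at a caller-chosen GPFN. Assumes the
++ * kernel's HYPERVISOR_memory_op() wrapper; chosen_gpfn is hypothetical.
++ *
++ *     struct xen_add_to_physmap xatp = {
++ *         .domid = DOMID_SELF,
++ *         .space = XENMAPSPACE_shared_info,
++ *         .idx   = 0,
++ *         .gpfn  = chosen_gpfn,
++ *     };
++ *
++ *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
++ */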
++
++/*
++ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
++ * code on failure. This call only works for auto-translated guests.
++ */
++#define XENMEM_translate_gpfn_list 8
++struct xen_translate_gpfn_list {
++ /* Which domain to translate for? */
++ domid_t domid;
++
++ /* Length of list. */
++ xen_ulong_t nr_gpfns;
++
++ /* List of GPFNs to translate. */
++ XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
++
++ /*
++ * Output list to contain MFN translations. May be the same as the input
++ * list (in which case each input GPFN is overwritten with the output MFN).
++ */
++ XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
++};
++typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
++DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
++
++/*
++ * Returns the pseudo-physical memory map as it was when the domain
++ * was started (specified by XENMEM_set_memory_map).
++ * arg == addr of xen_memory_map_t.
++ */
++#define XENMEM_memory_map 9
++struct xen_memory_map {
++ /*
++ * On call the number of entries which can be stored in buffer. On
++ * return the number of entries which have been stored in
++ * buffer.
++ */
++ unsigned int nr_entries;
++
++ /*
++ * Entries in the buffer are in the same format as returned by the
++ * BIOS INT 0x15 EAX=0xE820 call.
++ */
++ XEN_GUEST_HANDLE(void) buffer;
++};
++typedef struct xen_memory_map xen_memory_map_t;
++DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
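++
++/*
++ * Illustrative sketch (not part of the interface): fetching the
++ * pseudo-physical memory map into a caller-provided E820-format array.
++ * Assumes the kernel's HYPERVISOR_memory_op() wrapper, the
++ * set_xen_guest_handle() macro and the kernel's struct e820entry layout.
++ *
++ *     struct e820entry map[E820MAX];
++ *     struct xen_memory_map memmap = { .nr_entries = E820MAX };
++ *
++ *     set_xen_guest_handle(memmap.buffer, map);
++ *     rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ *     (on success, memmap.nr_entries is the number of entries filled)
++ */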
++
++/*
++ * Returns the real physical memory map. Passes the same structure as
++ * XENMEM_memory_map.
++ * arg == addr of xen_memory_map_t.
++ */
++#define XENMEM_machine_memory_map 10
++
++/*
++ * Set the pseudo-physical memory map of a domain, as returned by
++ * XENMEM_memory_map.
++ * arg == addr of xen_foreign_memory_map_t.
++ */
++#define XENMEM_set_memory_map 13
++struct xen_foreign_memory_map {
++ domid_t domid;
++ struct xen_memory_map map;
++};
++typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
++DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
++
++#endif /* __XEN_PUBLIC_MEMORY_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/nmi.h linux-2.6.18-xen-3.3.0/include/xen/interface/nmi.h
+--- linux-2.6.18.8/include/xen/interface/nmi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/nmi.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,78 @@
++/******************************************************************************
++ * nmi.h
++ *
++ * NMI callback registration and reason codes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_NMI_H__
++#define __XEN_PUBLIC_NMI_H__
++
++/*
++ * NMI reason codes:
++ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
++ */
++ /* I/O-check error reported via ISA port 0x61, bit 6. */
++#define _XEN_NMIREASON_io_error 0
++#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
++ /* Parity error reported via ISA port 0x61, bit 7. */
++#define _XEN_NMIREASON_parity_error 1
++#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
++ /* Unknown hardware-generated NMI. */
++#define _XEN_NMIREASON_unknown 2
++#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
++
++/*
++ * long nmi_op(unsigned int cmd, void *arg)
++ * NB. All ops return zero on success, else a negative error code.
++ */
++
++/*
++ * Register NMI callback for this (calling) VCPU. Currently this only makes
++ * sense for domain 0, vcpu 0. All other callers receive -EINVAL.
++ * arg == pointer to xennmi_callback structure.
++ */
++#define XENNMI_register_callback 0
++struct xennmi_callback {
++ unsigned long handler_address;
++ unsigned long pad;
++};
++typedef struct xennmi_callback xennmi_callback_t;
++DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
++
++/*
++ * Deregister NMI callback for this (calling) VCPU.
++ * arg == NULL.
++ */
++#define XENNMI_unregister_callback 1
++
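++/*
++ * Illustrative sketch (not part of the interface): domain 0, VCPU 0
++ * registering an NMI handler. Assumes the kernel's HYPERVISOR_nmi_op()
++ * wrapper; nmi_entry is a hypothetical assembly entry point.
++ *
++ *     struct xennmi_callback cb = {
++ *         .handler_address = (unsigned long)nmi_entry,
++ *     };
++ *
++ *     rc = HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ */
++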
++#endif /* __XEN_PUBLIC_NMI_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/physdev.h linux-2.6.18-xen-3.3.0/include/xen/interface/physdev.h
+--- linux-2.6.18.8/include/xen/interface/physdev.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/physdev.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,219 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_PHYSDEV_H__
++#define __XEN_PUBLIC_PHYSDEV_H__
++
++/*
++ * Prototype for this hypercall is:
++ * int physdev_op(int cmd, void *args)
++ * @cmd == PHYSDEVOP_??? (physdev operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Notify end-of-interrupt (EOI) for the specified IRQ.
++ * @arg == pointer to physdev_eoi structure.
++ */
++#define PHYSDEVOP_eoi 12
++struct physdev_eoi {
++ /* IN */
++ uint32_t irq;
++};
++typedef struct physdev_eoi physdev_eoi_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
++
++/*
++ * Query the status of an IRQ line.
++ * @arg == pointer to physdev_irq_status_query structure.
++ */
++#define PHYSDEVOP_irq_status_query 5
++struct physdev_irq_status_query {
++ /* IN */
++ uint32_t irq;
++ /* OUT */
++ uint32_t flags; /* XENIRQSTAT_* */
++};
++typedef struct physdev_irq_status_query physdev_irq_status_query_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
++
++/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
++#define _XENIRQSTAT_needs_eoi (0)
++#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
++
++/* IRQ shared by multiple guests? */
++#define _XENIRQSTAT_shared (1)
++#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
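++
++/*
++ * Illustrative sketch (not part of the interface): querying whether an
++ * IRQ needs an explicit EOI and issuing one once it has been serviced.
++ * Assumes the kernel's HYPERVISOR_physdev_op() wrapper.
++ *
++ *     struct physdev_irq_status_query query = { .irq = irq };
++ *
++ *     if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query) == 0 &&
++ *         (query.flags & XENIRQSTAT_needs_eoi)) {
++ *         struct physdev_eoi eoi = { .irq = irq };
++ *         HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
++ *     }
++ */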
++
++/*
++ * Set the current VCPU's I/O privilege level.
++ * @arg == pointer to physdev_set_iopl structure.
++ */
++#define PHYSDEVOP_set_iopl 6
++struct physdev_set_iopl {
++ /* IN */
++ uint32_t iopl;
++};
++typedef struct physdev_set_iopl physdev_set_iopl_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
++
++/*
++ * Set the current VCPU's I/O-port permissions bitmap.
++ * @arg == pointer to physdev_set_iobitmap structure.
++ */
++#define PHYSDEVOP_set_iobitmap 7
++struct physdev_set_iobitmap {
++ /* IN */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030205
++ XEN_GUEST_HANDLE(uint8) bitmap;
++#else
++ uint8_t *bitmap;
++#endif
++ uint32_t nr_ports;
++};
++typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
++
++/*
++ * Read or write an IO-APIC register.
++ * @arg == pointer to physdev_apic structure.
++ */
++#define PHYSDEVOP_apic_read 8
++#define PHYSDEVOP_apic_write 9
++struct physdev_apic {
++ /* IN */
++ unsigned long apic_physbase;
++ uint32_t reg;
++ /* IN or OUT */
++ uint32_t value;
++};
++typedef struct physdev_apic physdev_apic_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
++
++/*
++ * Allocate or free a physical upcall vector for the specified IRQ line.
++ * @arg == pointer to physdev_irq structure.
++ */
++#define PHYSDEVOP_alloc_irq_vector 10
++#define PHYSDEVOP_free_irq_vector 11
++struct physdev_irq {
++ /* IN */
++ uint32_t irq;
++ /* IN or OUT */
++ uint32_t vector;
++};
++typedef struct physdev_irq physdev_irq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
++
++#define MAP_PIRQ_TYPE_MSI 0x0
++#define MAP_PIRQ_TYPE_GSI 0x1
++#define MAP_PIRQ_TYPE_UNKNOWN 0x2
++
++#define PHYSDEVOP_map_pirq 13
++struct physdev_map_pirq {
++ domid_t domid;
++ /* IN */
++ int type;
++ /* IN */
++ int index;
++ /* IN or OUT */
++ int pirq;
++ /* IN */
++ int bus;
++ /* IN */
++ int devfn;
++ /* IN */
++ int entry_nr;
++ /* IN */
++ uint64_t table_base;
++};
++typedef struct physdev_map_pirq physdev_map_pirq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
++
++#define PHYSDEVOP_unmap_pirq 14
++struct physdev_unmap_pirq {
++ domid_t domid;
++ /* IN */
++ int pirq;
++};
++
++typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
++
++#define PHYSDEVOP_manage_pci_add 15
++#define PHYSDEVOP_manage_pci_remove 16
++struct physdev_manage_pci {
++ /* IN */
++ uint8_t bus;
++ uint8_t devfn;
++};
++
++typedef struct physdev_manage_pci physdev_manage_pci_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
++
++/*
++ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
++ * hypercall since 0x00030202.
++ */
++struct physdev_op {
++ uint32_t cmd;
++ union {
++ struct physdev_irq_status_query irq_status_query;
++ struct physdev_set_iopl set_iopl;
++ struct physdev_set_iobitmap set_iobitmap;
++ struct physdev_apic apic_op;
++ struct physdev_irq irq_op;
++ } u;
++};
++typedef struct physdev_op physdev_op_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
++
++/*
++ * Notify that some PIRQ-bound event channels have been unmasked.
++ * ** This command is obsolete since interface version 0x00030202 and is **
++ * ** unsupported by newer versions of Xen. **
++ */
++#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
++
++/*
++ * These all-capitals physdev operation names are superseded by the new names
++ * (defined above) since interface version 0x00030202.
++ */
++#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
++#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
++#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
++#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
++#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
++#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
++#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
++#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
++#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
++
++#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/platform.h linux-2.6.18-xen-3.3.0/include/xen/interface/platform.h
+--- linux-2.6.18.8/include/xen/interface/platform.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/platform.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,346 @@
++/******************************************************************************
++ * platform.h
++ *
++ * Hardware platform operations. Intended for use by domain-0 kernel.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_PLATFORM_H__
++#define __XEN_PUBLIC_PLATFORM_H__
++
++#include "xen.h"
++
++#define XENPF_INTERFACE_VERSION 0x03000001
++
++/*
++ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
++ * 1 January, 1970 if the current system time was <system_time>.
++ */
++#define XENPF_settime 17
++struct xenpf_settime {
++ /* IN variables. */
++ uint32_t secs;
++ uint32_t nsecs;
++ uint64_t system_time;
++};
++typedef struct xenpf_settime xenpf_settime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
++
++/*
++ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
++ * On x86, @type is an architecture-defined MTRR memory type.
++ * On success, returns the MTRR that was used (@reg) and a handle that can
++ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
++ * (x86-specific).
++ */
++#define XENPF_add_memtype 31
++struct xenpf_add_memtype {
++ /* IN variables. */
++ xen_pfn_t mfn;
++ uint64_t nr_mfns;
++ uint32_t type;
++ /* OUT variables. */
++ uint32_t handle;
++ uint32_t reg;
++};
++typedef struct xenpf_add_memtype xenpf_add_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
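++
++/*
++ * Illustrative sketch (not part of the interface): marking a frame range
++ * write-combining and keeping the handle for a later XENPF_del_memtype.
++ * Assumes the kernel's HYPERVISOR_platform_op() wrapper; type 1 is the
++ * x86 MTRR write-combining type, and base_mfn/nr_mfns are caller state.
++ *
++ *     struct xen_platform_op op = {
++ *         .cmd = XENPF_add_memtype,
++ *         .interface_version = XENPF_INTERFACE_VERSION,
++ *     };
++ *
++ *     op.u.add_memtype.mfn     = base_mfn;
++ *     op.u.add_memtype.nr_mfns = nr_mfns;
++ *     op.u.add_memtype.type    = 1;
++ *     rc = HYPERVISOR_platform_op(&op);
++ *     (on success, op.u.add_memtype.handle and .reg identify the setting)
++ */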
++
++/*
++ * Tear down an existing memory-range type. If @handle is remembered then it
++ * should be passed in to accurately tear down the correct setting (in case
++ * of overlapping memory regions with differing types). If it is not known
++ * then @handle should be set to zero. In all cases @reg must be set.
++ * (x86-specific).
++ */
++#define XENPF_del_memtype 32
++struct xenpf_del_memtype {
++ /* IN variables. */
++ uint32_t handle;
++ uint32_t reg;
++};
++typedef struct xenpf_del_memtype xenpf_del_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
++
++/* Read current type of an MTRR (x86-specific). */
++#define XENPF_read_memtype 33
++struct xenpf_read_memtype {
++ /* IN variables. */
++ uint32_t reg;
++ /* OUT variables. */
++ xen_pfn_t mfn;
++ uint64_t nr_mfns;
++ uint32_t type;
++};
++typedef struct xenpf_read_memtype xenpf_read_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
++
++#define XENPF_microcode_update 35
++struct xenpf_microcode_update {
++ /* IN variables. */
++ XEN_GUEST_HANDLE(void) data; /* Pointer to microcode data */
++ uint32_t length; /* Length of microcode data. */
++};
++typedef struct xenpf_microcode_update xenpf_microcode_update_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
++
++#define XENPF_platform_quirk 39
++#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */
++#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */
++#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */
++struct xenpf_platform_quirk {
++ /* IN variables. */
++ uint32_t quirk_id;
++};
++typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
++
++#define XENPF_firmware_info 50
++#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */
++#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
++#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */
++struct xenpf_firmware_info {
++ /* IN variables. */
++ uint32_t type;
++ uint32_t index;
++ /* OUT variables. */
++ union {
++ struct {
++ /* Int13, Fn48: Check Extensions Present. */
++ uint8_t device; /* %dl: bios device number */
++ uint8_t version; /* %ah: major version */
++ uint16_t interface_support; /* %cx: support bitmap */
++ /* Int13, Fn08: Legacy Get Device Parameters. */
++ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */
++ uint8_t legacy_max_head; /* %dh: max head # */
++ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */
++ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
++ /* NB. First uint16_t of buffer must be set to buffer size. */
++ XEN_GUEST_HANDLE(void) edd_params;
++ } disk_info; /* XEN_FW_DISK_INFO */
++ struct {
++ uint8_t device; /* bios device number */
++ uint32_t mbr_signature; /* offset 0x1b8 in mbr */
++ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
++ struct {
++ /* Int10, AX=4F15: Get EDID info. */
++ uint8_t capabilities;
++ uint8_t edid_transfer_time;
++ /* must refer to 128-byte buffer */
++ XEN_GUEST_HANDLE(uint8) edid;
++ } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
++ } u;
++};
++typedef struct xenpf_firmware_info xenpf_firmware_info_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
++
++#define XENPF_enter_acpi_sleep 51
++struct xenpf_enter_acpi_sleep {
++ /* IN variables */
++ uint16_t pm1a_cnt_val; /* PM1a control value. */
++ uint16_t pm1b_cnt_val; /* PM1b control value. */
++ uint32_t sleep_state; /* Which state to enter (Sn). */
++ uint32_t flags; /* Must be zero. */
++};
++typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
++
++#define XENPF_change_freq 52
++struct xenpf_change_freq {
++ /* IN variables */
++ uint32_t flags; /* Must be zero. */
++ uint32_t cpu; /* Physical cpu. */
++ uint64_t freq; /* New frequency (Hz). */
++};
++typedef struct xenpf_change_freq xenpf_change_freq_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t);
++
++/*
++ * Get idle times (nanoseconds since boot) for physical CPUs specified in the
++ * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is
++ * indexed by CPU number; only entries with the corresponding @cpumap_bitmap
++ * bit set are written to. On return, @cpumap_bitmap is modified so that any
++ * non-existent CPUs are cleared. Such CPUs have their @idletime array entry
++ * cleared.
++ */
++#define XENPF_getidletime 53
++struct xenpf_getidletime {
++ /* IN/OUT variables */
++ /* IN: CPUs to interrogate; OUT: subset of IN which are present */
++ XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
++ /* IN variables */
++ /* Size of cpumap bitmap. */
++ uint32_t cpumap_nr_cpus;
++ /* Must be indexable for every cpu in cpumap_bitmap. */
++ XEN_GUEST_HANDLE(uint64) idletime;
++ /* OUT variables */
++ /* System time when the idletime snapshots were taken. */
++ uint64_t now;
++};
++typedef struct xenpf_getidletime xenpf_getidletime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t);
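++
++/*
++ * Illustrative sketch (not part of the interface): sampling idle times
++ * for the first NR_SAMPLED physical CPUs. Assumes the kernel's
++ * HYPERVISOR_platform_op() wrapper and the set_xen_guest_handle()
++ * macro; NR_SAMPLED is a hypothetical caller-chosen bound.
++ *
++ *     uint8_t cpumap[(NR_SAMPLED + 7) / 8];
++ *     uint64_t idletime[NR_SAMPLED];
++ *     struct xen_platform_op op = {
++ *         .cmd = XENPF_getidletime,
++ *         .interface_version = XENPF_INTERFACE_VERSION,
++ *     };
++ *
++ *     memset(cpumap, 0xff, sizeof(cpumap));   (interrogate every CPU)
++ *     set_xen_guest_handle(op.u.getidletime.cpumap_bitmap, cpumap);
++ *     op.u.getidletime.cpumap_nr_cpus = NR_SAMPLED;
++ *     set_xen_guest_handle(op.u.getidletime.idletime, idletime);
++ *     rc = HYPERVISOR_platform_op(&op);
++ */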
++
++#define XENPF_set_processor_pminfo 54
++
++/* ability bits */
++#define XEN_PROCESSOR_PM_CX 1
++#define XEN_PROCESSOR_PM_PX 2
++#define XEN_PROCESSOR_PM_TX 4
++
++/* cmd type */
++#define XEN_PM_CX 0
++#define XEN_PM_PX 1
++#define XEN_PM_TX 2
++
++/* Px sub info type */
++#define XEN_PX_PCT 1
++#define XEN_PX_PSS 2
++#define XEN_PX_PPC 4
++#define XEN_PX_PSD 8
++
++struct xen_power_register {
++ uint32_t space_id;
++ uint32_t bit_width;
++ uint32_t bit_offset;
++ uint32_t access_size;
++ uint64_t address;
++};
++
++struct xen_processor_csd {
++ uint32_t domain; /* domain number of one dependent group */
++ uint32_t coord_type; /* coordination type */
++ uint32_t num; /* number of processors in same domain */
++};
++typedef struct xen_processor_csd xen_processor_csd_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t);
++
++struct xen_processor_cx {
++ struct xen_power_register reg; /* GAS for Cx trigger register */
++ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */
++ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */
++ uint32_t power; /* average power consumption (mW) */
++ uint32_t dpcnt; /* number of dependency entries */
++ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */
++};
++typedef struct xen_processor_cx xen_processor_cx_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t);
++
++struct xen_processor_flags {
++ uint32_t bm_control:1;
++ uint32_t bm_check:1;
++ uint32_t has_cst:1;
++ uint32_t power_setup_done:1;
++ uint32_t bm_rld_set:1;
++};
++
++struct xen_processor_power {
++ uint32_t count; /* number of C state entries in array below */
++ struct xen_processor_flags flags; /* global flags of this processor */
++ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */
++};
++
++struct xen_pct_register {
++ uint8_t descriptor;
++ uint16_t length;
++ uint8_t space_id;
++ uint8_t bit_width;
++ uint8_t bit_offset;
++ uint8_t reserved;
++ uint64_t address;
++};
++
++struct xen_processor_px {
++ uint64_t core_frequency; /* megahertz */
++ uint64_t power; /* milliWatts */
++ uint64_t transition_latency; /* microseconds */
++ uint64_t bus_master_latency; /* microseconds */
++ uint64_t control; /* control value */
++ uint64_t status; /* success indicator */
++};
++typedef struct xen_processor_px xen_processor_px_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t);
++
++struct xen_psd_package {
++ uint64_t num_entries;
++ uint64_t revision;
++ uint64_t domain;
++ uint64_t coord_type;
++ uint64_t num_processors;
++};
++
++struct xen_processor_performance {
++ uint32_t flags; /* flag for Px sub info type */
++ uint32_t ppc; /* Platform limitation on freq usage */
++ struct xen_pct_register control_register;
++ struct xen_pct_register status_register;
++ uint32_t state_count; /* total available performance states */
++ XEN_GUEST_HANDLE(xen_processor_px_t) states;
++ struct xen_psd_package domain_info;
++ uint32_t shared_type; /* coordination type of this processor */
++};
++typedef struct xen_processor_performance xen_processor_performance_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t);
++
++struct xenpf_set_processor_pminfo {
++ /* IN variables */
++ uint32_t id; /* ACPI CPU ID */
++ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */
++ union {
++ struct xen_processor_power power;/* Cx: _CST/_CSD */
++ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */
++ };
++};
++typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t);
++
++struct xen_platform_op {
++ uint32_t cmd;
++ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
++ union {
++ struct xenpf_settime settime;
++ struct xenpf_add_memtype add_memtype;
++ struct xenpf_del_memtype del_memtype;
++ struct xenpf_read_memtype read_memtype;
++ struct xenpf_microcode_update microcode;
++ struct xenpf_platform_quirk platform_quirk;
++ struct xenpf_firmware_info firmware_info;
++ struct xenpf_enter_acpi_sleep enter_acpi_sleep;
++ struct xenpf_change_freq change_freq;
++ struct xenpf_getidletime getidletime;
++ struct xenpf_set_processor_pminfo set_pminfo;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_platform_op xen_platform_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
++
++#endif /* __XEN_PUBLIC_PLATFORM_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/sched.h linux-2.6.18-xen-3.3.0/include/xen/interface/sched.h
+--- linux-2.6.18.8/include/xen/interface/sched.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/sched.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,121 @@
++/******************************************************************************
++ * sched.h
++ *
++ * Scheduler state interactions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_SCHED_H__
++#define __XEN_PUBLIC_SCHED_H__
++
++#include "event_channel.h"
++
++/*
++ * The prototype for this hypercall is:
++ * long sched_op(int cmd, void *arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == Operation-specific extra argument(s), as described below.
++ *
++ * Versions of Xen prior to 3.0.2 provided only the following legacy version
++ * of this hypercall, supporting only the commands yield, block and shutdown:
++ * long sched_op(int cmd, unsigned long arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
++ * == SHUTDOWN_* code (SCHEDOP_shutdown)
++ * This legacy version is available to new guests as sched_op_compat().
++ */
++
++/*
++ * Voluntarily yield the CPU.
++ * @arg == NULL.
++ */
++#define SCHEDOP_yield 0
++
++/*
++ * Block execution of this VCPU until an event is received for processing.
++ * If called with event upcalls masked, this operation will atomically
++ * reenable event delivery and check for pending events before blocking the
++ * VCPU. This avoids a "wakeup waiting" race.
++ * @arg == NULL.
++ */
++#define SCHEDOP_block 1
++
++/*
++ * Halt execution of this domain (all VCPUs) and notify the system controller.
++ * @arg == pointer to sched_shutdown structure.
++ */
++#define SCHEDOP_shutdown 2
++struct sched_shutdown {
++ unsigned int reason; /* SHUTDOWN_* */
++};
++typedef struct sched_shutdown sched_shutdown_t;
++DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
++
++/*
++ * Poll a set of event-channel ports. Return when one or more are pending. An
++ * optional timeout may be specified.
++ * @arg == pointer to sched_poll structure.
++ */
++#define SCHEDOP_poll 3
++struct sched_poll {
++ XEN_GUEST_HANDLE(evtchn_port_t) ports;
++ unsigned int nr_ports;
++ uint64_t timeout;
++};
++typedef struct sched_poll sched_poll_t;
++DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
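++
++/*
++ * Illustrative sketch (not part of the interface): blocking on a single
++ * event-channel port for up to one second. Assumes the kernel's
++ * HYPERVISOR_sched_op() wrapper, the set_xen_guest_handle() macro and a
++ * hypothetical system-time helper; the timeout is an absolute system
++ * time in nanoseconds (zero means wait indefinitely).
++ *
++ *     evtchn_port_t port = my_port;           (hypothetical bound port)
++ *     struct sched_poll poll = { .nr_ports = 1 };
++ *
++ *     set_xen_guest_handle(poll.ports, &port);
++ *     poll.timeout = current_system_time_ns() + 1000000000ULL;
++ *     rc = HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
++ */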
++
++/*
++ * Declare a shutdown for another domain. The main use of this function is
++ * in interpreting shutdown requests and reasons for fully-virtualized
++ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
++ * @arg == pointer to sched_remote_shutdown structure.
++ */
++#define SCHEDOP_remote_shutdown 4
++struct sched_remote_shutdown {
++ domid_t domain_id; /* Remote domain ID */
++ unsigned int reason; /* SHUTDOWN_xxx reason */
++};
++typedef struct sched_remote_shutdown sched_remote_shutdown_t;
++DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
++
++/*
++ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
++ * software to determine the appropriate action. For the most part, Xen does
++ * not care about the shutdown code.
++ */
++#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
++#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
++#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
++#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
++
++#endif /* __XEN_PUBLIC_SCHED_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/sysctl.h linux-2.6.18-xen-3.3.0/include/xen/interface/sysctl.h
+--- linux-2.6.18.8/include/xen/interface/sysctl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/sysctl.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,295 @@
++/******************************************************************************
++ * sysctl.h
++ *
++ * System management operations. For use by node control stack.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_SYSCTL_H__
++#define __XEN_PUBLIC_SYSCTL_H__
++
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "sysctl operations are intended for use by node control tools only"
++#endif
++
++#include "xen.h"
++#include "domctl.h"
++
++#define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
++
++/*
++ * Read console content from Xen buffer ring.
++ */
++#define XEN_SYSCTL_readconsole 1
++struct xen_sysctl_readconsole {
++ /* IN: Non-zero -> clear after reading. */
++ uint8_t clear;
++ /* IN: Non-zero -> start index specified by @index field. */
++ uint8_t incremental;
++ uint8_t pad0, pad1;
++ /*
++ * IN: Start index for consuming from ring buffer (if @incremental);
++ * OUT: End index after consuming from ring buffer.
++ */
++ uint32_t index;
++ /* IN: Virtual address to write console data. */
++ XEN_GUEST_HANDLE_64(char) buffer;
++ /* IN: Size of buffer; OUT: Bytes written to buffer. */
++ uint32_t count;
++};
++typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
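++
++/*
++ * Illustrative sketch (not part of the interface): one incremental read
++ * of the hypervisor console ring from a dom0 control tool. do_sysctl()
++ * stands in for whatever mechanism issues the sysctl hypercall (e.g.
++ * the privcmd interface); buf and saved_index are caller state.
++ *
++ *     struct xen_sysctl sysctl = {
++ *         .cmd = XEN_SYSCTL_readconsole,
++ *         .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
++ *     };
++ *
++ *     sysctl.u.readconsole.clear       = 0;
++ *     sysctl.u.readconsole.incremental = 1;
++ *     sysctl.u.readconsole.index       = saved_index;
++ *     set_xen_guest_handle(sysctl.u.readconsole.buffer, buf);
++ *     sysctl.u.readconsole.count       = sizeof(buf);
++ *     rc = do_sysctl(&sysctl);
++ *     (on success, .count bytes were written and .index has advanced)
++ */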
++
++/* Get trace buffers machine base address */
++#define XEN_SYSCTL_tbuf_op 2
++struct xen_sysctl_tbuf_op {
++ /* IN variables */
++#define XEN_SYSCTL_TBUFOP_get_info 0
++#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
++#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
++#define XEN_SYSCTL_TBUFOP_set_size 3
++#define XEN_SYSCTL_TBUFOP_enable 4
++#define XEN_SYSCTL_TBUFOP_disable 5
++ uint32_t cmd;
++ /* IN/OUT variables */
++ struct xenctl_cpumap cpu_mask;
++ uint32_t evt_mask;
++ /* OUT variables */
++ uint64_aligned_t buffer_mfn;
++ uint32_t size;
++};
++typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
++
++/*
++ * Get physical information about the host machine
++ */
++#define XEN_SYSCTL_physinfo 3
++ /* (x86) The platform supports HVM guests. */
++#define _XEN_SYSCTL_PHYSCAP_hvm 0
++#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
++ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
++#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
++#define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
++struct xen_sysctl_physinfo {
++ uint32_t threads_per_core;
++ uint32_t cores_per_socket;
++ uint32_t nr_cpus;
++ uint32_t nr_nodes;
++ uint32_t cpu_khz;
++ uint64_aligned_t total_pages;
++ uint64_aligned_t free_pages;
++ uint64_aligned_t scrub_pages;
++ uint32_t hw_cap[8];
++
++ /*
++ * IN: maximum addressable entry in the caller-provided cpu_to_node array.
++ * OUT: largest cpu identifier in the system.
++ * If OUT is greater than IN then the cpu_to_node array is truncated!
++ */
++ uint32_t max_cpu_id;
++ /*
++ * If not NULL, this array is filled with the node identifier for each cpu.
++ * If a cpu has no node information (e.g., cpu not present) then the
++ * sentinel value ~0u is written.
++ * The size of this array is specified by the caller in @max_cpu_id.
++ * If the actual @max_cpu_id is smaller than the array then the trailing
++ * elements of the array will not be written by the sysctl.
++ */
++ XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
++
++ /* XEN_SYSCTL_PHYSCAP_??? */
++ uint32_t capabilities;
++};
++typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
++
++/*
++ * Get the ID of the current scheduler.
++ */
++#define XEN_SYSCTL_sched_id 4
++struct xen_sysctl_sched_id {
++ /* OUT variable */
++ uint32_t sched_id;
++};
++typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
++
++/* Interface for controlling Xen software performance counters. */
++#define XEN_SYSCTL_perfc_op 5
++/* Sub-operations: */
++#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
++#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
++struct xen_sysctl_perfc_desc {
++ char name[80]; /* name of perf counter */
++ uint32_t nr_vals; /* number of values for this counter */
++};
++typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
++typedef uint32_t xen_sysctl_perfc_val_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
++
++struct xen_sysctl_perfc_op {
++ /* IN variables. */
++ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */
++ /* OUT variables. */
++ uint32_t nr_counters; /* number of counter descriptions */
++ uint32_t nr_vals; /* number of values */
++ /* counter information (or NULL) */
++ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
++ /* counter values (or NULL) */
++ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
++};
++typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
++
++#define XEN_SYSCTL_getdomaininfolist 6
++struct xen_sysctl_getdomaininfolist {
++ /* IN variables. */
++ domid_t first_domain;
++ uint32_t max_domains;
++ XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
++ /* OUT variables. */
++ uint32_t num_domains;
++};
++typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
++
++/* Inject debug keys into Xen. */
++#define XEN_SYSCTL_debug_keys 7
++struct xen_sysctl_debug_keys {
++ /* IN variables. */
++ XEN_GUEST_HANDLE_64(char) keys;
++ uint32_t nr_keys;
++};
++typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
++
++/* Get physical CPU information. */
++#define XEN_SYSCTL_getcpuinfo 8
++struct xen_sysctl_cpuinfo {
++ uint64_aligned_t idletime;
++};
++typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
++struct xen_sysctl_getcpuinfo {
++ /* IN variables. */
++ uint32_t max_cpus;
++ XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
++ /* OUT variables. */
++ uint32_t nr_cpus;
++};
++typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
++
++#define XEN_SYSCTL_availheap 9
++struct xen_sysctl_availheap {
++ /* IN variables. */
++ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
++ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
++ int32_t node; /* NUMA node of interest (-1 for all nodes). */
++ /* OUT variables. */
++ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
++};
++typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
++
++#define XEN_SYSCTL_get_pmstat 10
++struct pm_px_val {
++ uint64_aligned_t freq; /* Px core frequency */
++ uint64_aligned_t residency; /* Px residency time */
++ uint64_aligned_t count; /* Px transition count */
++};
++typedef struct pm_px_val pm_px_val_t;
++DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);
++
++struct pm_px_stat {
++ uint8_t total; /* total Px states */
++ uint8_t usable; /* usable Px states */
++ uint8_t last; /* last Px state */
++ uint8_t cur; /* current Px state */
++ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
++ XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
++};
++typedef struct pm_px_stat pm_px_stat_t;
++DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
++
++struct pm_cx_stat {
++ uint32_t nr; /* entry nr in triggers & residencies, including C0 */
++ uint32_t last; /* last Cx state */
++ uint64_aligned_t idle_time; /* idle time from boot */
++ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */
++ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
++};
++
++struct xen_sysctl_get_pmstat {
++#define PMSTAT_CATEGORY_MASK 0xf0
++#define PMSTAT_PX 0x10
++#define PMSTAT_CX 0x20
++#define PMSTAT_get_max_px (PMSTAT_PX | 0x1)
++#define PMSTAT_get_pxstat (PMSTAT_PX | 0x2)
++#define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3)
++#define PMSTAT_get_max_cx (PMSTAT_CX | 0x1)
++#define PMSTAT_get_cxstat (PMSTAT_CX | 0x2)
++#define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3)
++ uint32_t type;
++ uint32_t cpuid;
++ union {
++ struct pm_px_stat getpx;
++ struct pm_cx_stat getcx;
++ /* other struct for tx, etc */
++ } u;
++};
++typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
++
++struct xen_sysctl {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
++ union {
++ struct xen_sysctl_readconsole readconsole;
++ struct xen_sysctl_tbuf_op tbuf_op;
++ struct xen_sysctl_physinfo physinfo;
++ struct xen_sysctl_sched_id sched_id;
++ struct xen_sysctl_perfc_op perfc_op;
++ struct xen_sysctl_getdomaininfolist getdomaininfolist;
++ struct xen_sysctl_debug_keys debug_keys;
++ struct xen_sysctl_getcpuinfo getcpuinfo;
++ struct xen_sysctl_availheap availheap;
++ struct xen_sysctl_get_pmstat get_pmstat;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_sysctl xen_sysctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
++
++#endif /* __XEN_PUBLIC_SYSCTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/trace.h linux-2.6.18-xen-3.3.0/include/xen/interface/trace.h
+--- linux-2.6.18.8/include/xen/interface/trace.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/trace.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,172 @@
++/******************************************************************************
++ * include/public/trace.h
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Mark Williamson, (C) 2004 Intel Research Cambridge
++ * Copyright (C) 2005 Bin Ren
++ */
++
++#ifndef __XEN_PUBLIC_TRACE_H__
++#define __XEN_PUBLIC_TRACE_H__
++
++#define TRACE_EXTRA_MAX 7
++#define TRACE_EXTRA_SHIFT 28
++
++/* Trace classes */
++#define TRC_CLS_SHIFT 16
++#define TRC_GEN 0x0001f000 /* General trace */
++#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
++#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
++#define TRC_HVM 0x0008f000 /* Xen HVM trace */
++#define TRC_MEM 0x0010f000 /* Xen memory trace */
++#define TRC_PV 0x0020f000 /* Xen PV traces */
++#define TRC_ALL 0x0ffff000
++#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
++#define TRC_HD_CYCLE_FLAG (1UL<<31)
++#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
++#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
++
++/* Trace subclasses */
++#define TRC_SUBCLS_SHIFT 12
++
++/* trace subclasses for SVM */
++#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
++#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
++
++/* Trace events per class */
++#define TRC_LOST_RECORDS (TRC_GEN + 1)
++#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
++#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
++
++#define TRC_SCHED_DOM_ADD (TRC_SCHED + 1)
++#define TRC_SCHED_DOM_REM (TRC_SCHED + 2)
++#define TRC_SCHED_SLEEP (TRC_SCHED + 3)
++#define TRC_SCHED_WAKE (TRC_SCHED + 4)
++#define TRC_SCHED_YIELD (TRC_SCHED + 5)
++#define TRC_SCHED_BLOCK (TRC_SCHED + 6)
++#define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7)
++#define TRC_SCHED_CTL (TRC_SCHED + 8)
++#define TRC_SCHED_ADJDOM (TRC_SCHED + 9)
++#define TRC_SCHED_SWITCH (TRC_SCHED + 10)
++#define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11)
++#define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12)
++#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13)
++#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
++#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
++
++#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
++#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
++#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
++
++#define TRC_PV_HYPERCALL (TRC_PV + 1)
++#define TRC_PV_TRAP (TRC_PV + 3)
++#define TRC_PV_PAGE_FAULT (TRC_PV + 4)
++#define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5)
++#define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6)
++#define TRC_PV_EMULATE_4GB (TRC_PV + 7)
++#define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8)
++#define TRC_PV_PAGING_FIXUP (TRC_PV + 9)
++#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10)
++#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
++#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
++ /* Indicates that addresses in trace record are 64 bits */
++#define TRC_64_FLAG (0x100)
++
++/* trace events per subclass */
++#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
++#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
++#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
++#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
++#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
++#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
++#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
++#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
++#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
++#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
++#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
++#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
++#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
++#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
++#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
++#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
++#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
++#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
++#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
++#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
++#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
++#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
++#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
++#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
++#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
++#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
++#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
++#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
++#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
++#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
++#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
++#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
++#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
++#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
++
++/* This structure represents a single trace buffer record. */
++struct t_rec {
++ uint32_t event:28;
++ uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */
++ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */
++ union {
++ struct {
++ uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
++ uint32_t extra_u32[7]; /* event data items */
++ } cycles;
++ struct {
++ uint32_t extra_u32[7]; /* event data items */
++ } nocycles;
++ } u;
++};
++
++/*
++ * This structure contains the metadata for a single trace buffer. The cons
++ * and prod fields index into an array of struct t_rec's.
++ */
++struct t_buf {
++ /* Assume the data buffer size is X. X is generally not a power of 2.
++ * CONS and PROD are incremented modulo (2*X):
++ * 0 <= cons < 2*X
++ * 0 <= prod < 2*X
++ * This is done because addition modulo X breaks at 2^32 when X is not a
++ * power of 2:
++ * (((2^32 - 1) % X) + 1) % X != (2^32) % X
++ */
++ uint32_t cons; /* Offset of next item to be consumed by control tools. */
++ uint32_t prod; /* Offset of next item to be produced by Xen. */
++ /* Records follow immediately after the meta-data header. */
++};
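++
++/*
++ * Illustrative sketch (not part of the interface): consumer-side index
++ * arithmetic for a buffer of X records, following the modulo-2X scheme
++ * described above. t_recs is assumed to point just past the t_buf
++ * header; a real consumer also needs memory barriers between reading
++ * prod and reading the records.
++ *
++ *     while (buf->cons != buf->prod) {
++ *         struct t_rec *rec = &t_recs[buf->cons % X];
++ *         process_record(rec);                (hypothetical handler)
++ *         buf->cons = (buf->cons + 1) % (2 * X);
++ *     }
++ */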
++
++#endif /* __XEN_PUBLIC_TRACE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/vcpu.h linux-2.6.18-xen-3.3.0/include/xen/interface/vcpu.h
+--- linux-2.6.18.8/include/xen/interface/vcpu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/vcpu.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,213 @@
++/******************************************************************************
++ * vcpu.h
++ *
++ * VCPU initialisation, query, and hotplug.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_VCPU_H__
++#define __XEN_PUBLIC_VCPU_H__
++
++/*
++ * Prototype for this hypercall is:
++ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
++ * @cmd == VCPUOP_??? (VCPU operation).
++ * @vcpuid == VCPU to operate on.
++ * @extra_args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Initialise a VCPU. Each VCPU can be initialised only once. A
++ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
++ *
++ * @extra_arg == pointer to vcpu_guest_context structure containing initial
++ * state for the VCPU.
++ */
++#define VCPUOP_initialise 0
++
++/*
++ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
++ * if the VCPU has not been initialised (VCPUOP_initialise).
++ */
++#define VCPUOP_up 1
++
++/*
++ * Bring down a VCPU (i.e., make it non-runnable).
++ * There are a few caveats that callers should observe:
++ * 1. This operation may return, and VCPU_is_up may return false, before the
++ * VCPU stops running (i.e., the command is asynchronous). It is a good
++ * idea to ensure that the VCPU has entered a non-critical loop before
++ * bringing it down. Alternatively, this operation is guaranteed
++ * synchronous if invoked by the VCPU itself.
++ * 2. After a VCPU is initialised, there is currently no way to drop all its
++ * references to domain memory. Even a VCPU that is down still holds
++ * memory references via its pagetable base pointer and GDT. It is good
++ * practice to move a VCPU onto an 'idle' or default page table, LDT and
++ * GDT before bringing it down.
++ */
++#define VCPUOP_down 2
++
++/* Returns 1 if the given VCPU is up. */
++#define VCPUOP_is_up 3
++
++/*
++ * Return information about the state and running time of a VCPU.
++ * @extra_arg == pointer to vcpu_runstate_info structure.
++ */
++#define VCPUOP_get_runstate_info 4
++struct vcpu_runstate_info {
++ /* VCPU's current state (RUNSTATE_*). */
++ int state;
++ /* When was current state entered (system time, ns)? */
++ uint64_t state_entry_time;
++ /*
++ * Time spent in each RUNSTATE_* (ns). The sum of these times is
++ * guaranteed not to drift from system time.
++ */
++ uint64_t time[4];
++};
++typedef struct vcpu_runstate_info vcpu_runstate_info_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
++
++/* VCPU is currently running on a physical CPU. */
++#define RUNSTATE_running 0
++
++/* VCPU is runnable, but not currently scheduled on any physical CPU. */
++#define RUNSTATE_runnable 1
++
++/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
++#define RUNSTATE_blocked 2
++
++/*
++ * VCPU is not runnable, but it is not blocked.
++ * This is a 'catch all' state for things like hotplug and pauses by the
++ * system administrator (or for critical sections in the hypervisor).
++ * RUNSTATE_blocked dominates this state (it is the preferred state).
++ */
++#define RUNSTATE_offline 3
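++
++/*
++ * Illustrative sketch (not part of the interface): deriving a VCPU's
++ * recent CPU usage from two runstate samples taken one interval apart.
++ * Assumes the kernel's HYPERVISOR_vcpu_op() wrapper.
++ *
++ *     struct vcpu_runstate_info s0, s1;
++ *     uint64_t busy_ns;
++ *
++ *     HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpuid, &s0);
++ *     ... wait for the sampling interval ...
++ *     HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpuid, &s1);
++ *     busy_ns = s1.time[RUNSTATE_running] - s0.time[RUNSTATE_running];
++ */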
++
++/*
++ * Register a shared memory area from which the guest may obtain its own
++ * runstate information without needing to execute a hypercall.
++ * Notes:
++ * 1. The registered address may be a virtual address, a physical address,
++ * or a guest handle, depending on the platform. On x86 systems a
++ * virtual address or a guest handle should be registered.
++ * 2. Only one shared area may be registered per VCPU. The shared area is
++ * updated by the hypervisor each time the VCPU is scheduled. Thus
++ * runstate.state will always be RUNSTATE_running and
++ * runstate.state_entry_time will indicate the system time at which the
++ * VCPU was last scheduled to run.
++ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
++ */
++#define VCPUOP_register_runstate_memory_area 5
++struct vcpu_register_runstate_memory_area {
++ union {
++ XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
++ struct vcpu_runstate_info *v;
++ uint64_t p;
++ } addr;
++};
++typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
++
++/*
++ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
++ * which can be set via these commands. Periods smaller than one millisecond
++ * may not be supported.
++ */
++#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
++#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
++struct vcpu_set_periodic_timer {
++ uint64_t period_ns;
++};
++typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
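++
++/*
++ * Illustrative sketch (not part of the interface): giving the calling
++ * VCPU a 10ms periodic timer. Assumes the kernel's HYPERVISOR_vcpu_op()
++ * wrapper and that a 10ms period is supported.
++ *
++ *     struct vcpu_set_periodic_timer t = { .period_ns = 10000000ULL };
++ *
++ *     rc = HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer,
++ *                             smp_processor_id(), &t);
++ */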
++
++/*
++ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
++ * timer which can be set via these commands.
++ */
++#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
++#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
++struct vcpu_set_singleshot_timer {
++ uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
++ uint32_t flags; /* VCPU_SSHOTTMR_??? */
++};
++typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
++
++/* Flags to VCPUOP_set_singleshot_timer. */
++ /* Require the timeout to be in the future (return -ETIME if it has already passed). */
++#define _VCPU_SSHOTTMR_future (0)
++#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
++
++/*
++ * Register a memory location in the guest address space for the
++ * vcpu_info structure. This allows the guest to place the vcpu_info
++ * structure in a convenient place, such as in a per-cpu data area.
++ * The pointer need not be page aligned, but the structure must not
++ * cross a page boundary.
++ *
++ * This may be called only once per vcpu.
++ */
++#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
++struct vcpu_register_vcpu_info {
++ uint64_t mfn; /* mfn of page to place vcpu_info */
++ uint32_t offset; /* offset within page */
++ uint32_t rsvd; /* unused */
++};
++typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
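++
++/*
++ * Illustrative sketch (not part of the upstream header): placing the
++ * vcpu_info in a per-CPU variable, assuming Linux-style virt_to_mfn()
++ * and PAGE_MASK helpers (the structure must not cross a page boundary):
++ *
++ *     struct vcpu_register_vcpu_info info;
++ *     info.mfn    = virt_to_mfn(&this_cpu_vcpu_info);
++ *     info.offset = (unsigned long)&this_cpu_vcpu_info & ~PAGE_MASK;
++ *     HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
++ */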
++
++/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
++#define VCPUOP_send_nmi 11
++
++/*
++ * Get the physical ID information for a pinned vcpu's underlying physical
++ * processor. The physical ID information is architecture-specific.
++ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
++ * greater are reserved.
++ * This command returns -EINVAL if it is not a valid operation for this VCPU.
++ */
++#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
++struct vcpu_get_physid {
++ uint64_t phys_id;
++};
++typedef struct vcpu_get_physid vcpu_get_physid_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
++#define xen_vcpu_physid_to_x86_apicid(physid) \
++ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
++#define xen_vcpu_physid_to_x86_acpiid(physid) \
++ ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
++
++#endif /* __XEN_PUBLIC_VCPU_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/version.h linux-2.6.18-xen-3.3.0/include/xen/interface/version.h
+--- linux-2.6.18.8/include/xen/interface/version.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/version.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,91 @@
++/******************************************************************************
++ * version.h
++ *
++ * Xen version, type, and compile information.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_VERSION_H__
++#define __XEN_PUBLIC_VERSION_H__
++
++/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
++
++/* arg == NULL; returns major:minor (16:16). */
++#define XENVER_version 0
++
++/* arg == xen_extraversion_t. */
++#define XENVER_extraversion 1
++typedef char xen_extraversion_t[16];
++#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
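++
++/*
++ * Illustrative sketch (not part of the upstream header): unpacking the
++ * major:minor (16:16) result, assuming a HYPERVISOR_xen_version()
++ * hypercall wrapper:
++ *
++ *     uint32_t ver   = HYPERVISOR_xen_version(XENVER_version, NULL);
++ *     uint16_t major = ver >> 16;
++ *     uint16_t minor = ver & 0xffff;
++ */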
++
++/* arg == xen_compile_info_t. */
++#define XENVER_compile_info 2
++struct xen_compile_info {
++ char compiler[64];
++ char compile_by[16];
++ char compile_domain[32];
++ char compile_date[32];
++};
++typedef struct xen_compile_info xen_compile_info_t;
++
++#define XENVER_capabilities 3
++typedef char xen_capabilities_info_t[1024];
++#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
++
++#define XENVER_changeset 4
++typedef char xen_changeset_info_t[64];
++#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
++
++#define XENVER_platform_parameters 5
++struct xen_platform_parameters {
++ unsigned long virt_start;
++};
++typedef struct xen_platform_parameters xen_platform_parameters_t;
++
++#define XENVER_get_features 6
++struct xen_feature_info {
++ unsigned int submap_idx; /* IN: which 32-bit submap to return */
++ uint32_t submap; /* OUT: 32-bit submap */
++};
++typedef struct xen_feature_info xen_feature_info_t;
++
++/* Declares the features reported by XENVER_get_features. */
++#include "features.h"
++
++/* arg == NULL; returns host memory page size. */
++#define XENVER_pagesize 7
++
++/* arg == xen_domain_handle_t. */
++#define XENVER_guest_handle 8
++
++#endif /* __XEN_PUBLIC_VERSION_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xencomm.h linux-2.6.18-xen-3.3.0/include/xen/interface/xencomm.h
+--- linux-2.6.18.8/include/xen/interface/xencomm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xencomm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,41 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) IBM Corp. 2006
++ */
++
++#ifndef _XEN_XENCOMM_H_
++#define _XEN_XENCOMM_H_
++
++/* A xencomm descriptor is a scatter/gather list containing physical
++ * addresses corresponding to a virtually contiguous memory area. The
++ * hypervisor translates these physical addresses to machine addresses to copy
++ * to and from the virtually contiguous area.
++ */
++
++#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
++#define XENCOMM_INVALID (~0UL)
++
++struct xencomm_desc {
++ uint32_t magic;
++ uint32_t nr_addrs; /* the number of entries in address[] */
++ uint64_t address[0];
++};
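++
++/*
++ * Illustrative sketch (not part of the upstream header): describing a
++ * two-page, page-aligned, virtually contiguous buffer, assuming a
++ * virt_to_phys()-style helper and a desc allocated with room for the
++ * trailing address[] entries:
++ *
++ *     desc->magic      = XENCOMM_MAGIC;
++ *     desc->nr_addrs   = 2;
++ *     desc->address[0] = virt_to_phys(buf);
++ *     desc->address[1] = virt_to_phys(buf + PAGE_SIZE);
++ */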
++
++#endif /* _XEN_XENCOMM_H_ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xen-compat.h linux-2.6.18-xen-3.3.0/include/xen/interface/xen-compat.h
+--- linux-2.6.18.8/include/xen/interface/xen-compat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xen-compat.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,44 @@
++/******************************************************************************
++ * xen-compat.h
++ *
++ * Guest OS interface to Xen. Compatibility layer.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Christian Limpach
++ */
++
++#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
++#define __XEN_PUBLIC_XEN_COMPAT_H__
++
++#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209
++
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++/* Xen is built with matching headers and implements the latest interface. */
++#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
++#elif !defined(__XEN_INTERFACE_VERSION__)
++/* Guests which do not specify a version get the legacy interface. */
++#define __XEN_INTERFACE_VERSION__ 0x00000000
++#endif
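++
++/*
++ * Illustrative sketch (not part of the upstream header): a guest that
++ * wants a specific compatibility level defines the version before
++ * including any interface header, e.g.:
++ *
++ *     #define __XEN_INTERFACE_VERSION__ 0x00030205
++ *     #include <xen/interface/xen.h>
++ */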
++
++#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
++#error "These header files do not support the requested interface version."
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xen.h linux-2.6.18-xen-3.3.0/include/xen/interface/xen.h
+--- linux-2.6.18.8/include/xen/interface/xen.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xen.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,639 @@
++/******************************************************************************
++ * xen.h
++ *
++ * Guest OS interface to Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_XEN_H__
++#define __XEN_PUBLIC_XEN_H__
++
++#include "xen-compat.h"
++
++#if defined(__i386__) || defined(__x86_64__)
++#include "arch-x86/xen.h"
++#elif defined(__ia64__)
++#include "arch-ia64.h"
++#else
++#error "Unsupported architecture"
++#endif
++
++#ifndef __ASSEMBLY__
++/* Guest handles for primitive C types. */
++DEFINE_XEN_GUEST_HANDLE(char);
++__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
++DEFINE_XEN_GUEST_HANDLE(int);
++__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
++DEFINE_XEN_GUEST_HANDLE(long);
++__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
++DEFINE_XEN_GUEST_HANDLE(void);
++
++DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
++#endif
++
++/*
++ * HYPERCALLS
++ */
++
++#define __HYPERVISOR_set_trap_table 0
++#define __HYPERVISOR_mmu_update 1
++#define __HYPERVISOR_set_gdt 2
++#define __HYPERVISOR_stack_switch 3
++#define __HYPERVISOR_set_callbacks 4
++#define __HYPERVISOR_fpu_taskswitch 5
++#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
++#define __HYPERVISOR_platform_op 7
++#define __HYPERVISOR_set_debugreg 8
++#define __HYPERVISOR_get_debugreg 9
++#define __HYPERVISOR_update_descriptor 10
++#define __HYPERVISOR_memory_op 12
++#define __HYPERVISOR_multicall 13
++#define __HYPERVISOR_update_va_mapping 14
++#define __HYPERVISOR_set_timer_op 15
++#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
++#define __HYPERVISOR_xen_version 17
++#define __HYPERVISOR_console_io 18
++#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
++#define __HYPERVISOR_grant_table_op 20
++#define __HYPERVISOR_vm_assist 21
++#define __HYPERVISOR_update_va_mapping_otherdomain 22
++#define __HYPERVISOR_iret 23 /* x86 only */
++#define __HYPERVISOR_vcpu_op 24
++#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
++#define __HYPERVISOR_mmuext_op 26
++#define __HYPERVISOR_xsm_op 27
++#define __HYPERVISOR_nmi_op 28
++#define __HYPERVISOR_sched_op 29
++#define __HYPERVISOR_callback_op 30
++#define __HYPERVISOR_xenoprof_op 31
++#define __HYPERVISOR_event_channel_op 32
++#define __HYPERVISOR_physdev_op 33
++#define __HYPERVISOR_hvm_op 34
++#define __HYPERVISOR_sysctl 35
++#define __HYPERVISOR_domctl 36
++#define __HYPERVISOR_kexec_op 37
++
++/* Architecture-specific hypercall definitions. */
++#define __HYPERVISOR_arch_0 48
++#define __HYPERVISOR_arch_1 49
++#define __HYPERVISOR_arch_2 50
++#define __HYPERVISOR_arch_3 51
++#define __HYPERVISOR_arch_4 52
++#define __HYPERVISOR_arch_5 53
++#define __HYPERVISOR_arch_6 54
++#define __HYPERVISOR_arch_7 55
++
++/*
++ * HYPERCALL COMPATIBILITY.
++ */
++
++/* New sched_op hypercall introduced in 0x00030101. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030101
++#undef __HYPERVISOR_sched_op
++#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
++#endif
++
++/* New event-channel and physdev hypercalls introduced in 0x00030202. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030202
++#undef __HYPERVISOR_event_channel_op
++#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
++#undef __HYPERVISOR_physdev_op
++#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
++#endif
++
++/* New platform_op hypercall introduced in 0x00030204. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030204
++#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
++#endif
++
++/*
++ * VIRTUAL INTERRUPTS
++ *
++ * Virtual interrupts that a guest OS may receive from Xen.
++ *
++ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
++ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
++ * The latter can be allocated only once per guest: they must initially be
++ * allocated to VCPU0 but can subsequently be re-bound.
++ */
++#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
++#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
++#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
++#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
++#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
++#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
++#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
++#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
++
++/* Architecture-specific VIRQ definitions. */
++#define VIRQ_ARCH_0 16
++#define VIRQ_ARCH_1 17
++#define VIRQ_ARCH_2 18
++#define VIRQ_ARCH_3 19
++#define VIRQ_ARCH_4 20
++#define VIRQ_ARCH_5 21
++#define VIRQ_ARCH_6 22
++#define VIRQ_ARCH_7 23
++
++#define NR_VIRQS 24
++
++/*
++ * MMU-UPDATE REQUESTS
++ *
++ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ * ptr[1:0] specifies the appropriate MMU_* command.
++ *
++ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
++ * Updates an entry in a page table. If updating an L1 table, and the new
++ * table entry is valid/present, the mapped frame must belong to the FD, if
++ * an FD has been specified. If attempting to map an I/O page then the
++ * caller assumes the privilege of the FD.
++ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
++ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
++ * ptr[:2] -- Machine address of the page-table entry to modify.
++ * val -- Value to write.
++ *
++ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
++ * Updates an entry in the machine->pseudo-physical mapping table.
++ * ptr[:2] -- Machine address within the frame whose mapping to modify.
++ * The frame must belong to the FD, if one is specified.
++ * val -- Value to write into the mapping entry.
++ *
++ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
++ * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
++ * with those in @val.
++ */
++#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
++#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
++#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
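++
++/*
++ * Illustrative sketch (not part of the upstream header): one checked
++ * PTE write, assuming the Linux-style wrapper
++ * HYPERVISOR_mmu_update(req, count, success_count, domid). The command
++ * travels in ptr[1:0], and MMU_NORMAL_PT_UPDATE is 0, so a plain OR
++ * suffices:
++ *
++ *     struct mmu_update u;
++ *     u.ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE;
++ *     u.val = new_pte_val;
++ *     HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
++ */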
++
++/*
++ * MMU EXTENDED OPERATIONS
++ *
++ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ *
++ * cmd: MMUEXT_(UN)PIN_*_TABLE
++ * mfn: Machine frame number to be (un)pinned as a p.t. page.
++ * The frame must belong to the FD, if one is specified.
++ *
++ * cmd: MMUEXT_NEW_BASEPTR
++ * mfn: Machine frame number of new page-table base to install in MMU.
++ *
++ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
++ * mfn: Machine frame number of new page-table base to install in MMU
++ * when in user space.
++ *
++ * cmd: MMUEXT_TLB_FLUSH_LOCAL
++ * No additional arguments. Flushes local TLB.
++ *
++ * cmd: MMUEXT_INVLPG_LOCAL
++ * linear_addr: Linear address to be flushed from the local TLB.
++ *
++ * cmd: MMUEXT_TLB_FLUSH_MULTI
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ *
++ * cmd: MMUEXT_INVLPG_MULTI
++ * linear_addr: Linear address to be flushed.
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ *
++ * cmd: MMUEXT_TLB_FLUSH_ALL
++ * No additional arguments. Flushes all VCPUs' TLBs.
++ *
++ * cmd: MMUEXT_INVLPG_ALL
++ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
++ *
++ * cmd: MMUEXT_FLUSH_CACHE
++ * No additional arguments. Writes back and flushes cache contents.
++ *
++ * cmd: MMUEXT_SET_LDT
++ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
++ * nr_ents: Number of entries in LDT.
++ */
++#define MMUEXT_PIN_L1_TABLE 0
++#define MMUEXT_PIN_L2_TABLE 1
++#define MMUEXT_PIN_L3_TABLE 2
++#define MMUEXT_PIN_L4_TABLE 3
++#define MMUEXT_UNPIN_TABLE 4
++#define MMUEXT_NEW_BASEPTR 5
++#define MMUEXT_TLB_FLUSH_LOCAL 6
++#define MMUEXT_INVLPG_LOCAL 7
++#define MMUEXT_TLB_FLUSH_MULTI 8
++#define MMUEXT_INVLPG_MULTI 9
++#define MMUEXT_TLB_FLUSH_ALL 10
++#define MMUEXT_INVLPG_ALL 11
++#define MMUEXT_FLUSH_CACHE 12
++#define MMUEXT_SET_LDT 13
++#define MMUEXT_NEW_USER_BASEPTR 15
++
++#ifndef __ASSEMBLY__
++struct mmuext_op {
++ unsigned int cmd;
++ union {
++ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
++ xen_pfn_t mfn;
++ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
++ unsigned long linear_addr;
++ } arg1;
++ union {
++ /* SET_LDT */
++ unsigned int nr_ents;
++ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030205
++ XEN_GUEST_HANDLE(void) vcpumask;
++#else
++ void *vcpumask;
++#endif
++ } arg2;
++};
++typedef struct mmuext_op mmuext_op_t;
++DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
++#endif
++
++/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
++/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
++/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
++#define UVMF_NONE (0UL<<0) /* No flushing at all. */
++#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
++#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
++#define UVMF_FLUSHTYPE_MASK (3UL<<0)
++#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
++#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
++#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
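++
++/*
++ * Illustrative sketch (not part of the upstream header): remapping one
++ * virtual address and flushing just that entry from the local TLB,
++ * assuming the Linux-style HYPERVISOR_update_va_mapping() wrapper:
++ *
++ *     HYPERVISOR_update_va_mapping(va, new_pte,
++ *                                  UVMF_INVLPG | UVMF_LOCAL);
++ */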
++
++/*
++ * Commands to HYPERVISOR_console_io().
++ */
++#define CONSOLEIO_write 0
++#define CONSOLEIO_read 1
++
++/*
++ * Commands to HYPERVISOR_vm_assist().
++ */
++#define VMASST_CMD_enable 0
++#define VMASST_CMD_disable 1
++
++/* x86/32 guests: simulate full 4GB segment limits. */
++#define VMASST_TYPE_4gb_segments 0
++
++/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
++#define VMASST_TYPE_4gb_segments_notify 1
++
++/*
++ * x86 guests: support writes to bottom-level PTEs.
++ * NB1. Page-directory entries cannot be written.
++ * NB2. Guest must continue to remove all writable mappings of PTEs.
++ */
++#define VMASST_TYPE_writable_pagetables 2
++
++/* x86/PAE guests: support PDPTs above 4GB. */
++#define VMASST_TYPE_pae_extended_cr3 3
++
++#define MAX_VMASST_TYPE 3
++
++#ifndef __ASSEMBLY__
++
++typedef uint16_t domid_t;
++
++/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
++#define DOMID_FIRST_RESERVED (0x7FF0U)
++
++/* DOMID_SELF is used in certain contexts to refer to oneself. */
++#define DOMID_SELF (0x7FF0U)
++
++/*
++ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
++ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
++ * is useful to ensure that no mappings to the OS's own heap are accidentally
++ * installed. (e.g., in Linux this could cause havoc as reference counts
++ * aren't adjusted on the I/O-mapping code path).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
++ * be specified by any calling domain.
++ */
++#define DOMID_IO (0x7FF1U)
++
++/*
++ * DOMID_XEN is used to allow privileged domains to map restricted parts of
++ * Xen's heap space (e.g., the machine_to_phys table).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
++ * the caller is privileged.
++ */
++#define DOMID_XEN (0x7FF2U)
++
++/*
++ * Send an array of these to HYPERVISOR_mmu_update().
++ * NB. The fields are natural pointer/address size for this architecture.
++ */
++struct mmu_update {
++ uint64_t ptr; /* Machine address of PTE. */
++ uint64_t val; /* New contents of PTE. */
++};
++typedef struct mmu_update mmu_update_t;
++DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
++
++/*
++ * Send an array of these to HYPERVISOR_multicall().
++ * NB. The fields are natural register size for this architecture.
++ */
++struct multicall_entry {
++ unsigned long op, result;
++ unsigned long args[6];
++};
++typedef struct multicall_entry multicall_entry_t;
++DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
++
++/*
++ * Event channel endpoints per domain:
++ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
++ */
++#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
++
++struct vcpu_time_info {
++ /*
++ * Updates to the following values are preceded and followed by an
++ * increment of 'version'. The guest can therefore detect updates by
++ * looking for changes to 'version'. If the least-significant bit of
++ * the version number is set then an update is in progress and the guest
++ * must wait to read a consistent set of values.
++ * The correct way to interact with the version number is similar to
++ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
++ */
++ uint32_t version;
++ uint32_t pad0;
++ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
++ uint64_t system_time; /* Time, in nanosecs, since boot. */
++ /*
++ * Current system time:
++ * system_time +
++ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
++ * CPU frequency (Hz):
++ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
++ */
++ uint32_t tsc_to_system_mul;
++ int8_t tsc_shift;
++ int8_t pad1[3];
++}; /* 32 bytes */
++typedef struct vcpu_time_info vcpu_time_info_t;
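++
++/*
++ * Illustrative sketch (not part of the upstream header): the
++ * seqlock-style read loop described above, assuming an rmb() read
++ * barrier is available:
++ *
++ *     uint32_t ver;
++ *     do {
++ *         ver = t->version;
++ *         rmb();
++ *         tsc   = t->tsc_timestamp;
++ *         stime = t->system_time;
++ *         mul   = t->tsc_to_system_mul;
++ *         shift = t->tsc_shift;
++ *         rmb();
++ *     } while ((ver & 1) || (ver != t->version));
++ */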
++
++struct vcpu_info {
++ /*
++ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
++ * a pending notification for a particular VCPU. It is then cleared
++ * by the guest OS /before/ checking for pending work, thus avoiding
++ * a set-and-check race. Note that the mask is only accessed by Xen
++ * on the CPU that is currently hosting the VCPU. This means that the
++ * pending and mask flags can be updated by the guest without special
++ * synchronisation (i.e., no need for the x86 LOCK prefix).
++ * This may seem suboptimal because if the pending flag is set by
++ * a different CPU then an IPI may be scheduled even when the mask
++ * is set. However, note:
++ * 1. The task of 'interrupt holdoff' is covered by the per-event-
++ * channel mask bits. A 'noisy' event that is continually being
++ * triggered can be masked at source at this very precise
++ * granularity.
++ * 2. The main purpose of the per-VCPU mask is therefore to restrict
++ * reentrant execution: whether for concurrency control, or to
++ * prevent unbounded stack usage. Whatever the purpose, we expect
++ * that the mask will be asserted only for short periods at a time,
++ * and so the likelihood of a 'spurious' IPI is suitably small.
++ * The mask is read before making an event upcall to the guest: a
++ * non-zero mask therefore guarantees that the VCPU will not receive
++ * an upcall activation. The mask is cleared when the VCPU requests
++ * to block: this avoids wakeup-waiting races.
++ */
++ uint8_t evtchn_upcall_pending;
++ uint8_t evtchn_upcall_mask;
++ unsigned long evtchn_pending_sel;
++ struct arch_vcpu_info arch;
++ struct vcpu_time_info time;
++}; /* 64 bytes (x86) */
++#ifndef __XEN__
++typedef struct vcpu_info vcpu_info_t;
++#endif
++
++/*
++ * Xen/kernel shared data -- pointer provided in start_info.
++ *
++ * This structure is defined to be both smaller than a page, and the
++ * only data on the shared page, but may vary in actual size even within
++ * compatible Xen versions; guests should not rely on the size
++ * of this structure remaining constant.
++ */
++struct shared_info {
++ struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
++
++ /*
++ * A domain can create "event channels" on which it can send and receive
++ * asynchronous event notifications. There are three classes of event that
++ * are delivered by this mechanism:
++ * 1. Bi-directional inter- and intra-domain connections. Domains must
++ * arrange out-of-band to set up a connection (usually by allocating
++ * an unbound 'listener' port and advertising that via a storage service
++ * such as xenstore).
++ * 2. Physical interrupts. A domain with suitable hardware-access
++ * privileges can bind an event-channel port to a physical interrupt
++ * source.
++ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
++ * port to a virtual interrupt source, such as the virtual-timer
++ * device or the emergency console.
++ *
++ * Event channels are addressed by a "port index". Each channel is
++ * associated with two bits of information:
++ * 1. PENDING -- notifies the domain that there is a pending notification
++ * to be processed. This bit is cleared by the guest.
++ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
++ * will cause an asynchronous upcall to be scheduled. This bit is only
++ * updated by the guest. It is read-only within Xen. If a channel
++ * becomes pending while the channel is masked then the 'edge' is lost
++ * (i.e., when the channel is unmasked, the guest must manually handle
++ * pending notifications as no upcall will be scheduled by Xen).
++ *
++ * To expedite scanning of pending notifications, any 0->1 pending
++ * transition on an unmasked channel causes a corresponding bit in a
++ * per-vcpu selector word to be set. Each bit in the selector covers a
++ * 'C long' in the PENDING bitfield array.
++ */
++ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
++ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
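++
++ /*
++ * Illustrative sketch (not part of the upstream header): scanning for
++ * pending, unmasked events via the per-VCPU selector word, assuming
++ * Linux-style xchg() and __ffs() helpers:
++ *
++ *     unsigned long sel = xchg(&vcpu->evtchn_pending_sel, 0);
++ *     while (sel) {
++ *         unsigned int word = __ffs(sel);
++ *         sel &= ~(1UL << word);
++ *         unsigned long bits = s->evtchn_pending[word] &
++ *                              ~s->evtchn_mask[word];
++ *         ... each set bit in bits is a pending port to service ...
++ *     }
++ */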
++
++ /*
++ * Wallclock time: updated only by control software. Guests should base
++ * their gettimeofday() syscall on this wallclock-base value.
++ */
++ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
++ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
++ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
++
++ struct arch_shared_info arch;
++
++};
++#ifndef __XEN__
++typedef struct shared_info shared_info_t;
++#endif
++
++/*
++ * Start-of-day memory layout:
++ * 1. The domain is started within a contiguous virtual-memory region.
++ * 2. The contiguous region ends on an aligned 4MB boundary.
++ * 3. This is the order of bootstrap elements in the initial virtual region:
++ * a. relocated kernel image
++ * b. initial ram disk [mod_start, mod_len]
++ * c. list of allocated page frames [mfn_list, nr_pages]
++ * d. start_info_t structure [register ESI (x86)]
++ * e. bootstrap page tables [pt_base, CR3 (x86)]
++ * f. bootstrap stack [register ESP (x86)]
++ * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
++ * 5. The initial ram disk may be omitted.
++ * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
++ * layout for the domain. In particular, the bootstrap virtual-memory
++ * region is a 1:1 mapping to the first section of the pseudo-physical map.
++ * 7. All bootstrap elements are mapped read-writable for the guest OS. The
++ * only exception is the bootstrap page table, which is mapped read-only.
++ * 8. There is guaranteed to be at least 512kB padding after the final
++ * bootstrap element. If necessary, the bootstrap virtual region is
++ * extended by an extra 4MB to ensure this.
++ */
++
++#define MAX_GUEST_CMDLINE 1024
++struct start_info {
++ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
++ char magic[32]; /* "xen-<version>-<platform>". */
++ unsigned long nr_pages; /* Total pages allocated to this domain. */
++ unsigned long shared_info; /* MACHINE address of shared info struct. */
++ uint32_t flags; /* SIF_xxx flags. */
++ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
++ uint32_t store_evtchn; /* Event channel for store communication. */
++ union {
++ struct {
++ xen_pfn_t mfn; /* MACHINE page number of console page. */
++ uint32_t evtchn; /* Event channel for console page. */
++ } domU;
++ struct {
++ uint32_t info_off; /* Offset of console_info struct. */
++ uint32_t info_size; /* Size of console_info struct from start.*/
++ } dom0;
++ } console;
++ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
++ unsigned long pt_base; /* VIRTUAL address of page directory. */
++ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
++ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
++ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
++ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
++ int8_t cmd_line[MAX_GUEST_CMDLINE];
++};
++typedef struct start_info start_info_t;
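++
++/*
++ * Illustrative sketch (not part of the upstream header): sanity-checking
++ * the start_info handed to the guest (register ESI on x86); magic has
++ * the form "xen-<version>-<platform>":
++ *
++ *     if (strncmp(si->magic, "xen-", 4) != 0)
++ *         panic("not started by Xen");
++ *
++ * panic() stands in for whatever fatal-error path the guest uses.
++ */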
++
++/* New console union for dom0 introduced in 0x00030203. */
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++#define console_mfn console.domU.mfn
++#define console_evtchn console.domU.evtchn
++#endif
++
++/* These flags are passed in the 'flags' field of start_info_t. */
++#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
++#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
++#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
++
++typedef struct dom0_vga_console_info {
++ uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
++#define XEN_VGATYPE_TEXT_MODE_3 0x03
++#define XEN_VGATYPE_VESA_LFB 0x23
++
++ union {
++ struct {
++ /* Font height, in pixels. */
++ uint16_t font_height;
++ /* Cursor location (column, row). */
++ uint16_t cursor_x, cursor_y;
++ /* Number of rows and columns (dimensions in characters). */
++ uint16_t rows, columns;
++ } text_mode_3;
++
++ struct {
++ /* Width and height, in pixels. */
++ uint16_t width, height;
++ /* Bytes per scan line. */
++ uint16_t bytes_per_line;
++ /* Bits per pixel. */
++ uint16_t bits_per_pixel;
++ /* LFB physical address, and size (in units of 64kB). */
++ uint32_t lfb_base;
++ uint32_t lfb_size;
++ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
++ uint8_t red_pos, red_size;
++ uint8_t green_pos, green_size;
++ uint8_t blue_pos, blue_size;
++ uint8_t rsvd_pos, rsvd_size;
++#if __XEN_INTERFACE_VERSION__ >= 0x00030206
++ /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
++ uint32_t gbl_caps;
++ /* Mode attributes (offset 0x0, VESA command 0x4f01). */
++ uint16_t mode_attrs;
++#endif
++ } vesa_lfb;
++ } u;
++} dom0_vga_console_info_t;
++#define xen_vga_console_info dom0_vga_console_info
++#define xen_vga_console_info_t dom0_vga_console_info_t
++
++typedef uint8_t xen_domain_handle_t[16];
++
++/* Turn a plain number into a C unsigned long constant. */
++#define __mk_unsigned_long(x) x ## UL
++#define mk_unsigned_long(x) __mk_unsigned_long(x)
++
++__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
++__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
++__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
++__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
++
++#else /* __ASSEMBLY__ */
++
++/* In assembly code we cannot use C numeric constant suffixes. */
++#define mk_unsigned_long(x) x
++
++#endif /* !__ASSEMBLY__ */
++
++/* Default definitions for macros used by domctl/sysctl. */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#ifndef uint64_aligned_t
++#define uint64_aligned_t uint64_t
++#endif
++#ifndef XEN_GUEST_HANDLE_64
++#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
++#endif
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xenoprof.h linux-2.6.18-xen-3.3.0/include/xen/interface/xenoprof.h
+--- linux-2.6.18.8/include/xen/interface/xenoprof.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xenoprof.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,138 @@
++/******************************************************************************
++ * xenoprof.h
++ *
++ * Interface for enabling system-wide profiling based on hardware performance
++ * counters.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ * Written by Aravind Menon & Jose Renato Santos
++ */
++
++#ifndef __XEN_PUBLIC_XENOPROF_H__
++#define __XEN_PUBLIC_XENOPROF_H__
++
++#include "xen.h"
++
++/*
++ * Commands to HYPERVISOR_xenoprof_op().
++ */
++#define XENOPROF_init 0
++#define XENOPROF_reset_active_list 1
++#define XENOPROF_reset_passive_list 2
++#define XENOPROF_set_active 3
++#define XENOPROF_set_passive 4
++#define XENOPROF_reserve_counters 5
++#define XENOPROF_counter 6
++#define XENOPROF_setup_events 7
++#define XENOPROF_enable_virq 8
++#define XENOPROF_start 9
++#define XENOPROF_stop 10
++#define XENOPROF_disable_virq 11
++#define XENOPROF_release_counters 12
++#define XENOPROF_shutdown 13
++#define XENOPROF_get_buffer 14
++#define XENOPROF_set_backtrace 15
++#define XENOPROF_last_op 15
++
++#define MAX_OPROF_EVENTS 32
++#define MAX_OPROF_DOMAINS 25
++#define XENOPROF_CPU_TYPE_SIZE 64
++
++/* Xenoprof performance events (not Xen events) */
++struct event_log {
++ uint64_t eip;
++ uint8_t mode;
++ uint8_t event;
++};
++
++/* PC value that indicates a special code */
++#define XENOPROF_ESCAPE_CODE ~0UL
++/* Transient events for the xenoprof->oprofile cpu buf */
++#define XENOPROF_TRACE_BEGIN 1
++
++/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
++struct xenoprof_buf {
++ uint32_t event_head;
++ uint32_t event_tail;
++ uint32_t event_size;
++ uint32_t vcpu_id;
++ uint64_t xen_samples;
++ uint64_t kernel_samples;
++ uint64_t user_samples;
++ uint64_t lost_samples;
++ struct event_log event_log[1];
++};
++#ifndef __XEN__
++typedef struct xenoprof_buf xenoprof_buf_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
++#endif
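++
++/*
++ * Illustrative sketch (not part of the upstream header): draining the
++ * per-VCPU sample ring, on the assumption that Xen produces at
++ * event_head and the guest consumes at event_tail, both modulo
++ * event_size:
++ *
++ *     while (buf->event_tail != buf->event_head) {
++ *         struct event_log *e = &buf->event_log[buf->event_tail];
++ *         consume_sample(e->eip, e->mode, e->event);
++ *         buf->event_tail = (buf->event_tail + 1) % buf->event_size;
++ *     }
++ *
++ * consume_sample() is a hypothetical guest-side handler.
++ */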
++
++struct xenoprof_init {
++ int32_t num_events;
++ int32_t is_primary;
++ char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++};
++typedef struct xenoprof_init xenoprof_init_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
++
++struct xenoprof_get_buffer {
++ int32_t max_samples;
++ int32_t nbuf;
++ int32_t bufsize;
++ uint64_t buf_gmaddr;
++};
++typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
++
++struct xenoprof_counter {
++ uint32_t ind;
++ uint64_t count;
++ uint32_t enabled;
++ uint32_t event;
++ uint32_t hypervisor;
++ uint32_t kernel;
++ uint32_t user;
++ uint64_t unit_mask;
++};
++typedef struct xenoprof_counter xenoprof_counter_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
++
++typedef struct xenoprof_passive {
++ uint16_t domain_id;
++ int32_t max_samples;
++ int32_t nbuf;
++ int32_t bufsize;
++ uint64_t buf_gmaddr;
++} xenoprof_passive_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
++
++
++#endif /* __XEN_PUBLIC_XENOPROF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xsm/acm.h linux-2.6.18-xen-3.3.0/include/xen/interface/xsm/acm.h
+--- linux-2.6.18.8/include/xen/interface/xsm/acm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xsm/acm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,235 @@
++/*
++ * acm.h: Xen access control module interface definitions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
++
++#include "../xen.h"
++
++/* If ACM_DEBUG is defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode).
++ */
++/* #define ACM_DEBUG */
++
++#ifdef ACM_DEBUG
++# define printkd(fmt, args...) printk(fmt,## args)
++#else
++# define printkd(fmt, args...)
++#endif
++
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID 0x0
++#define ACM_DEFAULT_LOCAL_SSID 0x0
++
++/* Internal ACM ERROR types */
++#define ACM_OK 0
++#define ACM_UNDEF -1
++#define ACM_INIT_SSID_ERROR -2
++#define ACM_INIT_SOID_ERROR -3
++#define ACM_ERROR -4
++
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED 0
++#define ACM_ACCESS_DENIED -111
++#define ACM_NULL_POINTER_ERROR -200
++
++/*
++ Error codes reported when testing a new policy.
++ These error codes are reported in an array of tuples, where
++ each error code is followed by a parameter describing the error
++ more closely, such as a domain id.
++*/
++#define ACM_EVTCHN_SHARING_VIOLATION 0x100
++#define ACM_GNTTAB_SHARING_VIOLATION 0x101
++#define ACM_DOMAIN_LOOKUP 0x102
++#define ACM_CHWALL_CONFLICT 0x103
++#define ACM_SSIDREF_IN_USE 0x104
++
++
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
++
++/* combinations have secondary policy component in higher 4bit */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
++
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \
++ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \
++ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
++ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
++ "UNDEFINED"
++
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 4
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION 1
++
++/* defines a ssid reference used by xen */
++typedef uint32_t ssidref_t;
++
++/* hooks that are known to domains */
++#define ACMHOOK_none 0
++#define ACMHOOK_sharing 1
++#define ACMHOOK_authorization 2
++#define ACMHOOK_conflictset 3
++
++/* -------security policy relevant type definitions-------- */
++
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
++
++/* CHINESE WALL POLICY DATA STRUCTURES
++ *
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the
++ * conflicting types to its type are decremented.
++ * If a domain has multiple types, this procedure works over
++ * all those types.
++ *
++ * conflict_aggregate_set[i] holds the number of
++ * running domains that have a conflict with type i.
++ *
++ * running_types[i] holds the number of running domains
++ * that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ * with type i and is "1" otherwise.
++ */
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC 0x0001debc
++
++/* size of the SHA1 hash identifying the XML policy from which the
++ binary policy was created */
++#define ACM_SHA1_HASH_SIZE 20
++
++/* Each offset is given in bytes from the start of the struct
++ * it is part of. */
++
++/* V3 of the policy buffer added a version structure */
++struct acm_policy_version
++{
++ uint32_t major;
++ uint32_t minor;
++};
++
++
++/* each buffer consists of all policy information for
++ * the respective policy given in the policy code
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because binary policies are also created with external
++ * tools that assume packed representations (e.g. the Java tool).
++ */
++struct acm_policy_buffer {
++ uint32_t magic;
++ uint32_t policy_version; /* ACM_POLICY_VERSION */
++ uint32_t len;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_buffer_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_buffer_offset;
++ struct acm_policy_version xml_pol_version; /* added in V3 */
++ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */
++};
++
++
++struct acm_policy_reference_buffer {
++ uint32_t len;
++};
++
++struct acm_chwall_policy_buffer {
++ uint32_t policy_version; /* ACM_CHWALL_VERSION */
++ uint32_t policy_code;
++ uint32_t chwall_max_types;
++ uint32_t chwall_max_ssidrefs;
++ uint32_t chwall_max_conflictsets;
++ uint32_t chwall_ssid_offset;
++ uint32_t chwall_conflict_sets_offset;
++ uint32_t chwall_running_types_offset;
++ uint32_t chwall_conflict_aggregate_offset;
++};
++
++struct acm_ste_policy_buffer {
++ uint32_t policy_version; /* ACM_STE_VERSION */
++ uint32_t policy_code;
++ uint32_t ste_max_types;
++ uint32_t ste_max_ssidrefs;
++ uint32_t ste_ssid_offset;
++};
++
++struct acm_stats_buffer {
++ uint32_t magic;
++ uint32_t len;
++ uint32_t primary_policy_code;
++ uint32_t primary_stats_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_stats_offset;
++};
++
++struct acm_ste_stats_buffer {
++ uint32_t ec_eval_count;
++ uint32_t gt_eval_count;
++ uint32_t ec_denied_count;
++ uint32_t gt_denied_count;
++ uint32_t ec_cachehit_count;
++ uint32_t gt_cachehit_count;
++};
++
++struct acm_ssid_buffer {
++ uint32_t len;
++ ssidref_t ssidref;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_max_types;
++ uint32_t primary_types_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_max_types;
++ uint32_t secondary_types_offset;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xsm/acm_ops.h linux-2.6.18-xen-3.3.0/include/xen/interface/xsm/acm_ops.h
+--- linux-2.6.18.8/include/xen/interface/xsm/acm_ops.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xsm/acm_ops.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,159 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005,2006 International Business Machines Corporation.
++ */
++
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
++
++#include "../xen.h"
++#include "acm.h"
++
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This makes sure that old versions of acm tools will stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define ACM_INTERFACE_VERSION 0xAAAA000A
++
++/************************************************************************/
++
++/*
++ * Prototype for this hypercall is:
++ * int acm_op(int cmd, void *args)
++ * @cmd == ACMOP_??? (access control module operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++
++#define ACMOP_setpolicy 1
++struct acm_setpolicy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pushcache;
++ uint32_t pushcache_size;
++};
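++
++/*
++ * Illustrative sketch (not part of the upstream header): pushing a
++ * binary policy, assuming an acm_op()-style wrapper matching the
++ * prototype above and the usual set_xen_guest_handle() helper:
++ *
++ *     struct acm_setpolicy sp;
++ *     set_xen_guest_handle(sp.pushcache, policy_buf);
++ *     sp.pushcache_size = policy_len;
++ *     acm_op(ACMOP_setpolicy, &sp);
++ */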
++
++
++#define ACMOP_getpolicy 2
++struct acm_getpolicy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_dumpstats 3
++struct acm_dumpstats {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_getssid 4
++#define ACM_GETBY_ssidref 1
++#define ACM_GETBY_domainid 2
++struct acm_getssid {
++ /* IN */
++ uint32_t get_ssid_by; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id;
++ XEN_GUEST_HANDLE_64(void) ssidbuf;
++ uint32_t ssidbuf_size;
++};
++
++#define ACMOP_getdecision 5
++struct acm_getdecision {
++ /* IN */
++ uint32_t get_decision_by1; /* ACM_GETBY_* */
++ uint32_t get_decision_by2; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id1;
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id2;
++ uint32_t hook;
++ /* OUT */
++ uint32_t acm_decision;
++};
++
++
++#define ACMOP_chgpolicy 6
++struct acm_change_policy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) policy_pushcache;
++ uint32_t policy_pushcache_size;
++ XEN_GUEST_HANDLE_64(void) del_array;
++ uint32_t delarray_size;
++ XEN_GUEST_HANDLE_64(void) chg_array;
++ uint32_t chgarray_size;
++ /* OUT */
++ /* array with error code */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++#define ACMOP_relabeldoms 7
++struct acm_relabel_doms {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) relabel_map;
++ uint32_t relabel_map_size;
++ /* OUT */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++/* future interface to Xen */
++struct xen_acmctl {
++ uint32_t cmd;
++ uint32_t interface_version;
++ union {
++ struct acm_setpolicy setpolicy;
++ struct acm_getpolicy getpolicy;
++ struct acm_dumpstats dumpstats;
++ struct acm_getssid getssid;
++ struct acm_getdecision getdecision;
++ struct acm_change_policy change_policy;
++ struct acm_relabel_doms relabel_doms;
++ } u;
++};
++
++typedef struct xen_acmctl xen_acmctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
++
++#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -rpuN linux-2.6.18.8/include/xen/interface/xsm/flask_op.h linux-2.6.18-xen-3.3.0/include/xen/interface/xsm/flask_op.h
+--- linux-2.6.18.8/include/xen/interface/xsm/flask_op.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/interface/xsm/flask_op.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,45 @@
++/*
++ * This file contains the flask_op hypercall commands and definitions.
++ *
++ * Author: George Coker, <gscoker@alpha.ncsc.mil>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2,
++ * as published by the Free Software Foundation.
++ */
++
++#ifndef __FLASK_OP_H__
++#define __FLASK_OP_H__
++
++#define FLASK_LOAD 1
++#define FLASK_GETENFORCE 2
++#define FLASK_SETENFORCE 3
++#define FLASK_CONTEXT_TO_SID 4
++#define FLASK_SID_TO_CONTEXT 5
++#define FLASK_ACCESS 6
++#define FLASK_CREATE 7
++#define FLASK_RELABEL 8
++#define FLASK_USER 9
++#define FLASK_POLICYVERS 10
++#define FLASK_GETBOOL 11
++#define FLASK_SETBOOL 12
++#define FLASK_COMMITBOOLS 13
++#define FLASK_MLS 14
++#define FLASK_DISABLE 15
++#define FLASK_GETAVC_THRESHOLD 16
++#define FLASK_SETAVC_THRESHOLD 17
++#define FLASK_AVC_HASHSTATS 18
++#define FLASK_AVC_CACHESTATS 19
++#define FLASK_MEMBER 20
++
++#define FLASK_LAST FLASK_MEMBER
++
++typedef struct flask_op {
++ uint32_t cmd;
++ uint32_t size;
++ char *buf;
++} flask_op_t;
++
++DEFINE_XEN_GUEST_HANDLE(flask_op_t);
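++
++/*
++ * Illustrative sketch (not part of the upstream header): querying the
++ * enforcing mode, assuming a flask_op()-style hypercall wrapper; the
++ * return-value semantics shown here are illustrative only:
++ *
++ *     flask_op_t op = { .cmd = FLASK_GETENFORCE, .size = 0, .buf = NULL };
++ *     int enforcing = flask_op(&op);
++ */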
++
++#endif
+diff -rpuN linux-2.6.18.8/include/xen/pcifront.h linux-2.6.18-xen-3.3.0/include/xen/pcifront.h
+--- linux-2.6.18.8/include/xen/pcifront.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/pcifront.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,83 @@
++/*
++ * PCI Frontend - arch-dependent declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_ASM_PCIFRONT_H__
++#define __XEN_ASM_PCIFRONT_H__
++
++#include <linux/spinlock.h>
++
++#ifdef __KERNEL__
++
++#ifndef __ia64__
++
++struct pcifront_device;
++struct pci_bus;
++
++struct pcifront_sd {
++ int domain;
++ struct pcifront_device *pdev;
++};
++
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++ return sd->pdev;
++}
++
++static inline void pcifront_init_sd(struct pcifront_sd *sd,
++ unsigned int domain, unsigned int bus,
++ struct pcifront_device *pdev)
++{
++ sd->domain = domain;
++ sd->pdev = pdev;
++}
++
++#if defined(CONFIG_PCI_DOMAINS)
++static inline int pci_domain_nr(struct pci_bus *bus)
++{
++ struct pcifront_sd *sd = bus->sysdata;
++ return sd->domain;
++}
++static inline int pci_proc_domain(struct pci_bus *bus)
++{
++ return pci_domain_nr(bus);
++}
++#endif /* CONFIG_PCI_DOMAINS */
++
++static inline void pcifront_setup_root_resources(struct pci_bus *bus,
++ struct pcifront_sd *sd)
++{
++}
++
++#else /* __ia64__ */
++
++#include <linux/acpi.h>
++#include <asm/pci.h>
++#define pcifront_sd pci_controller
++
++extern void xen_add_resource(struct pci_controller *, unsigned int,
++ unsigned int, struct acpi_resource *);
++extern void xen_pcibios_setup_root_windows(struct pci_bus *,
++ struct pci_controller *);
++
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++ return (struct pcifront_device *)sd->platform_data;
++}
++
++static inline void pcifront_setup_root_resources(struct pci_bus *bus,
++ struct pcifront_sd *sd)
++{
++ xen_pcibios_setup_root_windows(bus, sd);
++}
++
++#endif /* __ia64__ */
++
++extern struct rw_semaphore pci_bus_sem;
++
++#endif /* __KERNEL__ */
++
++#endif /* __XEN_ASM_PCIFRONT_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/public/evtchn.h linux-2.6.18-xen-3.3.0/include/xen/public/evtchn.h
+--- linux-2.6.18.8/include/xen/public/evtchn.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/public/evtchn.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * evtchn.h
++ *
++ * Interface to /dev/xen/evtchn.
++ *
++ * Copyright (c) 2003-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_EVTCHN_H__
++#define __LINUX_PUBLIC_EVTCHN_H__
++
++/*
++ * Bind a fresh port to VIRQ @virq.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_VIRQ \
++ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
++struct ioctl_evtchn_bind_virq {
++ unsigned int virq;
++};
++
++/*
++ * Bind a fresh port to remote <@remote_domain, @remote_port>.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
++ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
++struct ioctl_evtchn_bind_interdomain {
++ unsigned int remote_domain, remote_port;
++};
++
++/*
++ * Allocate a fresh port for binding to @remote_domain.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
++ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
++struct ioctl_evtchn_bind_unbound_port {
++ unsigned int remote_domain;
++};
++
++/*
++ * Unbind previously allocated @port.
++ */
++#define IOCTL_EVTCHN_UNBIND \
++ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
++struct ioctl_evtchn_unbind {
++ unsigned int port;
++};
++
++/*
++ * Send event to previously allocated @port.
++ */
++#define IOCTL_EVTCHN_NOTIFY \
++ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
++struct ioctl_evtchn_notify {
++ unsigned int port;
++};
++
++/* Clear and reinitialise the event buffer. Clear error condition. */
++#define IOCTL_EVTCHN_RESET \
++ _IOC(_IOC_NONE, 'E', 5, 0)
++
++#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
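+
+For illustration, a minimal userspace sketch of how these ioctls are
+typically driven (not part of the patch; it assumes the conventional
+/dev/xen/evtchn device node, a copy of this header on the include path,
+and sufficient privilege to bind VIRQ_DOM_EXC, whose value is taken
+from xen/interface/xen.h):
+
+    /* Hedged sketch: bind a VIRQ, block until one event arrives,
+     * then write the port back to unmask it for further delivery. */
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <xen/public/evtchn.h>
+
+    int main(void)
+    {
+        struct ioctl_evtchn_bind_virq bind = { .virq = 3 /* VIRQ_DOM_EXC */ };
+        unsigned int port;
+        int rc, fd = open("/dev/xen/evtchn", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        rc = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
+        if (rc < 0)
+            return 1;
+        /* The ioctl returns the allocated port; reads yield pending ports. */
+        if (read(fd, &port, sizeof(port)) == (ssize_t)sizeof(port)) {
+            printf("event on port %u\n", port);
+            write(fd, &port, sizeof(port)); /* unmask */
+        }
+        close(fd);
+        return 0;
+    }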
+diff -rpuN linux-2.6.18.8/include/xen/public/gntdev.h linux-2.6.18-xen-3.3.0/include/xen/public/gntdev.h
+--- linux-2.6.18.8/include/xen/public/gntdev.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/public/gntdev.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,119 @@
++/******************************************************************************
++ * gntdev.h
++ *
++ * Interface to /dev/xen/gntdev.
++ *
++ * Copyright (c) 2007, D G Murray
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_GNTDEV_H__
++#define __LINUX_PUBLIC_GNTDEV_H__
++
++struct ioctl_gntdev_grant_ref {
++ /* The domain ID of the grant to be mapped. */
++ uint32_t domid;
++ /* The grant reference of the grant to be mapped. */
++ uint32_t ref;
++};
++
++/*
++ * Inserts the grant references into the mapping table of an instance
++ * of gntdev. N.B. This does not perform the mapping, which is deferred
++ * until mmap() is called with @index as the offset.
++ */
++#define IOCTL_GNTDEV_MAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
++struct ioctl_gntdev_map_grant_ref {
++ /* IN parameters */
++ /* The number of grants to be mapped. */
++ uint32_t count;
++ uint32_t pad;
++ /* OUT parameters */
++ /* The offset to be used on a subsequent call to mmap(). */
++ uint64_t index;
++ /* Variable IN parameter. */
++ /* Array of grant references, of size @count. */
++ struct ioctl_gntdev_grant_ref refs[1];
++};
++
++/*
++ * Removes the grant references from the mapping table of an instance
++ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
++ * before this ioctl is called, or an error will result.
++ */
++#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
++struct ioctl_gntdev_unmap_grant_ref {
++ /* IN parameters */
++ /* The offset that was returned by the corresponding map operation. */
++ uint64_t index;
++ /* The number of pages to be unmapped. */
++ uint32_t count;
++ uint32_t pad;
++};
++
++/*
++ * Returns the offset in the driver's address space that corresponds
++ * to @vaddr. This can be used to perform a munmap(), followed by an
++ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
++ * the caller. The number of pages that were allocated at the same time as
++ * @vaddr is returned in @count.
++ *
++ * N.B. Where more than one page has been mapped into a contiguous range, the
++ * supplied @vaddr must correspond to the start of the range; otherwise
++ * an error will result. It is only possible to munmap() the entire
++ * contiguously-allocated range at once, and not any subrange thereof.
++ */
++#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
++_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
++struct ioctl_gntdev_get_offset_for_vaddr {
++ /* IN parameters */
++ /* The virtual address of the first mapped page in a range. */
++ uint64_t vaddr;
++ /* OUT parameters */
++ /* The offset that was used in the initial mmap() operation. */
++ uint64_t offset;
++ /* The number of pages mapped in the VM area that begins at @vaddr. */
++ uint32_t count;
++ uint32_t pad;
++};
++
++/*
++ * Sets the maximum number of grants that may be mapped at once by this gntdev
++ * instance.
++ *
++ * N.B. This must be called before any other ioctl is performed on the device.
++ */
++#define IOCTL_GNTDEV_SET_MAX_GRANTS \
++_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
++struct ioctl_gntdev_set_max_grants {
++ /* IN parameter */
++ /* The maximum number of grants that may be mapped at once. */
++ uint32_t count;
++};
++
++#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
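+
+For illustration, a hedged userspace sketch of the map-then-mmap()
+protocol described above (not part of the patch; the device path is the
+conventional one, and domid/gref are assumed to have been learned
+elsewhere, e.g. via XenStore):
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <sys/mman.h>
+    #include <xen/public/gntdev.h>
+
+    /* Map one foreign grant page; the fd must remain open for as long
+     * as the mapping is in use. Error handling is trimmed. */
+    void *map_one_grant(uint32_t domid, uint32_t gref, int *fd_out)
+    {
+        struct ioctl_gntdev_map_grant_ref map;
+        long page_size = sysconf(_SC_PAGESIZE);
+        void *addr;
+        int fd = open("/dev/xen/gntdev", O_RDWR);
+
+        if (fd < 0)
+            return NULL;
+        memset(&map, 0, sizeof(map));
+        map.count = 1;
+        map.refs[0].domid = domid;
+        map.refs[0].ref = gref;
+        if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map)) {
+            close(fd);
+            return NULL;
+        }
+        /* The ioctl only reserves a slot; mmap() performs the mapping,
+         * using the returned index as the file offset. */
+        addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
+                    MAP_SHARED, fd, map.index);
+        *fd_out = fd;
+        return addr == MAP_FAILED ? NULL : addr;
+    }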
+diff -rpuN linux-2.6.18.8/include/xen/public/privcmd.h linux-2.6.18-xen-3.3.0/include/xen/public/privcmd.h
+--- linux-2.6.18.8/include/xen/public/privcmd.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/public/privcmd.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,79 @@
++/******************************************************************************
++ * privcmd.h
++ *
++ * Interface to /proc/xen/privcmd.
++ *
++ * Copyright (c) 2003-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_PRIVCMD_H__
++#define __LINUX_PUBLIC_PRIVCMD_H__
++
++#include <linux/types.h>
++
++#ifndef __user
++#define __user
++#endif
++
++typedef struct privcmd_hypercall
++{
++ __u64 op;
++ __u64 arg[5];
++} privcmd_hypercall_t;
++
++typedef struct privcmd_mmap_entry {
++ __u64 va;
++ __u64 mfn;
++ __u64 npages;
++} privcmd_mmap_entry_t;
++
++typedef struct privcmd_mmap {
++ int num;
++ domid_t dom; /* target domain */
++ privcmd_mmap_entry_t __user *entry;
++} privcmd_mmap_t;
++
++typedef struct privcmd_mmapbatch {
++ int num; /* number of pages to populate */
++ domid_t dom; /* target domain */
++ __u64 addr; /* virtual address */
++ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
++} privcmd_mmapbatch_t;
++
++/*
++ * @cmd: IOCTL_PRIVCMD_HYPERCALL
++ * @arg: &privcmd_hypercall_t
++ * Return: Value returned from execution of the specified hypercall.
++ */
++#define IOCTL_PRIVCMD_HYPERCALL \
++ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
++#define IOCTL_PRIVCMD_MMAP \
++ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
++#define IOCTL_PRIVCMD_MMAPBATCH \
++ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
++
++#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
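+
+For illustration, a hedged userspace sketch of issuing a hypercall
+through this interface (not part of the patch; the hypercall and
+subcommand numbers are taken from xen/interface/xen.h and version.h,
+and the caller must be privileged):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <xen/public/privcmd.h>
+
+    int main(void)
+    {
+        /* __HYPERVISOR_xen_version == 17; XENVER_version == 0 needs
+         * no output buffer and returns (major << 16) | minor. */
+        privcmd_hypercall_t call = { .op = 17 };
+        long ver;
+        int fd = open("/proc/xen/privcmd", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
+        if (ver >= 0)
+            printf("Xen %ld.%ld\n", ver >> 16, ver & 0xffff);
+        close(fd);
+        return 0;
+    }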
+diff -rpuN linux-2.6.18.8/include/xen/xenbus.h linux-2.6.18-xen-3.3.0/include/xen/xenbus.h
+--- linux-2.6.18.8/include/xen/xenbus.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/xenbus.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,307 @@
++/******************************************************************************
++ * xenbus.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XEN_XENBUS_H
++#define _XEN_XENBUS_H
++
++#include <linux/device.h>
++#include <linux/notifier.h>
++#include <linux/mutex.h>
++#include <linux/completion.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/xenbus.h>
++#include <xen/interface/io/xs_wire.h>
++
++/* Register callback to watch this node. */
++struct xenbus_watch
++{
++ struct list_head list;
++
++ /* Path being watched. */
++ const char *node;
++
++ /* Callback (executed in a process context with no locks held). */
++ void (*callback)(struct xenbus_watch *,
++ const char **vec, unsigned int len);
++
++ /* See XBWF_ definitions below. */
++ unsigned long flags;
++};
++
++/*
++ * Execute callback in its own kthread. Useful if the callback is long
++ * running or heavily serialised, to avoid taking out the main xenwatch thread
++ * for a long period of time (or even unwittingly causing a deadlock).
++ */
++#define XBWF_new_thread 1
++
++/* A xenbus device. */
++struct xenbus_device {
++ const char *devicetype;
++ const char *nodename;
++ const char *otherend;
++ int otherend_id;
++ struct xenbus_watch otherend_watch;
++ struct device dev;
++ enum xenbus_state state;
++ struct completion down;
++};
++
++static inline struct xenbus_device *to_xenbus_device(struct device *dev)
++{
++ return container_of(dev, struct xenbus_device, dev);
++}
++
++struct xenbus_device_id
++{
++ /* .../device/<device_type>/<identifier> */
++ char devicetype[32]; /* General class of device. */
++};
++
++/* A xenbus driver. */
++struct xenbus_driver {
++ char *name;
++ struct module *owner;
++ const struct xenbus_device_id *ids;
++ int (*probe)(struct xenbus_device *dev,
++ const struct xenbus_device_id *id);
++ void (*otherend_changed)(struct xenbus_device *dev,
++ enum xenbus_state backend_state);
++ int (*remove)(struct xenbus_device *dev);
++ int (*suspend)(struct xenbus_device *dev);
++ int (*suspend_cancel)(struct xenbus_device *dev);
++ int (*resume)(struct xenbus_device *dev);
++ int (*uevent)(struct xenbus_device *, char **, int, char *, int);
++ struct device_driver driver;
++ int (*read_otherend_details)(struct xenbus_device *dev);
++ int (*is_ready)(struct xenbus_device *dev);
++};
++
++static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
++{
++ return container_of(drv, struct xenbus_driver, driver);
++}
++
++int xenbus_register_frontend(struct xenbus_driver *drv);
++int xenbus_register_backend(struct xenbus_driver *drv);
++void xenbus_unregister_driver(struct xenbus_driver *drv);
++
++struct xenbus_transaction
++{
++ u32 id;
++};
++
++/* Nil transaction ID. */
++#define XBT_NIL ((struct xenbus_transaction) { 0 })
++
++char **xenbus_directory(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *num);
++void *xenbus_read(struct xenbus_transaction t,
++ const char *dir, const char *node, unsigned int *len);
++int xenbus_write(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *string);
++int xenbus_mkdir(struct xenbus_transaction t,
++ const char *dir, const char *node);
++int xenbus_exists(struct xenbus_transaction t,
++ const char *dir, const char *node);
++int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
++int xenbus_transaction_start(struct xenbus_transaction *t);
++int xenbus_transaction_end(struct xenbus_transaction t, int abort);
++
++/* Single read and scanf: returns -errno or num scanned if > 0. */
++int xenbus_scanf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++ __attribute__((format(scanf, 4, 5)));
++
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(struct xenbus_transaction t,
++ const char *dir, const char *node, const char *fmt, ...)
++ __attribute__((format(printf, 4, 5)));
++
++/* Generic read function: NULL-terminated triples of name,
++ * sprintf-style type string, and pointer. Returns 0 or errno. */
++int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
++
++/* notifier routines for when the xenstore comes up */
++int register_xenstore_notifier(struct notifier_block *nb);
++void unregister_xenstore_notifier(struct notifier_block *nb);
++
++int register_xenbus_watch(struct xenbus_watch *watch);
++void unregister_xenbus_watch(struct xenbus_watch *watch);
++void xs_suspend(void);
++void xs_resume(void);
++void xs_suspend_cancel(void);
++
++/* Used by xenbus_dev to borrow kernel's store connection. */
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
++
++/* Prepare for domain suspend: then resume or cancel the suspend. */
++void xenbus_suspend(void);
++void xenbus_resume(void);
++void xenbus_suspend_cancel(void);
++
++#define XENBUS_IS_ERR_READ(str) ({ \
++ if (!IS_ERR(str) && strlen(str) == 0) { \
++ kfree(str); \
++ str = ERR_PTR(-ERANGE); \
++ } \
++ IS_ERR(str); \
++})
++
++#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
++
++
++/**
++ * Register a watch on the given path, using the given xenbus_watch structure
++ * for storage, and the given callback function as the callback. Return 0 on
++ * success, or -errno on error. On success, the given path will be saved as
++ * watch->node, and remains the caller's to free. On error, watch->node will
++ * be NULL, the device will switch to XenbusStateClosing, and the error will
++ * be saved in the store.
++ */
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++ struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int));
++
++
++/**
++ * Register a watch on the given path/path2, using the given xenbus_watch
++ * structure for storage, and the given callback function as the callback.
++ * Return 0 on success, or -errno on error. On success, the watched path
++ * (path/path2) will be saved as watch->node, and becomes the caller's to
++ * kfree(). On error, watch->node will be NULL, so the caller has nothing to
++ * free, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++ const char *path2, struct xenbus_watch *watch,
++ void (*callback)(struct xenbus_watch *,
++ const char **, unsigned int));
++
++
++/**
++ * Advertise in the store a change of the given driver to the given new_state.
++ * Return 0 on success, or -errno on error. On error, the device will switch
++ * to XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
++
++
++/**
++ * Grant access to the given ring_mfn to the peer of the given device. Return
++ * 0 on success, or -errno on error. On error, the device will switch to
++ * XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
++
++
++/**
++ * Map a page of memory into this domain from another domain's grant table.
++ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
++ * page to that address, and sets *vaddr to that address.
++ * xenbus_map_ring does not allocate the virtual address space (you must do
++ * this yourself!). It only maps in the page to the specified address.
++ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
++ * or -ENOMEM on error. If an error is returned, the device will switch to
++ * XenbusStateClosing and the error message will be saved in XenStore.
++ */
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
++ int gnt_ref);
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr);
++
++
++/**
++ * Unmap a page of memory in this domain that was imported from another domain.
++ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
++ * xenbus_map_ring_valloc (it will free the virtual address space).
++ * Returns 0 on success and returns GNTST_* on error
++ * (see xen/include/interface/grant_table.h).
++ */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
++int xenbus_unmap_ring(struct xenbus_device *dev,
++ grant_handle_t handle, void *vaddr);
++
++
++/**
++ * Allocate an event channel for the given xenbus_device, assigning the newly
++ * created local port to *port. Return 0 on success, or -errno on error. On
++ * error, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
++
++
++/**
++ * Free an existing event channel. Returns 0 on success or -errno on error.
++ */
++int xenbus_free_evtchn(struct xenbus_device *dev, int port);
++
++
++/**
++ * Return the state of the driver rooted at the given store path, or
++ * XenbusStateUnknown if no state can be read.
++ */
++enum xenbus_state xenbus_read_driver_state(const char *path);
++
++
++/***
++ * Report the given negative errno into the store, along with the given
++ * formatted message.
++ */
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++ ...);
++
++
++/***
++ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
++ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
++ * closedown of this driver and its peer.
++ */
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++ ...);
++
++int xenbus_dev_init(void);
++
++const char *xenbus_strstate(enum xenbus_state state);
++int xenbus_dev_is_online(struct xenbus_device *dev);
++int xenbus_frontend_closed(struct xenbus_device *dev);
++
++int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *));
++int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *));
++
++#endif /* _XEN_XENBUS_H */
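+
+For illustration, a hedged kernel-side skeleton of a frontend built on
+the interfaces declared above (not part of the patch; the "demo" device
+type and its callbacks are invented for the example):
+
+    #include <linux/module.h>
+    #include <linux/slab.h>
+    #include <xen/xenbus.h>
+
+    static int demo_probe(struct xenbus_device *dev,
+                          const struct xenbus_device_id *id)
+    {
+        /* Read a node the toolstack wrote for us, then advertise
+         * readiness; on error the helpers switch us to Closing. */
+        char *v = xenbus_read(XBT_NIL, dev->nodename, "some-key", NULL);
+
+        if (IS_ERR(v))
+            return PTR_ERR(v);
+        kfree(v);
+        return xenbus_switch_state(dev, XenbusStateConnected);
+    }
+
+    static void demo_otherend_changed(struct xenbus_device *dev,
+                                      enum xenbus_state backend_state)
+    {
+        if (backend_state == XenbusStateClosing)
+            xenbus_frontend_closed(dev);
+    }
+
+    static const struct xenbus_device_id demo_ids[] = {
+        { "demo" },
+        { "" }
+    };
+
+    static struct xenbus_driver demo_driver = {
+        .name = "demo",
+        .owner = THIS_MODULE,
+        .ids = demo_ids,
+        .probe = demo_probe,
+        .otherend_changed = demo_otherend_changed,
+    };
+
+    static int __init demo_init(void)
+    {
+        return xenbus_register_frontend(&demo_driver);
+    }
+    module_init(demo_init);
+    MODULE_LICENSE("GPL");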
+diff -rpuN linux-2.6.18.8/include/xen/xencomm.h linux-2.6.18-xen-3.3.0/include/xen/xencomm.h
+--- linux-2.6.18.8/include/xen/xencomm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/xencomm.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,77 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ * Jerone Young <jyoung5@us.ibm.com>
++ */
++
++#ifndef _LINUX_XENCOMM_H_
++#define _LINUX_XENCOMM_H_
++
++#include <xen/interface/xencomm.h>
++
++#define XENCOMM_MINI_ADDRS 3
++struct xencomm_mini {
++ struct xencomm_desc _desc;
++ uint64_t address[XENCOMM_MINI_ADDRS];
++};
++
++/* To avoid additional virt-to-phys conversions, an opaque structure is
++ presented. */
++struct xencomm_handle;
++
++extern void xencomm_free(struct xencomm_handle *desc);
++extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
++extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
++ unsigned long bytes, struct xencomm_mini *xc_area);
++
++#if 0
++#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
++ struct xencomm_mini xc_desc ## _base[(n)] \
++ __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
++ struct xencomm_mini* xc_desc = &xc_desc ## _base[0];
++#else
++/*
++ * gcc bug workaround:
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
++ * gcc does not properly handle a stack variable annotated with
++ * __attribute__((__aligned__(sizeof(struct xencomm_mini))))
++ */
++#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
++ unsigned char xc_desc ## _base[((n) + 1 ) * \
++ sizeof(struct xencomm_mini)]; \
++ struct xencomm_mini *xc_desc = (struct xencomm_mini*) \
++ ((unsigned long)xc_desc ## _base + \
++ (sizeof(struct xencomm_mini) - \
++ ((unsigned long)xc_desc ## _base) % \
++ sizeof(struct xencomm_mini)));
++#endif
++#define xencomm_map_no_alloc(ptr, bytes) \
++ ({XENCOMM_MINI_ALIGNED(xc_desc, 1); \
++ __xencomm_map_no_alloc(ptr, bytes, xc_desc);})
++
++/* provided by architecture code: */
++extern unsigned long xencomm_vtop(unsigned long vaddr);
++
++static inline void *xencomm_pa(void *ptr)
++{
++ return (void *)xencomm_vtop((unsigned long)ptr);
++}
++
++#define xen_guest_handle(hnd) ((hnd).p)
++
++#endif /* _LINUX_XENCOMM_H_ */
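+
+For illustration, a hedged sketch of how the no-allocation mapping path
+above is meant to be used from kernel code (not part of the patch;
+some_arch_hypercall() is a placeholder for an arch wrapper that takes a
+xencomm handle in place of a virtual address):
+
+    #include <xen/xencomm.h>
+
+    /* Placeholder for an arch-provided hypercall wrapper. */
+    extern int some_arch_hypercall(struct xencomm_handle *desc,
+                                   unsigned long len);
+
+    int demo_hypercall(void *buf, unsigned long len)
+    {
+        /* xencomm_map_no_alloc() builds the descriptor on the stack
+         * (at most XENCOMM_MINI_ADDRS physical ranges), so it must be
+         * consumed before this frame returns. */
+        return some_arch_hypercall(xencomm_map_no_alloc(buf, len), len);
+    }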
+diff -rpuN linux-2.6.18.8/include/xen/xencons.h linux-2.6.18-xen-3.3.0/include/xen/xencons.h
+--- linux-2.6.18.8/include/xen/xencons.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/xencons.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,17 @@
++#ifndef __ASM_XENCONS_H__
++#define __ASM_XENCONS_H__
++
++struct dom0_vga_console_info;
++void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t);
++
++void xencons_force_flush(void);
++void xencons_resume(void);
++
++/* Interrupt work hooks. Receive data, or kick data out. */
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_tx(void);
++
++int xencons_ring_init(void);
++int xencons_ring_send(const char *data, unsigned len);
++
++#endif /* __ASM_XENCONS_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/xenoprof.h linux-2.6.18-xen-3.3.0/include/xen/xenoprof.h
+--- linux-2.6.18.8/include/xen/xenoprof.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/xenoprof.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,42 @@
++/******************************************************************************
++ * xen/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_XENOPROF_H__
++#define __XEN_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++#include <asm/xenoprof.h>
++
++struct oprofile_operations;
++int xenoprofile_init(struct oprofile_operations * ops);
++void xenoprofile_exit(void);
++
++struct xenoprof_shared_buffer {
++ char *buffer;
++ struct xenoprof_arch_shared_buffer arch;
++};
++#else
++#define xenoprofile_init(ops) (-ENOSYS)
++#define xenoprofile_exit() do { } while (0)
++
++#endif /* CONFIG_XEN */
++#endif /* __XEN_XENOPROF_H__ */
+diff -rpuN linux-2.6.18.8/include/xen/xen_proc.h linux-2.6.18-xen-3.3.0/include/xen/xen_proc.h
+--- linux-2.6.18.8/include/xen/xen_proc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/include/xen/xen_proc.h 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,12 @@
++
++#ifndef __ASM_XEN_PROC_H__
++#define __ASM_XEN_PROC_H__
++
++#include <linux/proc_fs.h>
++
++extern struct proc_dir_entry *create_xen_proc_entry(
++ const char *name, mode_t mode);
++extern void remove_xen_proc_entry(
++ const char *name);
++
++#endif /* __ASM_XEN_PROC_H__ */
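+
+For illustration, a hedged sketch of a /proc/xen node built on these
+helpers, using the 2.6.18-era read_proc convention (not part of the
+patch; the "demo" name and contents are invented):
+
+    #include <linux/errno.h>
+    #include <linux/kernel.h>
+    #include <xen/xen_proc.h>
+
+    static int demo_read(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+    {
+        *eof = 1;
+        return sprintf(page, "hello from /proc/xen/demo\n");
+    }
+
+    static int __init demo_proc_init(void)
+    {
+        struct proc_dir_entry *e = create_xen_proc_entry("demo", 0400);
+
+        if (!e)
+            return -ENOMEM;
+        e->read_proc = demo_read;
+        return 0;
+    }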
+diff -rpuN linux-2.6.18.8/kernel/cpu.c linux-2.6.18-xen-3.3.0/kernel/cpu.c
+--- linux-2.6.18.8/kernel/cpu.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/cpu.c 2008-08-21 11:36:07.000000000 +0200
+@@ -48,7 +48,10 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+
+ void unlock_cpu_hotplug(void)
+ {
+/* cpufreq lock-takers fixed in mainline; shut up until dom0 kernel catches up */
++#ifdef CONFIG_XEN
+ WARN_ON(recursive != current);
++#endif
+ if (recursive_depth) {
+ recursive_depth--;
+ return;
+diff -rpuN linux-2.6.18.8/kernel/fork.c linux-2.6.18-xen-3.3.0/kernel/fork.c
+--- linux-2.6.18.8/kernel/fork.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/fork.c 2008-08-21 11:36:07.000000000 +0200
+@@ -276,6 +276,9 @@ static inline int dup_mmap(struct mm_str
+ if (retval)
+ goto out;
+ }
++#ifdef arch_dup_mmap
++ arch_dup_mmap(mm, oldmm);
++#endif
+ retval = 0;
+ out:
+ up_write(&mm->mmap_sem);
+diff -rpuN linux-2.6.18.8/kernel/hrtimer.c linux-2.6.18-xen-3.3.0/kernel/hrtimer.c
+--- linux-2.6.18.8/kernel/hrtimer.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/hrtimer.c 2008-08-21 11:36:07.000000000 +0200
+@@ -59,7 +59,7 @@ static ktime_t ktime_get(void)
+ *
+ * returns the time in ktime_t format
+ */
+-static ktime_t ktime_get_real(void)
++ktime_t ktime_get_real(void)
+ {
+ struct timespec now;
+
+diff -rpuN linux-2.6.18.8/kernel/irq/spurious.c linux-2.6.18-xen-3.3.0/kernel/irq/spurious.c
+--- linux-2.6.18.8/kernel/irq/spurious.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/irq/spurious.c 2008-08-21 11:36:07.000000000 +0200
+@@ -139,7 +139,8 @@ void note_interrupt(unsigned int irq, st
+ irqreturn_t action_ret, struct pt_regs *regs)
+ {
+ if (unlikely(action_ret != IRQ_HANDLED)) {
+- desc->irqs_unhandled++;
++ if (!irq_ignore_unhandled(irq))
++ desc->irqs_unhandled++;
+ if (unlikely(action_ret != IRQ_NONE))
+ report_bad_irq(irq, desc, action_ret);
+ }
+diff -rpuN linux-2.6.18.8/kernel/Kconfig.preempt linux-2.6.18-xen-3.3.0/kernel/Kconfig.preempt
+--- linux-2.6.18.8/kernel/Kconfig.preempt 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/Kconfig.preempt 2008-08-21 11:36:07.000000000 +0200
+@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
+
+ config PREEMPT
+ bool "Preemptible Kernel (Low-Latency Desktop)"
++ depends on !XEN
+ help
+ This option reduces the latency of the kernel by making
+ all kernel code (that is not executing in a critical section)
+diff -rpuN linux-2.6.18.8/kernel/kexec.c linux-2.6.18-xen-3.3.0/kernel/kexec.c
+--- linux-2.6.18.8/kernel/kexec.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/kexec.c 2008-08-21 11:36:07.000000000 +0200
+@@ -330,13 +330,26 @@ static int kimage_is_destination_range(s
+ return 0;
+ }
+
+-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
++static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
+ {
+ struct page *pages;
+
+ pages = alloc_pages(gfp_mask, order);
+ if (pages) {
+ unsigned int count, i;
++#ifdef CONFIG_XEN
++ int address_bits;
++
++ if (limit == ~0UL)
++ address_bits = BITS_PER_LONG;
++ else
++ address_bits = long_log2(limit);
++
++ if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
++ __free_pages(pages, order);
++ return NULL;
++ }
++#endif
+ pages->mapping = NULL;
+ set_page_private(pages, order);
+ count = 1 << order;
+@@ -355,6 +368,9 @@ static void kimage_free_pages(struct pag
+ count = 1 << order;
+ for (i = 0; i < count; i++)
+ ClearPageReserved(page + i);
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), order);
++#endif
+ __free_pages(page, order);
+ }
+
+@@ -400,10 +416,10 @@ static struct page *kimage_alloc_normal_
+ do {
+ unsigned long pfn, epfn, addr, eaddr;
+
+- pages = kimage_alloc_pages(GFP_KERNEL, order);
++ pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
+ if (!pages)
+ break;
+- pfn = page_to_pfn(pages);
++ pfn = kexec_page_to_pfn(pages);
+ epfn = pfn + count;
+ addr = pfn << PAGE_SHIFT;
+ eaddr = epfn << PAGE_SHIFT;
+@@ -437,6 +453,7 @@ static struct page *kimage_alloc_normal_
+ return pages;
+ }
+
++#ifndef CONFIG_XEN
+ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+ unsigned int order)
+ {
+@@ -490,7 +507,7 @@ static struct page *kimage_alloc_crash_c
+ }
+ /* If I don't overlap any segments I have found my hole! */
+ if (i == image->nr_segments) {
+- pages = pfn_to_page(hole_start >> PAGE_SHIFT);
++ pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
+ break;
+ }
+ }
+@@ -517,6 +534,13 @@ struct page *kimage_alloc_control_pages(
+
+ return pages;
+ }
++#else /* !CONFIG_XEN */
++struct page *kimage_alloc_control_pages(struct kimage *image,
++ unsigned int order)
++{
++ return kimage_alloc_normal_control_pages(image, order);
++}
++#endif
+
+ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
+ {
+@@ -532,7 +556,7 @@ static int kimage_add_entry(struct kimag
+ return -ENOMEM;
+
+ ind_page = page_address(page);
+- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
++ *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
+ image->entry = ind_page;
+ image->last_entry = ind_page +
+ ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+@@ -593,13 +617,13 @@ static int kimage_terminate(struct kimag
+ #define for_each_kimage_entry(image, ptr, entry) \
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+ ptr = (entry & IND_INDIRECTION)? \
+- phys_to_virt((entry & PAGE_MASK)): ptr +1)
++ kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
+
+ static void kimage_free_entry(kimage_entry_t entry)
+ {
+ struct page *page;
+
+- page = pfn_to_page(entry >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
+ kimage_free_pages(page);
+ }
+
+@@ -611,6 +635,10 @@ static void kimage_free(struct kimage *i
+ if (!image)
+ return;
+
++#ifdef CONFIG_XEN
++ xen_machine_kexec_unload(image);
++#endif
++
+ kimage_free_extra_pages(image);
+ for_each_kimage_entry(image, ptr, entry) {
+ if (entry & IND_INDIRECTION) {
+@@ -686,7 +714,7 @@ static struct page *kimage_alloc_page(st
+ * have a match.
+ */
+ list_for_each_entry(page, &image->dest_pages, lru) {
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+ if (addr == destination) {
+ list_del(&page->lru);
+ return page;
+@@ -697,16 +725,16 @@ static struct page *kimage_alloc_page(st
+ kimage_entry_t *old;
+
+ /* Allocate a page, if we run out of memory give up */
+- page = kimage_alloc_pages(gfp_mask, 0);
++ page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
+ if (!page)
+ return NULL;
+ /* If the page cannot be used file it away */
+- if (page_to_pfn(page) >
++ if (kexec_page_to_pfn(page) >
+ (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+ list_add(&page->lru, &image->unuseable_pages);
+ continue;
+ }
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+
+ /* If it is the destination page we want use it */
+ if (addr == destination)
+@@ -729,7 +757,7 @@ static struct page *kimage_alloc_page(st
+ struct page *old_page;
+
+ old_addr = *old & PAGE_MASK;
+- old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
++ old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
+ copy_highpage(page, old_page);
+ *old = addr | (*old & ~PAGE_MASK);
+
+@@ -779,7 +807,7 @@ static int kimage_load_normal_segment(st
+ result = -ENOMEM;
+ goto out;
+ }
+- result = kimage_add_page(image, page_to_pfn(page)
++ result = kimage_add_page(image, kexec_page_to_pfn(page)
+ << PAGE_SHIFT);
+ if (result < 0)
+ goto out;
+@@ -811,6 +839,7 @@ out:
+ return result;
+ }
+
++#ifndef CONFIG_XEN
+ static int kimage_load_crash_segment(struct kimage *image,
+ struct kexec_segment *segment)
+ {
+@@ -833,7 +862,7 @@ static int kimage_load_crash_segment(str
+ char *ptr;
+ size_t uchunk, mchunk;
+
+- page = pfn_to_page(maddr >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
+ if (page == 0) {
+ result = -ENOMEM;
+ goto out;
+@@ -851,6 +880,7 @@ static int kimage_load_crash_segment(str
+ memset(ptr + uchunk, 0, mchunk - uchunk);
+ }
+ result = copy_from_user(ptr, buf, uchunk);
++ kexec_flush_icache_page(page);
+ kunmap(page);
+ if (result) {
+ result = (result < 0) ? result : -EIO;
+@@ -881,6 +911,13 @@ static int kimage_load_segment(struct ki
+
+ return result;
+ }
++#else /* CONFIG_XEN */
++static int kimage_load_segment(struct kimage *image,
++ struct kexec_segment *segment)
++{
++ return kimage_load_normal_segment(image, segment);
++}
++#endif
+
+ /*
+ * Exec Kernel system call: for obvious reasons only root may call it.
+@@ -991,6 +1028,13 @@ asmlinkage long sys_kexec_load(unsigned
+ if (result)
+ goto out;
+ }
++#ifdef CONFIG_XEN
++ if (image) {
++ result = xen_machine_kexec_load(image);
++ if (result)
++ goto out;
++ }
++#endif
+ /* Install the new kernel, and Uninstall the old */
+ image = xchg(dest_image, image);
+
+@@ -1044,7 +1088,6 @@ void crash_kexec(struct pt_regs *regs)
+ {
+ int locked;
+
+-
+ /* Take the kexec_lock here to prevent sys_kexec_load
+ * running on one cpu from replacing the crash kernel
+ * we are using after a panic on a different cpu.
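+
+The kexec hunks above route every frame-number and address conversion
+through kexec_* wrappers so that CONFIG_XEN builds can substitute
+machine-frame aware versions. Illustratively (the real definitions live
+elsewhere in this patch, in the kexec headers), the native fallbacks
+would be simple identity wrappers:
+
+    #ifndef CONFIG_XEN
+    #define kexec_page_to_pfn(page)   page_to_pfn(page)
+    #define kexec_pfn_to_page(pfn)    pfn_to_page(pfn)
+    #define kexec_virt_to_phys(addr)  virt_to_phys(addr)
+    #define kexec_phys_to_virt(addr)  phys_to_virt(addr)
+    #endif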
+diff -rpuN linux-2.6.18.8/kernel/resource.c linux-2.6.18-xen-3.3.0/kernel/resource.c
+--- linux-2.6.18.8/kernel/resource.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/resource.c 2008-08-21 11:36:07.000000000 +0200
+@@ -36,6 +36,16 @@ struct resource iomem_resource = {
+ };
+ EXPORT_SYMBOL(iomem_resource);
+
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++struct resource iomem_machine_resource = {
++ .name = "Machine PCI mem",
++ .start = 0,
++ .end = -1,
++ .flags = IORESOURCE_MEM,
++};
++EXPORT_SYMBOL(iomem_machine_resource);
++#endif
++
+ static DEFINE_RWLOCK(resource_lock);
+
+ #ifdef CONFIG_PROC_FS
+@@ -115,6 +125,18 @@ static int iomem_open(struct inode *inod
+ return res;
+ }
+
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++static int iomem_machine_open(struct inode *inode, struct file *file)
++{
++ int res = seq_open(file, &resource_op);
++ if (!res) {
++ struct seq_file *m = file->private_data;
++ m->private = &iomem_machine_resource;
++ }
++ return res;
++}
++#endif
++
+ static struct file_operations proc_ioports_operations = {
+ .open = ioports_open,
+ .read = seq_read,
+@@ -129,6 +151,15 @@ static struct file_operations proc_iomem
+ .release = seq_release,
+ };
+
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++static struct file_operations proc_iomem_machine_operations = {
++ .open = iomem_machine_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++#endif
++
+ static int __init ioresources_init(void)
+ {
+ struct proc_dir_entry *entry;
+@@ -139,6 +170,13 @@ static int __init ioresources_init(void)
+ entry = create_proc_entry("iomem", 0, NULL);
+ if (entry)
+ entry->proc_fops = &proc_iomem_operations;
++#ifdef CONFIG_PROC_IOMEM_MACHINE
++ if (is_initial_xendomain()) {
++ entry = create_proc_entry("iomem_machine", 0, NULL);
++ if (entry)
++ entry->proc_fops = &proc_iomem_machine_operations;
++ }
++#endif
+ return 0;
+ }
+ __initcall(ioresources_init);
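+
+With CONFIG_PROC_IOMEM_MACHINE enabled, the initial domain additionally
+exposes the machine (host-physical) memory map alongside the ordinary
+pseudo-physical /proc/iomem. A hedged userspace sketch of reading it
+(not part of the patch):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        char line[256];
+        FILE *f = fopen("/proc/iomem_machine", "r");
+
+        if (!f)
+            return 1;
+        while (fgets(line, sizeof(line), f))
+            fputs(line, stdout);
+        fclose(f);
+        return 0;
+    }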
+diff -rpuN linux-2.6.18.8/kernel/softlockup.c linux-2.6.18-xen-3.3.0/kernel/softlockup.c
+--- linux-2.6.18.8/kernel/softlockup.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/softlockup.c 2008-08-21 11:36:07.000000000 +0200
+@@ -40,6 +40,19 @@ void touch_softlockup_watchdog(void)
+ }
+ EXPORT_SYMBOL(touch_softlockup_watchdog);
+
++unsigned long softlockup_get_next_event(void)
++{
++ int this_cpu = smp_processor_id();
++ unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
++
++ if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
++ did_panic ||
++ !per_cpu(watchdog_task, this_cpu))
++ return MAX_JIFFY_OFFSET;
++
++ return max_t(long, 0, touch_timestamp + HZ - jiffies);
++}
++
+ /*
+ * This callback runs from the timer interrupt, and checks
+ * whether the watchdog thread has hung or not:
+diff -rpuN linux-2.6.18.8/kernel/sysctl.c linux-2.6.18-xen-3.3.0/kernel/sysctl.c
+--- linux-2.6.18.8/kernel/sysctl.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/sysctl.c 2008-08-21 11:36:07.000000000 +0200
+@@ -661,7 +661,7 @@ static ctl_table kern_table[] = {
+ .proc_handler = &proc_dointvec,
+ },
+ #endif
+-#ifdef CONFIG_ACPI_SLEEP
++#if defined(CONFIG_ACPI_SLEEP) && !defined(CONFIG_ACPI_PV_SLEEP)
+ {
+ .ctl_name = KERN_ACPI_VIDEO_FLAGS,
+ .procname = "acpi_video_flags",
+diff -rpuN linux-2.6.18.8/kernel/timer.c linux-2.6.18-xen-3.3.0/kernel/timer.c
+--- linux-2.6.18.8/kernel/timer.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/kernel/timer.c 2008-08-21 11:36:07.000000000 +0200
+@@ -485,7 +485,9 @@ unsigned long next_timer_interrupt(void)
+ if (hr_expires < 3)
+ return hr_expires + jiffies;
+ }
+- hr_expires += jiffies;
++ hr_expires = min_t(unsigned long,
++ softlockup_get_next_event(),
++ hr_expires) + jiffies;
+
+ base = __get_cpu_var(tvec_bases);
+ spin_lock(&base->lock);
+diff -rpuN linux-2.6.18.8/lib/Makefile linux-2.6.18-xen-3.3.0/lib/Makefile
+--- linux-2.6.18.8/lib/Makefile 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/lib/Makefile 2008-08-21 11:36:07.000000000 +0200
+@@ -52,6 +52,9 @@ obj-$(CONFIG_SMP) += percpu_counter.o
+ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+
+ obj-$(CONFIG_SWIOTLB) += swiotlb.o
++ifeq ($(CONFIG_IA64),y)
++swiotlb-$(CONFIG_XEN) := ../arch/ia64/xen/swiotlb.o
++endif
+
+ hostprogs-y := gen_crc32table
+ clean-files := crc32table.h
+diff -rpuN linux-2.6.18.8/lib/swiotlb-xen.c linux-2.6.18-xen-3.3.0/lib/swiotlb-xen.c
+--- linux-2.6.18.8/lib/swiotlb-xen.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/lib/swiotlb-xen.c 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,745 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ * David Mosberger-Tang <davidm@hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
++ */
++
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <xen/gnttab.h>
++#include <xen/interface/memory.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
++
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++
++#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2. What is the appropriate value?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE 128
++
++/*
++ * log of the size of each IO TLB slab. The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++int swiotlb_force;
++
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
++
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static unsigned long iotlb_pfn_start, iotlb_pfn_end;
++
++/* Does the given dma address reside within the swiotlb aperture? */
++static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
++{
++ unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
++ return (pfn_valid(pfn)
++ && (pfn >= iotlb_pfn_start)
++ && (pfn < iotlb_pfn_end));
++}
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static struct phys_addr {
++ struct page *page;
++ unsigned int offset;
++} *io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
++
++static unsigned int dma_bits;
++static unsigned int __initdata max_dma_bits = 32;
++static int __init
++setup_dma_bits(char *str)
++{
++ max_dma_bits = simple_strtoul(str, NULL, 0);
++ return 0;
++}
++__setup("dma_bits=", setup_dma_bits);
++
++static int __init
++setup_io_tlb_npages(char *str)
++{
++ /* Unlike on ia64, the size is the aperture in megabytes, not 'slabs'! */
++ if (isdigit(*str)) {
++ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++ (20 - IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ /* Round up to power of two (xen_create_contiguous_region). */
++ while (iotlb_nslabs & (iotlb_nslabs-1))
++ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++ }
++ if (*str == ',')
++ ++str;
++ /*
++ * NB. 'force' enables the swiotlb, but doesn't force its use for
++ * every DMA like it does on native Linux. 'off' forcibly disables
++ * use of the swiotlb.
++ */
++ if (!strcmp(str, "force"))
++ swiotlb_force = 1;
++ else if (!strcmp(str, "off"))
++ swiotlb_force = -1;
++ return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++ unsigned long i, bytes;
++ int rc;
++
++ if (!iotlb_nslabs) {
++ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ /* Round up to power of two (xen_create_contiguous_region). */
++ while (iotlb_nslabs & (iotlb_nslabs-1))
++ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++ }
++
++ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
++
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++ if (!iotlb_virt_start)
++ panic("Cannot allocate SWIOTLB buffer!\n");
++
++ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
++ for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
++ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc) {
++ if (i == 0)
++ panic("No suitable physical memory available for SWIOTLB buffer!\n"
++ "Use dom0_mem Xen boot parameter to reserve\n"
++ "some DMA memory (e.g., dom0_mem=-128M).\n");
++ iotlb_nslabs = i;
++ i <<= IO_TLB_SHIFT;
++ free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
++ bytes = i;
++ for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
++ unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
++
++ if (bits > dma_bits)
++ dma_bits = bits;
++ }
++ break;
++ }
++ }
++
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
++ */
++ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++ for (i = 0; i < iotlb_nslabs; i++)
++ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ io_tlb_index = 0;
++ io_tlb_orig_addr = alloc_bootmem(
++ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++ if (!io_tlb_overflow_buffer)
++ panic("Cannot allocate SWIOTLB overflow buffer!\n");
++
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)io_tlb_overflow_buffer,
++ get_order(io_tlb_overflow),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc)
++ panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
++
++ iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
++ iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
++
++ printk(KERN_INFO "Software IO TLB enabled:\n"
++ " Aperture: %lu megabytes\n"
++ " Kernel range: %p - %p\n"
++ " Address size: %u bits\n",
++ bytes >> 20,
++ iotlb_virt_start, iotlb_virt_start + bytes,
++ dma_bits);
++}
++
++void
++swiotlb_init(void)
++{
++ long ram_end;
++ size_t defsz = 64 * (1 << 20); /* 64MB default size */
++
++ if (swiotlb_force == 1) {
++ swiotlb = 1;
++ } else if ((swiotlb_force != -1) &&
++ is_running_on_xen() &&
++ is_initial_xendomain()) {
++ /* Domain 0 always has a swiotlb. */
++ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++ if (ram_end <= 0x7ffff)
++ defsz = 2 * (1 << 20); /* 2MB on <2GB systems. */
++ swiotlb = 1;
++ }
++
++ if (swiotlb)
++ swiotlb_init_with_default_size(defsz);
++ else
++ printk(KERN_INFO "Software IO TLB disabled\n");
++}
++
++/*
++ * We use __copy_to_user_inatomic to transfer to the host buffer because the
++ * buffer may be mapped read-only (e.g., in the blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++{
++ if (PageHighMem(buffer.page)) {
++ size_t len, bytes;
++ char *dev, *host, *kmp;
++ len = size;
++ while (len != 0) {
++ unsigned long flags;
++
++ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++ bytes = PAGE_SIZE - buffer.offset;
++ local_irq_save(flags); /* protects KM_BOUNCE_READ */
++ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
++ dev = dma_addr + size - len;
++ host = kmp + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dev, bytes))
++ /* inaccessible */;
++ } else
++ memcpy(dev, host, bytes);
++ kunmap_atomic(kmp, KM_BOUNCE_READ);
++ local_irq_restore(flags);
++ len -= bytes;
++ buffer.page++;
++ buffer.offset = 0;
++ }
++ } else {
++ char *host = (char *)phys_to_virt(
++ page_to_pseudophys(buffer.page)) + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dma_addr, size))
++ /* inaccessible */;
++ } else if (dir == DMA_TO_DEVICE)
++ memcpy(dma_addr, host, size);
++ }
++}
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++{
++ unsigned long flags;
++ char *dma_addr;
++ unsigned int nslots, stride, index, wrap;
++ struct phys_addr slot_buf;
++ int i;
++
++ /*
++ * For mappings greater than a page, we limit the stride (and
++ * hence alignment) to a page size.
++ */
++ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ if (size > PAGE_SIZE)
++ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++ else
++ stride = 1;
++
++ BUG_ON(!nslots);
++
++ /*
++ * Find suitable number of IO TLB entries size that will fit this
++ * request and allocate a buffer from that IO TLB pool.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ wrap = index = ALIGN(io_tlb_index, stride);
++
++ if (index >= iotlb_nslabs)
++ wrap = index = 0;
++
++ do {
++ /*
++ * If we find a slot that indicates we have 'nslots'
++ * number of contiguous buffers, we allocate the
++ * buffers from that slot and mark the entries as '0'
++ * indicating unavailable.
++ */
++ if (io_tlb_list[index] >= nslots) {
++ int count = 0;
++
++ for (i = index; i < (int)(index + nslots); i++)
++ io_tlb_list[i] = 0;
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ dma_addr = iotlb_virt_start +
++ (index << IO_TLB_SHIFT);
++
++ /*
++ * Update the indices to avoid searching in
++ * the next round.
++ */
++ io_tlb_index =
++ ((index + nslots) < iotlb_nslabs
++ ? (index + nslots) : 0);
++
++ goto found;
++ }
++ index += stride;
++ if (index >= iotlb_nslabs)
++ index = 0;
++ } while (index != wrap);
++
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++ return NULL;
++ }
++ found:
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++
++ /*
++ * Save away the mapping from the original address to the DMA address.
++ * This is needed when we sync the memory. Then we sync the buffer if
++ * needed.
++ */
++ slot_buf = buffer;
++ for (i = 0; i < nslots; i++) {
++ slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
++ slot_buf.offset &= PAGE_SIZE - 1;
++ io_tlb_orig_addr[index+i] = slot_buf;
++ slot_buf.offset += 1 << IO_TLB_SHIFT;
++ }
++ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++
++ return dma_addr;
++}
++
++static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
++{
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++ buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
++ buffer.page += buffer.offset >> PAGE_SHIFT;
++ buffer.offset &= PAGE_SIZE - 1;
++ return buffer;
++}
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ unsigned long flags;
++ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
++
++ /*
++ * First, sync the memory before unmapping the entry
++ */
++ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++
++ /*
++ * Return the buffer to the free list by setting the corresponding
++ * entries to indicate the number of contiguous entries available.
++ * While returning the entries to the free list, we merge the entries
++ * with slots below and above the pool being returned.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++ io_tlb_list[index + nslots] : 0);
++ /*
++ * Step 1: return the slots to the free list, merging the
++ * slots with succeeding slots
++ */
++ for (i = index + nslots - 1; i >= index; i--)
++ io_tlb_list[i] = ++count;
++ /*
++ * Step 2: merge the returned slots with the preceding slots,
++ * if available (non-zero)
++ */
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ }
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
++ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++ __sync_single(buffer, dma_addr, size, dir);
++}
++
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++ /*
++ * Ran out of IOMMU space for this operation. This is very bad.
++ * Unfortunately drivers cannot handle this operation properly
++ * unless they check for pci_dma_mapping_error (most don't).
++ * When the mapping is small enough return a static buffer to limit
++ * the damage, or panic when the transfer is too big.
++ */
++ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++
++ if (size > io_tlb_overflow && do_panic) {
++ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Memory would be corrupted\n");
++ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Random memory would be DMAed\n");
++ }
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode. The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++ dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
++ void *map;
++ struct phys_addr buffer;
++
++ BUG_ON(dir == DMA_NONE);
++
++ /*
++ * If the pointer passed in happens to be in the device's DMA window,
++ * we can safely return the device addr and not worry about bounce
++ * buffering it.
++ */
++ if (!range_straddles_page_boundary(__pa(ptr), size) &&
++ !address_needs_mapping(hwdev, dev_addr))
++ return dev_addr;
++
++ /*
++ * Oh well, have to allocate and map a bounce buffer.
++ */
++ gnttab_dma_unmap_page(dev_addr);
++ buffer.page = virt_to_page(ptr);
++ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++ map = map_single(hwdev, buffer, size, dir);
++ if (!map) {
++ swiotlb_full(hwdev, size, dir, 1);
++ map = io_tlb_overflow_buffer;
++ }
++
++ dev_addr = virt_to_bus(map);
++ return dev_addr;
++}
++
++/*
++ * Unmap a single streaming mode DMA translation. The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call. All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++ int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++ else
++ gnttab_dma_unmap_page(dev_addr);
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so. At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
++ */
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface. Here the scatter-gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ * DMA address/length pairs than there are SG table elements.
++ * (for example via virtual mapping capabilities)
++ * The routine returns the number of addr/length pairs actually
++ * used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++) {
++ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
++
++ if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
++ + sg->offset, sg->length)
++ || address_needs_mapping(hwdev, dev_addr)) {
++ gnttab_dma_unmap_page(dev_addr);
++ buffer.page = sg->page;
++ buffer.offset = sg->offset;
++ map = map_single(hwdev, buffer, sg->length, dir);
++ if (!map) {
++				/* Don't panic here; we expect map_sg users
++				   to do proper error handling. */
++ swiotlb_full(hwdev, sg->length, dir, 0);
++ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++ sg[0].dma_length = 0;
++ return 0;
++ }
++ sg->dma_address = (dma_addr_t)virt_to_bus(map);
++ } else
++ sg->dma_address = dev_addr;
++ sg->dma_length = sg->length;
++ }
++ return nelems;
++}
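++
++/*
++ * Illustrative only, not part of the original patch: a caller consumes
++ * the mapped list through the sg_dma_* accessors, e.g.
++ *
++ *	int i, n = swiotlb_map_sg(hwdev, sg, nents, DMA_TO_DEVICE);
++ *	for (i = 0; i < n; i++)
++ *		program_hw(sg_dma_address(&sg[i]), sg_dma_length(&sg[i]));
++ *
++ * where program_hw() stands in for device-specific descriptor setup.
++ */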
++
++/*
++ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (in_swiotlb_aperture(sg->dma_address))
++ unmap_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++ else
++ gnttab_dma_unmap_page(sg->dma_address);
++}
++
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (in_swiotlb_aperture(sg->dma_address))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (in_swiotlb_aperture(sg->dma_address))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++#ifdef CONFIG_HIGHMEM
++
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++
++ dev_addr = gnttab_dma_map_page(page) + offset;
++ if (address_needs_mapping(hwdev, dev_addr)) {
++ gnttab_dma_unmap_page(dev_addr);
++ buffer.page = page;
++ buffer.offset = offset;
++ map = map_single(hwdev, buffer, size, direction);
++ if (!map) {
++ swiotlb_full(hwdev, size, direction, 1);
++ map = io_tlb_overflow_buffer;
++ }
++ dev_addr = (dma_addr_t)virt_to_bus(map);
++ }
++
++ return dev_addr;
++}
++
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (in_swiotlb_aperture(dma_address))
++ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
++}
++
++#endif
++
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
++{
++ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
++
++/*
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * properly. For example, if your device can only drive the low 24 bits
++ * this function.
++ */
++int
++swiotlb_dma_supported (struct device *hwdev, u64 mask)
++{
++ return (mask >= ((1UL << dma_bits) - 1));
++}
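++
++/*
++ * Illustrative only, not part of the original patch: a device limited to
++ * 24-bit addressing, as in the comment above, would probe with
++ *
++ *	if (!swiotlb_dma_supported(hwdev, 0x00ffffffULL))
++ *		goto fall_back_to_pio;
++ */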
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
+diff -rpuN linux-2.6.18.8/MAINTAINERS linux-2.6.18-xen-3.3.0/MAINTAINERS
+--- linux-2.6.18.8/MAINTAINERS 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/MAINTAINERS 2008-08-21 11:36:07.000000000 +0200
+@@ -2558,6 +2558,13 @@ M: pfg@sgi.com
+ L: linux-ia64@vger.kernel.org
+ S: Supported
+
++SFC NETWORK DRIVER
++P: Steve Hodgson
++P: Ben Hutchings
++P: Robert Stonehouse
++M: linux-net-drivers@solarflare.com
++S: Supported
++
+ SGI VISUAL WORKSTATION 320 AND 540
+ P: Andrey Panin
+ M: pazke@donpac.ru
+diff -rpuN linux-2.6.18.8/mm/highmem.c linux-2.6.18-xen-3.3.0/mm/highmem.c
+--- linux-2.6.18.8/mm/highmem.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/mm/highmem.c 2008-08-21 11:36:07.000000000 +0200
+@@ -142,6 +142,17 @@ start:
+ return vaddr;
+ }
+
++#ifdef CONFIG_XEN
++void kmap_flush_unused(void)
++{
++ spin_lock(&kmap_lock);
++ flush_all_zero_pkmaps();
++ spin_unlock(&kmap_lock);
++}
++
++EXPORT_SYMBOL(kmap_flush_unused);
++#endif
++
+ void fastcall *kmap_high(struct page *page)
+ {
+ unsigned long vaddr;
+@@ -457,6 +468,12 @@ void blk_queue_bounce(request_queue_t *q
+ mempool_t *pool;
+
+ /*
++ * Data-less bio, nothing to bounce
++ */
++ if (bio_empty_barrier(*bio_orig))
++ return;
++
++ /*
+ * for non-isa bounce case, just check if the bounce pfn is equal
+ * to or bigger than the highest pfn in the system -- in that case,
+ * don't waste time iterating over bio segments
+diff -rpuN linux-2.6.18.8/mm/memory.c linux-2.6.18-xen-3.3.0/mm/memory.c
+--- linux-2.6.18.8/mm/memory.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/mm/memory.c 2008-08-21 11:36:07.000000000 +0200
+@@ -396,6 +396,12 @@ struct page *vm_normal_page(struct vm_ar
+ return NULL;
+ }
+
++#if defined(CONFIG_XEN) && defined(CONFIG_X86)
++ /* XEN: Covers user-space grant mappings (even of local pages). */
++ if (unlikely(vma->vm_flags & VM_FOREIGN))
++ return NULL;
++#endif
++
+ /*
+ * Add some anal sanity checks for now. Eventually,
+ * we should just do "return pfn_to_page(pfn)", but
+@@ -403,7 +409,8 @@ struct page *vm_normal_page(struct vm_ar
+ * and that the resulting page looks ok.
+ */
+ if (unlikely(!pfn_valid(pfn))) {
+- print_bad_pte(vma, pte, addr);
++ if (!(vma->vm_flags & VM_RESERVED))
++ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+
+@@ -658,8 +665,12 @@ static unsigned long zap_pte_range(struc
+ page->index > details->last_index))
+ continue;
+ }
+- ptent = ptep_get_and_clear_full(mm, addr, pte,
+- tlb->fullmm);
++ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
++ ptent = vma->vm_ops->zap_pte(vma, addr, pte,
++ tlb->fullmm);
++ else
++ ptent = ptep_get_and_clear_full(mm, addr, pte,
++ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ if (unlikely(!page))
+ continue;
+@@ -754,6 +765,7 @@ static unsigned long unmap_page_range(st
+ details = NULL;
+
+ BUG_ON(addr >= end);
++
+ tlb_start_vma(tlb, vma);
+ pgd = pgd_offset(vma->vm_mm, addr);
+ do {
+@@ -891,6 +903,7 @@ unsigned long zap_page_range(struct vm_a
+ tlb_finish_mmu(tlb, address, end);
+ return end;
+ }
++EXPORT_SYMBOL(zap_page_range);
+
+ /*
+ * Do a quick page-table lookup for a single page.
+@@ -1030,6 +1043,26 @@ int get_user_pages(struct task_struct *t
+ continue;
+ }
+
++#ifdef CONFIG_XEN
++ if (vma && (vma->vm_flags & VM_FOREIGN)) {
++ struct page **map = vma->vm_private_data;
++ int offset = (start - vma->vm_start) >> PAGE_SHIFT;
++ if (map[offset] != NULL) {
++ if (pages) {
++ struct page *page = map[offset];
++
++ pages[i] = page;
++ get_page(page);
++ }
++ if (vmas)
++ vmas[i] = vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ continue;
++ }
++ }
++#endif
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ || !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+@@ -1379,6 +1412,102 @@ int remap_pfn_range(struct vm_area_struc
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+
++#ifdef CONFIG_XEN
++static inline int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pte_t *pte;
++ int err;
++ struct page *pmd_page;
++ spinlock_t *ptl;
++
++ pte = (mm == &init_mm) ?
++ pte_alloc_kernel(pmd, addr) :
++ pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ if (!pte)
++ return -ENOMEM;
++
++ BUG_ON(pmd_huge(*pmd));
++
++ pmd_page = pmd_page(*pmd);
++
++ do {
++ err = fn(pte, pmd_page, addr, data);
++ if (err)
++ break;
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++
++ if (mm != &init_mm)
++ pte_unmap_unlock(pte-1, ptl);
++ return err;
++}
++
++static inline int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pmd_t *pmd;
++ unsigned long next;
++ int err;
++
++ pmd = pmd_alloc(mm, pud, addr);
++ if (!pmd)
++ return -ENOMEM;
++ do {
++ next = pmd_addr_end(addr, end);
++ err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pmd++, addr = next, addr != end);
++ return err;
++}
++
++static inline int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long addr, unsigned long end,
++ pte_fn_t fn, void *data)
++{
++ pud_t *pud;
++ unsigned long next;
++ int err;
++
++ pud = pud_alloc(mm, pgd, addr);
++ if (!pud)
++ return -ENOMEM;
++ do {
++ next = pud_addr_end(addr, end);
++ err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pud++, addr = next, addr != end);
++ return err;
++}
++
++/*
++ * Scan a region of virtual memory, filling in page tables as necessary
++ * and calling a provided function on each leaf page table.
++ */
++int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
++ unsigned long size, pte_fn_t fn, void *data)
++{
++ pgd_t *pgd;
++ unsigned long next;
++ unsigned long end = addr + size;
++ int err;
++
++ BUG_ON(addr >= end);
++ pgd = pgd_offset(mm, addr);
++ do {
++ next = pgd_addr_end(addr, end);
++ err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
++ if (err)
++ break;
++ } while (pgd++, addr = next, addr != end);
++ return err;
++}
++EXPORT_SYMBOL_GPL(apply_to_page_range);
++#endif
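++
++/*
++ * Illustrative only, not part of the patch: a pte_fn_t callback and a
++ * call over a kernel virtual range might look like
++ *
++ *	static int touch_pte(pte_t *pte, struct page *pmd_page,
++ *			     unsigned long addr, void *data)
++ *	{
++ *		return 0;	(inspect or rewrite the pte here)
++ *	}
++ *
++ *	apply_to_page_range(&init_mm, vaddr, size, touch_pte, NULL);
++ */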
++
+ /*
+ * handle_pte_fault chooses page fault handler according to an entry
+ * which was read non-atomically. Before making any commitment, on
+diff -rpuN linux-2.6.18.8/mm/mmap.c linux-2.6.18-xen-3.3.0/mm/mmap.c
+--- linux-2.6.18.8/mm/mmap.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/mm/mmap.c 2008-08-21 11:36:07.000000000 +0200
+@@ -1963,6 +1963,10 @@ void exit_mmap(struct mm_struct *mm)
+ unsigned long nr_accounted = 0;
+ unsigned long end;
+
++#ifdef arch_exit_mmap
++ arch_exit_mmap(mm);
++#endif
++
+ lru_add_drain();
+ flush_cache_mm(mm);
+ tlb = tlb_gather_mmu(mm, 1);
+diff -rpuN linux-2.6.18.8/mm/mprotect.c linux-2.6.18-xen-3.3.0/mm/mprotect.c
+--- linux-2.6.18.8/mm/mprotect.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/mm/mprotect.c 2008-08-21 11:36:07.000000000 +0200
+@@ -76,6 +76,8 @@ static inline void change_pmd_range(stru
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
++ if (arch_change_pte_range(mm, pmd, addr, next, newprot))
++ continue;
+ change_pte_range(mm, pmd, addr, next, newprot);
+ } while (pmd++, addr = next, addr != end);
+ }
+diff -rpuN linux-2.6.18.8/mm/page_alloc.c linux-2.6.18-xen-3.3.0/mm/page_alloc.c
+--- linux-2.6.18.8/mm/page_alloc.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/mm/page_alloc.c 2008-08-21 11:36:07.000000000 +0200
+@@ -154,7 +154,11 @@ static void bad_page(struct page *page)
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+- 1 << PG_buddy );
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign );
+ set_page_count(page, 0);
+ reset_page_mapcount(page);
+ page->mapping = NULL;
+@@ -389,7 +393,11 @@ static inline int free_pages_check(struc
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+- 1 << PG_buddy ))))
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign ))))
+ bad_page(page);
+ if (PageDirty(page))
+ __ClearPageDirty(page);
+@@ -443,6 +451,12 @@ static void __free_pages_ok(struct page
+ int i;
+ int reserved = 0;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ arch_free_page(page, order);
+ if (!PageHighMem(page))
+ debug_check_no_locks_freed(page_address(page),
+@@ -538,7 +552,11 @@ static int prep_new_page(struct page *pa
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+- 1 << PG_buddy ))))
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign ))))
+ bad_page(page);
+
+ /*
+@@ -717,6 +735,12 @@ static void fastcall free_hot_cold_page(
+ struct per_cpu_pages *pcp;
+ unsigned long flags;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ arch_free_page(page, 0);
+
+ if (PageAnon(page))
+diff -rpuN linux-2.6.18.8/net/bridge/br_netfilter.c linux-2.6.18-xen-3.3.0/net/bridge/br_netfilter.c
+--- linux-2.6.18.8/net/bridge/br_netfilter.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/bridge/br_netfilter.c 2008-08-21 11:36:07.000000000 +0200
+@@ -127,10 +127,10 @@ static inline struct nf_bridge_info *nf_
+
+ static inline void nf_bridge_save_header(struct sk_buff *skb)
+ {
+- int header_size = 16;
++ int header_size = ETH_HLEN;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+- header_size = 18;
++ header_size += VLAN_HLEN;
+
+ memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
+ }
+diff -rpuN linux-2.6.18.8/net/bridge/netfilter/ebtables.c linux-2.6.18-xen-3.3.0/net/bridge/netfilter/ebtables.c
+--- linux-2.6.18.8/net/bridge/netfilter/ebtables.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/bridge/netfilter/ebtables.c 2008-08-21 11:36:07.000000000 +0200
+@@ -597,7 +597,7 @@ ebt_check_entry(struct ebt_entry *e, str
+ struct ebt_entry_target *t;
+ struct ebt_target *target;
+ unsigned int i, j, hook = 0, hookmask = 0;
+- size_t gap = e->next_offset - e->target_offset;
++ size_t gap;
+ int ret;
+
+ /* don't mess with the struct ebt_entries */
+@@ -647,6 +647,7 @@ ebt_check_entry(struct ebt_entry *e, str
+ if (ret != 0)
+ goto cleanup_watchers;
+ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ gap = e->next_offset - e->target_offset;
+ target = find_target_lock(t->u.name, &ret, &ebt_mutex);
+ if (!target)
+ goto cleanup_watchers;
+diff -rpuN linux-2.6.18.8/net/core/dev.c linux-2.6.18-xen-3.3.0/net/core/dev.c
+--- linux-2.6.18.8/net/core/dev.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/core/dev.c 2008-08-21 11:36:07.000000000 +0200
+@@ -113,11 +113,18 @@
+ #include <linux/wireless.h>
+ #include <net/iw_handler.h>
+ #include <asm/current.h>
++#include <linux/err.h>
+ #include <linux/audit.h>
+ #include <linux/dmaengine.h>
+ #include <linux/err.h>
+ #include <linux/ctype.h>
+
++#ifdef CONFIG_XEN
++#include <net/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#endif
++
+ /*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+@@ -1328,7 +1335,7 @@ static int dev_gso_segment(struct sk_buf
+ /* Verifying header integrity only. */
+ if (!segs)
+ return 0;
+-
++
+ if (unlikely(IS_ERR(segs)))
+ return PTR_ERR(segs);
+
+@@ -1391,6 +1398,43 @@ out_kfree_skb:
+ } \
+ }
+
++#ifdef CONFIG_XEN
++inline int skb_checksum_setup(struct sk_buff *skb)
++{
++ if (skb->proto_csum_blank) {
++ if (skb->protocol != htons(ETH_P_IP))
++ goto out;
++ skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
++ if (skb->h.raw >= skb->tail)
++ goto out;
++ switch (skb->nh.iph->protocol) {
++ case IPPROTO_TCP:
++ skb->csum = offsetof(struct tcphdr, check);
++ break;
++ case IPPROTO_UDP:
++ skb->csum = offsetof(struct udphdr, check);
++ break;
++ default:
++ if (net_ratelimit())
++ printk(KERN_ERR "Attempting to checksum a non-"
++ "TCP/UDP packet, dropping a protocol"
++ " %d packet", skb->nh.iph->protocol);
++ goto out;
++ }
++ if ((skb->h.raw + skb->csum + 2) > skb->tail)
++ goto out;
++ skb->ip_summed = CHECKSUM_HW;
++ skb->proto_csum_blank = 0;
++ }
++ return 0;
++out:
++ return -EPROTO;
++}
++#else
++inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
++#endif
++
++
+ /**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+@@ -1423,6 +1467,12 @@ int dev_queue_xmit(struct sk_buff *skb)
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
++ /* If a checksum-deferred packet is forwarded to a device that needs a
++ * checksum, correct the pointers and force checksumming.
++ */
++ if (skb_checksum_setup(skb))
++ goto out_kfree_skb;
++
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+@@ -1798,6 +1848,19 @@ int netif_receive_skb(struct sk_buff *sk
+ }
+ #endif
+
++#ifdef CONFIG_XEN
++ switch (skb->ip_summed) {
++ case CHECKSUM_UNNECESSARY:
++ skb->proto_data_valid = 1;
++ break;
++ case CHECKSUM_HW:
++ /* XXX Implement me. */
++ default:
++ skb->proto_data_valid = 0;
++ break;
++ }
++#endif
++
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (!ptype->dev || ptype->dev == skb->dev) {
+ if (pt_prev)
+@@ -3584,6 +3647,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
+ EXPORT_SYMBOL(net_enable_timestamp);
+ EXPORT_SYMBOL(net_disable_timestamp);
+ EXPORT_SYMBOL(dev_get_flags);
++EXPORT_SYMBOL(skb_checksum_setup);
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ EXPORT_SYMBOL(br_handle_frame_hook);
+diff -rpuN linux-2.6.18.8/net/core/neighbour.c linux-2.6.18-xen-3.3.0/net/core/neighbour.c
+--- linux-2.6.18.8/net/core/neighbour.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/core/neighbour.c 2008-08-21 11:36:07.000000000 +0200
+@@ -2679,7 +2679,6 @@ EXPORT_SYMBOL(neigh_table_clear);
+ EXPORT_SYMBOL(neigh_table_init);
+ EXPORT_SYMBOL(neigh_table_init_no_netlink);
+ EXPORT_SYMBOL(neigh_update);
+-EXPORT_SYMBOL(neigh_update_hhs);
+ EXPORT_SYMBOL(pneigh_enqueue);
+ EXPORT_SYMBOL(pneigh_lookup);
+ EXPORT_SYMBOL(neightbl_dump_info);
+diff -rpuN linux-2.6.18.8/net/core/skbuff.c linux-2.6.18-xen-3.3.0/net/core/skbuff.c
+--- linux-2.6.18.8/net/core/skbuff.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/core/skbuff.c 2008-08-21 11:36:07.000000000 +0200
+@@ -240,6 +240,7 @@ struct sk_buff *alloc_skb_from_cache(kme
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_segs = 0;
+ skb_shinfo(skb)->gso_type = 0;
++ skb_shinfo(skb)->ip6_frag_id = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+ out:
+ return skb;
+@@ -464,6 +465,10 @@ struct sk_buff *skb_clone(struct sk_buff
+ C(local_df);
+ n->cloned = 1;
+ n->nohdr = 0;
++#ifdef CONFIG_XEN
++ C(proto_data_valid);
++ C(proto_csum_blank);
++#endif
+ C(pkt_type);
+ C(ip_summed);
+ C(priority);
+@@ -2071,7 +2076,6 @@ EXPORT_SYMBOL(pskb_copy);
+ EXPORT_SYMBOL(pskb_expand_head);
+ EXPORT_SYMBOL(skb_checksum);
+ EXPORT_SYMBOL(skb_clone);
+-EXPORT_SYMBOL(skb_clone_fraglist);
+ EXPORT_SYMBOL(skb_copy);
+ EXPORT_SYMBOL(skb_copy_and_csum_bits);
+ EXPORT_SYMBOL(skb_copy_and_csum_dev);
+diff -rpuN linux-2.6.18.8/net/ipv4/netfilter/ip_nat_proto_tcp.c linux-2.6.18-xen-3.3.0/net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- linux-2.6.18.8/net/ipv4/netfilter/ip_nat_proto_tcp.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/ipv4/netfilter/ip_nat_proto_tcp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -129,7 +129,12 @@ tcp_manip_pkt(struct sk_buff **pskb,
+ if (hdrsize < sizeof(*hdr))
+ return 1;
+
+- hdr->check = ip_nat_cheat_check(~oldip, newip,
++#ifdef CONFIG_XEN
++ if ((*pskb)->proto_csum_blank)
++ hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
++ else
++#endif
++ hdr->check = ip_nat_cheat_check(~oldip, newip,
+ ip_nat_cheat_check(oldport ^ 0xFFFF,
+ newport,
+ hdr->check));
+diff -rpuN linux-2.6.18.8/net/ipv4/netfilter/ip_nat_proto_udp.c linux-2.6.18-xen-3.3.0/net/ipv4/netfilter/ip_nat_proto_udp.c
+--- linux-2.6.18.8/net/ipv4/netfilter/ip_nat_proto_udp.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/ipv4/netfilter/ip_nat_proto_udp.c 2008-08-21 11:36:07.000000000 +0200
+@@ -113,11 +113,17 @@ udp_manip_pkt(struct sk_buff **pskb,
+ newport = tuple->dst.u.udp.port;
+ portptr = &hdr->dest;
+ }
+- if (hdr->check) /* 0 is a special case meaning no checksum */
+- hdr->check = ip_nat_cheat_check(~oldip, newip,
++ if (hdr->check) { /* 0 is a special case meaning no checksum */
++#ifdef CONFIG_XEN
++ if ((*pskb)->proto_csum_blank)
++ hdr->check = ip_nat_cheat_check(oldip, ~newip, hdr->check);
++ else
++#endif
++ hdr->check = ip_nat_cheat_check(~oldip, newip,
+ ip_nat_cheat_check(*portptr ^ 0xFFFF,
+ newport,
+ hdr->check));
++ }
+ *portptr = newport;
+ return 1;
+ }
+diff -rpuN linux-2.6.18.8/net/ipv4/tcp_input.c linux-2.6.18-xen-3.3.0/net/ipv4/tcp_input.c
+--- linux-2.6.18.8/net/ipv4/tcp_input.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/ipv4/tcp_input.c 2008-08-21 11:36:07.000000000 +0200
+@@ -127,7 +127,7 @@ static void tcp_measure_rcv_mss(struct s
+ /* skb->len may jitter because of SACKs, even if peer
+ * sends good full-sized frames.
+ */
+- len = skb->len;
++ len = skb_shinfo(skb)->gso_size ?: skb->len;
+ if (len >= icsk->icsk_ack.rcv_mss) {
+ icsk->icsk_ack.rcv_mss = len;
+ } else {
+diff -rpuN linux-2.6.18.8/net/ipv4/xfrm4_output.c linux-2.6.18-xen-3.3.0/net/ipv4/xfrm4_output.c
+--- linux-2.6.18.8/net/ipv4/xfrm4_output.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/net/ipv4/xfrm4_output.c 2008-08-21 11:36:07.000000000 +0200
+@@ -18,6 +18,8 @@
+ #include <net/xfrm.h>
+ #include <net/icmp.h>
+
++extern int skb_checksum_setup(struct sk_buff *skb);
++
+ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
+ {
+ int mtu, ret = 0;
+@@ -48,6 +50,10 @@ static int xfrm4_output_one(struct sk_bu
+ struct xfrm_state *x = dst->xfrm;
+ int err;
+
++ err = skb_checksum_setup(skb);
++ if (err)
++ goto error_nolock;
++
+ if (skb->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(skb, 0);
+ if (err)
+diff -rpuN linux-2.6.18.8/scripts/Makefile.build linux-2.6.18-xen-3.3.0/scripts/Makefile.build
+--- linux-2.6.18.8/scripts/Makefile.build 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/scripts/Makefile.build 2008-08-21 11:36:07.000000000 +0200
+@@ -68,6 +68,20 @@ ifndef obj
+ $(warning kbuild: Makefile.build is included improperly)
+ endif
+
++ifeq ($(CONFIG_XEN),y)
++$(objtree)/scripts/Makefile.xen: $(srctree)/scripts/Makefile.xen.awk $(srctree)/scripts/Makefile.build
++ @echo ' Updating $@'
++ $(if $(shell echo a | $(AWK) '{ print gensub(/a/, "AA", "g"); }'),\
++ ,$(error 'Your awk program does not define gensub. Use gawk or another awk with gensub'))
++ @$(AWK) -f $< $(filter-out $<,$^) >$@
++
++xen-src-single-used-m := $(patsubst $(srctree)/%,%,$(wildcard $(addprefix $(srctree)/,$(single-used-m:.o=-xen.c))))
++xen-single-used-m := $(xen-src-single-used-m:-xen.c=.o)
++single-used-m := $(filter-out $(xen-single-used-m),$(single-used-m))
++
++-include $(objtree)/scripts/Makefile.xen
++endif
++
+ # ===========================================================================
+
+ ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),)
+diff -rpuN linux-2.6.18.8/scripts/Makefile.lib linux-2.6.18-xen-3.3.0/scripts/Makefile.lib
+--- linux-2.6.18.8/scripts/Makefile.lib 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/scripts/Makefile.lib 2008-08-21 11:36:07.000000000 +0200
+@@ -13,6 +13,12 @@ obj-m := $(filter-out $(obj-y),$(obj-m))
+
+ lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
+
++# Remove objects forcibly disabled
++
++obj-y := $(filter-out $(disabled-obj-y),$(obj-y))
++obj-m := $(filter-out $(disabled-obj-y),$(obj-m))
++lib-y := $(filter-out $(disabled-obj-y),$(lib-y))
++
+
+ # Handle objects in subdirs
+ # ---------------------------------------------------------------------------
+diff -rpuN linux-2.6.18.8/scripts/Makefile.xen.awk linux-2.6.18-xen-3.3.0/scripts/Makefile.xen.awk
+--- linux-2.6.18.8/scripts/Makefile.xen.awk 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/scripts/Makefile.xen.awk 2008-08-21 11:36:07.000000000 +0200
+@@ -0,0 +1,34 @@
++BEGIN {
++ is_rule = 0
++}
++
++/^[[:space:]]*#/ {
++ next
++}
++
++/^[[:space:]]*$/ {
++ if (is_rule)
++ print("")
++ is_rule = 0
++ next
++}
++
++/:[[:space:]]*%\.[cS][[:space:]]/ {
++ line = gensub(/%.([cS])/, "%-xen.\\1", "g", $0)
++ line = gensub(/(single-used-m)/, "xen-\\1", "g", line)
++ print line
++ is_rule = 1
++ next
++}
++
++/^[^\t]$/ {
++ if (is_rule)
++ print("")
++ is_rule = 0
++ next
++}
++
++is_rule {
++ print $0
++ next
++}
+diff -rpuN linux-2.6.18.8/scripts/mod/sumversion.c linux-2.6.18-xen-3.3.0/scripts/mod/sumversion.c
+--- linux-2.6.18.8/scripts/mod/sumversion.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/scripts/mod/sumversion.c 2008-08-21 11:36:07.000000000 +0200
+@@ -7,6 +7,7 @@
+ #include <ctype.h>
+ #include <errno.h>
+ #include <string.h>
++#include <limits.h>
+ #include "modpost.h"
+
+ /*
+diff -rpuN linux-2.6.18.8/sound/pci/hda/hda_intel.c linux-2.6.18-xen-3.3.0/sound/pci/hda/hda_intel.c
+--- linux-2.6.18.8/sound/pci/hda/hda_intel.c 2007-02-24 00:52:30.000000000 +0100
++++ linux-2.6.18-xen-3.3.0/sound/pci/hda/hda_intel.c 2008-08-21 11:36:07.000000000 +0200
+@@ -80,6 +80,8 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},
+ "{Intel, ICH7},"
+ "{Intel, ESB2},"
+ "{Intel, ICH8},"
++ "{Intel, ICH9},"
++ "{Intel, ICH10},"
+ "{ATI, SB450},"
+ "{ATI, SB600},"
+ "{ATI, RS600},"
+@@ -1634,6 +1636,10 @@ static struct pci_device_id azx_ids[] =
+ { 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */
+ { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
+ { 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */
++ { 0x8086, 0x293e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH9 */
++ { 0x8086, 0x293f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH9 */
++ { 0x8086, 0x3a3e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH10 */
++ { 0x8086, 0x3a6e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH10 */
+ { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
+ { 0x1002, 0x4383, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB600 */
+ { 0x1002, 0x793b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATIHDMI }, /* ATI RS600 HDMI */
diff --git a/tags/2.6.18-12/30001_nfnetlink_log-null-deref.patch b/tags/2.6.18-12/30001_nfnetlink_log-null-deref.patch
new file mode 100644
index 0000000..b86a409
--- /dev/null
+++ b/tags/2.6.18-12/30001_nfnetlink_log-null-deref.patch
@@ -0,0 +1,37 @@
+From: Michal Miroslaw <mirq-linux@rere.qmqm.pl>
+Date: Sun, 4 Mar 2007 23:59:20 +0000 (-0800)
+Subject: [NETFILTER]: nfnetlink_log: fix possible NULL pointer dereference
+X-Git-Tag: v2.6.21~469^2~10
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=dd16704eba171b32ef0cded3a4f562b33b911066
+
+[NETFILTER]: nfnetlink_log: fix possible NULL pointer dereference
+
+Eliminate possible NULL pointer dereference in nfulnl_recv_config().
+
+Signed-off-by: Michal Miroslaw <mirq-linux@rere.qmqm.pl>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index 1b94051..b669db5 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -858,6 +858,9 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
+ ret = -EINVAL;
+ break;
+ }
++
++ if (!inst)
++ goto out;
+ } else {
+ if (!inst) {
+ UDEBUG("no config command, and no instance for "
+@@ -911,6 +914,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
+
+ out_put:
+ instance_put(inst);
++out:
+ return ret;
+ }
+
diff --git a/tags/2.6.18-12/30002_nf_conntrack-set-nfctinfo.patch b/tags/2.6.18-12/30002_nf_conntrack-set-nfctinfo.patch
new file mode 100644
index 0000000..f540a67
--- /dev/null
+++ b/tags/2.6.18-12/30002_nf_conntrack-set-nfctinfo.patch
@@ -0,0 +1,35 @@
+From: Patrick McHardy <kaber@trash.net>
+Date: Wed, 7 Mar 2007 21:34:42 +0000 (+0100)
+Subject: nf_conntrack: fix incorrect classification of IPv6 fragments as ESTABLISHED
+X-Git-Tag: v2.6.20.3~11
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.20.y.git;a=commitdiff_plain;h=868f0120e0f93d070ea7f3e969c09dbab8ad7bc7
+
+nf_conntrack: fix incorrect classification of IPv6 fragments as ESTABLISHED
+
+[NETFILTER]: nf_conntrack: fix incorrect classification of IPv6 fragments as ESTABLISHED
+
+The individual fragments of a packet reassembled by conntrack have the
+conntrack reference from the reassembled packet attached, but nfctinfo
+is not copied. This leaves it initialized to 0, which unfortunately is
+the value of IP_CT_ESTABLISHED.
+
+The result is that all IPv6 fragments are tracked as ESTABLISHED,
+allowing them to bypass a usual ruleset which accepts ESTABLISHED
+packets early.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
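+
+For illustration only (abridged; not part of the fix): the hazard is that
+zero is itself a meaningful conntrack state, roughly
+
+	enum ip_conntrack_info {
+		IP_CT_ESTABLISHED,	/* == 0 */
+		IP_CT_RELATED,
+		IP_CT_NEW
+	};
+
+so an nfctinfo that is merely left zeroed reads back as ESTABLISHED.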
+
+diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+index a20615f..6155b80 100644
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -257,6 +257,7 @@ static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+ }
+ nf_conntrack_get(reasm->nfct);
+ (*pskb)->nfct = reasm->nfct;
++ (*pskb)->nfctinfo = reasm->nfctinfo;
+ return NF_ACCEPT;
+ }
+
diff --git a/tags/2.6.18-12/30003_netlink-infinite-recursion.patch b/tags/2.6.18-12/30003_netlink-infinite-recursion.patch
new file mode 100644
index 0000000..df76325
--- /dev/null
+++ b/tags/2.6.18-12/30003_netlink-infinite-recursion.patch
@@ -0,0 +1,65 @@
+From: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Date: Wed, 25 Apr 2007 20:59:03 +0000 (+0000)
+Subject: [PATCH] NETLINK: Infinite recursion in netlink.
+X-Git-Tag: v2.6.20.8~1
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.20.y.git;a=commitdiff_plain;h=9bc1779885f4ce1a4257c5640c70b75d2ae124ad
+
+[PATCH] NETLINK: Infinite recursion in netlink.
+
+[NETLINK]: Infinite recursion in netlink.
+
+Replies to NETLINK_FIB_LOOKUP messages were misrouted back to the kernel,
+which resulted in infinite recursion and stack overflow.
+
+The bug is present in all kernel versions since the feature appeared.
+
+The patch also makes some minimal cleanup:
+
+1. Return something consistent (-ENOENT) when the fib table is missing
+2. Do not crash when the queue is empty (does not happen, but just in case)
+3. Put the result of the lookup
+
+Signed-off-by: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+diff -urN linux-source-2.6.18.orig/net/ipv4/fib_frontend.c linux-source-2.6.18/net/ipv4/fib_frontend.c
+--- linux-source-2.6.18.orig/net/ipv4/fib_frontend.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/fib_frontend.c 2007-05-01 15:21:37.000000000 -0600
+@@ -524,6 +524,8 @@
+ .fwmark = frn->fl_fwmark,
+ .tos = frn->fl_tos,
+ .scope = frn->fl_scope } } };
++
++ frn->err = -ENOENT;
+ if (tb) {
+ local_bh_disable();
+
+@@ -535,6 +537,7 @@
+ frn->nh_sel = res.nh_sel;
+ frn->type = res.type;
+ frn->scope = res.scope;
++ fib_res_put(&res);
+ }
+ local_bh_enable();
+ }
+@@ -549,6 +552,9 @@
+ struct fib_table *tb;
+
+ skb = skb_dequeue(&sk->sk_receive_queue);
++ if (skb == NULL)
++ return;
++
+ nlh = (struct nlmsghdr *)skb->data;
+ if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
+ nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn))) {
+@@ -561,7 +567,7 @@
+
+ nl_fib_lookup(frn, tb);
+
+- pid = nlh->nlmsg_pid; /*pid of sending process */
++ pid = NETLINK_CB(skb).pid; /* pid of sending process */
+ NETLINK_CB(skb).pid = 0; /* from kernel */
+ NETLINK_CB(skb).dst_pid = pid;
+ NETLINK_CB(skb).dst_group = 0; /* unicast */
diff --git a/tags/2.6.18-12/30004_nl_fib_lookup-oops.patch b/tags/2.6.18-12/30004_nl_fib_lookup-oops.patch
new file mode 100644
index 0000000..c0547fa
--- /dev/null
+++ b/tags/2.6.18-12/30004_nl_fib_lookup-oops.patch
@@ -0,0 +1,34 @@
+From: Sergey Vlasov <vsu@altlinux.ru>
+Date: Fri, 27 Apr 2007 09:18:35 +0000 (-0700)
+Subject: IPV4: Fix OOPS'er added to netlink fib.
+X-Git-Tag: v2.6.20.10~2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.20.y.git;a=commitdiff_plain;h=6af3412cff50b9a7b12b7b9cf6f01b34fbae4624
+
+IPV4: Fix OOPS'er added to netlink fib.
+
+[IPV4] nl_fib_lookup: Initialise res.r before fib_res_put(&res)
+
+When CONFIG_IP_MULTIPLE_TABLES is enabled, the code in nl_fib_lookup()
+needs to initialize the res.r field before fib_res_put(&res) - unlike
+fib_lookup(), a direct call to ->tb_lookup does not set this field.
+
+Signed-off-by: Sergey Vlasov <vsu@altlinux.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
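+
+For illustration only (simplified; not part of the fix): fib_res_put()
+is roughly
+
+	static inline void fib_res_put(struct fib_result *res)
+	{
+	#ifdef CONFIG_IP_MULTIPLE_TABLES
+		if (res->r)
+			fib_rule_put(res->r);
+	#endif
+	}
+
+so a stack-allocated fib_result whose r field was never written can hand
+a garbage pointer to fib_rule_put().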
+
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index fa2cb8c..30aae76 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -773,6 +773,10 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
+ .tos = frn->fl_tos,
+ .scope = frn->fl_scope } } };
+
++#ifdef CONFIG_IP_MULTIPLE_TABLES
++ res.r = NULL;
++#endif
++
+ frn->err = -ENOENT;
+ if (tb) {
+ local_bh_disable();
diff --git a/tags/2.6.18-12/30005_core-dump-unreadable-PT_INTERP.patch b/tags/2.6.18-12/30005_core-dump-unreadable-PT_INTERP.patch
new file mode 100644
index 0000000..33c7c4f
--- /dev/null
+++ b/tags/2.6.18-12/30005_core-dump-unreadable-PT_INTERP.patch
@@ -0,0 +1,70 @@
+From: Alexey Dobriyan <adobriyan@openvz.org>
+Date: Fri, 26 Jan 2007 08:57:16 +0000 (-0800)
+Subject: [PATCH] core-dumping unreadable binaries via PT_INTERP
+X-Git-Tag: v2.6.20-rc7^0~60
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=1fb844961818ce94e782acf6a96b92dc2303553b
+
+[PATCH] core-dumping unreadable binaries via PT_INTERP
+
+Proposed patch to fix #5 in
+http://www.isec.pl/vulnerabilities/isec-0017-binfmt_elf.txt
+aka
+http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-1073
+
+To reproduce, do
+* grab poc at the end of advisory.
+* add line "eph.p_memsz = 4096;" after "eph.p_filesz = 4096;"
+ where first "4096" is something equal to or greater than 4096.
+* ./poc /usr/bin/sudo && ls -l
+
+Here I get with 2.6.20-rc5:
+
+ -rw------- 1 ad ad 102400 2007-01-15 19:17 core
+ ---s--x--x 2 root root 101820 2007-01-15 19:15 /usr/bin/sudo
+
+Check for MAY_READ like binfmt_misc.c does.
+
+Signed-off-by: Alexey Dobriyan <adobriyan@openvz.org>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 90461f4..669dbe5 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -682,6 +682,15 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ retval = PTR_ERR(interpreter);
+ if (IS_ERR(interpreter))
+ goto out_free_interp;
++
++ /*
++ * If the binary is not readable then enforce
++ * mm->dumpable = 0 regardless of the interpreter's
++ * permissions.
++ */
++ if (file_permission(interpreter, MAY_READ) < 0)
++ bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
++
+ retval = kernel_read(interpreter, 0, bprm->buf,
+ BINPRM_BUF_SIZE);
+ if (retval != BINPRM_BUF_SIZE) {
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 6e6d456..a4d933a 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -234,6 +234,14 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
+ goto error;
+ }
+
++ /*
++ * If the binary is not readable then enforce
++ * mm->dumpable = 0 regardless of the interpreter's
++ * permissions.
++ */
++ if (file_permission(interpreter, MAY_READ) < 0)
++ bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
++
+ retval = kernel_read(interpreter, 0, bprm->buf,
+ BINPRM_BUF_SIZE);
+ if (retval < 0)
diff --git a/tags/2.6.18-12/30006_appletalk-length-mismatch.patch b/tags/2.6.18-12/30006_appletalk-length-mismatch.patch
new file mode 100644
index 0000000..b82c4fe
--- /dev/null
+++ b/tags/2.6.18-12/30006_appletalk-length-mismatch.patch
@@ -0,0 +1,93 @@
+From: Jean Delvare <jdelvare@suse.de>
+Date: Thu, 5 Apr 2007 06:52:46 +0000 (-0700)
+Subject: [APPLETALK]: Fix a remotely triggerable crash
+X-Git-Tag: v2.6.21-rc6~3
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=75559c167bddc1254db5bcff032ad5eed8bd6f4a
+
+[APPLETALK]: Fix a remotely triggerable crash
+
+When we receive an AppleTalk frame shorter than what its header says,
+we still attempt to verify its checksum, and trip on the BUG_ON() at
+the end of function atalk_sum_skb() because of the length mismatch.
+
+This has security implications because this can be triggered by simply
+sending a specially crafted ethernet frame to a target victim,
+effectively crashing that host. Thus this qualifies, I think, as a
+remote DoS. Here is the frame I used to trigger the crash, in npg
+format:
+
+<Appletalk Killer>
+{
+# Ethernet header -----
+
+ XX XX XX XX XX XX # Destination MAC
+ 00 00 00 00 00 00 # Source MAC
+ 00 1D # Length
+
+# LLC header -----
+
+ AA AA 03
+ 08 00 07 80 9B # Appletalk
+
+# Appletalk header -----
+
+ 00 1B # Packet length (invalid)
+ 00 01 # Fake checksum
+ 00 00 00 00 # Destination and source networks
+ 00 00 00 00 # Destination and source nodes and ports
+
+# Payload -----
+
+ 0C 0D 0E 0F 10 11 12 13
+ 14
+}
+
+The destination MAC address must be set to that of the victim.
+
+The severity is mitigated by two requirements:
+* The target host must have the appletalk kernel module loaded. I
+ suspect this isn't so frequent.
+* AppleTalk frames are non-IP, thus I guess they can only travel on
+ local networks. I am no network expert though, maybe it is possible
+ to somehow encapsulate AppleTalk packets over IP.
+
+The bug has been reported back in June 2004:
+ http://bugzilla.kernel.org/show_bug.cgi?id=2979
+But it wasn't investigated, and was closed in July 2006 as both
+reporters had vanished meanwhile.
+
+This code was new in kernel 2.6.0-test5:
+ http://git.kernel.org/?p=linux/kernel/git/tglx/history.git;a=commitdiff;h=7ab442d7e0a76402c12553ee256f756097cae2d2
+And not modified since then, so we can assume that vanilla kernels
+2.6.0-test5 and later, and distribution kernels based thereon, are
+affected.
+
+Note that I still do not know for sure what triggered the bug in the
+real-world cases. The frame could have been corrupted by the kernel if
+we have a bug hiding somewhere. But more likely, we are receiving the
+faulty frame from the network.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
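+
+For reference (not part of the fix): DDP packs the hop count and the
+datagram length into one 16-bit header field, with the length in the low
+10 bits, which is what the masking in the new check recovers:
+
+	claimed_len = len_hops & 1023;	/* length the header claims */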
+
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 113c175..c8b7dc2 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1417,10 +1417,13 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ /*
+ * Size check to see if ddp->deh_len was crap
+ * (Otherwise we'll detonate most spectacularly
+- * in the middle of recvmsg()).
++ * in the middle of atalk_checksum() or recvmsg()).
+ */
+- if (skb->len < sizeof(*ddp))
++ if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
++ pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, "
++ "skb->len=%u)\n", len_hops & 1023, skb->len);
+ goto freeit;
++ }
+
+ /*
+ * Any checksums. Note we don't do htons() on this == is assumed to be
diff --git a/tags/2.6.18-12/30007_cm4040-buffer-overflow.patch b/tags/2.6.18-12/30007_cm4040-buffer-overflow.patch
new file mode 100644
index 0000000..3047ff6
--- /dev/null
+++ b/tags/2.6.18-12/30007_cm4040-buffer-overflow.patch
@@ -0,0 +1,44 @@
+From: Marcel Holtmann <marcel@holtmann.org>
+Date: Tue, 6 Mar 2007 21:12:00 +0000 (+0100)
+Subject: [PATCH] Fix buffer overflow in Omnikey CardMan 4040 driver (CVE-2007-0005)
+X-Git-Tag: v2.6.21-rc3~17
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=059819a41d4331316dd8ddcf977a24ab338f4300
+
+[PATCH] Fix buffer overflow in Omnikey CardMan 4040 driver (CVE-2007-0005)
+
+Based on a patch from Don Howard <dhoward@redhat.com>
+
+When calling write() with a buffer larger than 512 bytes, the
+driver's write buffer overflows, allowing an attacker to overwrite the EIP and
+execute arbitrary code with kernel privileges.
+
+In read(), there is a similar problem, but coming from the device.
+A malicious or buggy device sending more than 512 bytes can overflow
+the driver's read buffer, with the same effects as above.
+
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Harald Welte <laforge@gnumonks.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
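+
+For illustration only (simplified; not the actual driver source): the bug
+class being fixed is an unchecked user-supplied count copied into a fixed
+512-byte buffer:
+
+	char buf[READ_WRITE_BUFFER_SIZE];		/* 512 bytes */
+
+	if (copy_from_user(buf, user_buf, count))	/* count never bounded */
+		return -EFAULT;
+
+The hunks below bound both the read and write paths against
+READ_WRITE_BUFFER_SIZE.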
+
+diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
+index 0e82968..f2e4ec4 100644
+--- a/drivers/char/pcmcia/cm4040_cs.c
++++ b/drivers/char/pcmcia/cm4040_cs.c
+@@ -273,6 +273,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
+ DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read);
+
+ min_bytes_to_read = min(count, bytes_to_read + 5);
++ min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
+
+ DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read);
+
+@@ -340,7 +341,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
+ return 0;
+ }
+
+- if (count < 5) {
++ if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
+ DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count);
+ return -EIO;
+ }
diff --git a/tags/2.6.18-12/30008_ipv6_fl_socklist-no-share.patch b/tags/2.6.18-12/30008_ipv6_fl_socklist-no-share.patch
new file mode 100644
index 0000000..8749435
--- /dev/null
+++ b/tags/2.6.18-12/30008_ipv6_fl_socklist-no-share.patch
@@ -0,0 +1,32 @@
+From: Masayuki Nakagawa <nakagawa.msy@ncos.nec.co.jp>
+Date: Fri, 16 Mar 2007 23:14:03 +0000 (-0700)
+Subject: [IPV6]: ipv6_fl_socklist is inadvertently shared.
+X-Git-Tag: v2.6.21-rc5~72^2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=d35690beda1429544d46c8eb34b2e3a8c37ab299
+
+[IPV6]: ipv6_fl_socklist is inadvertently shared.
+
+The ipv6_fl_socklist from the listening socket is inadvertently shared
+with the new socket created for the connection. This leads to a variety of
+interesting, but fatal, bugs. For example, removing one of the
+sockets may lead to the other socket's encountering a page fault
+when the now freed list is referenced.
+
+The fix is to not share the flow label list with the new socket.
+
+Signed-off-by: Masayuki Nakagawa <nakagawa.msy@ncos.nec.co.jp>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
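+
+For illustration only (simplified; not part of the fix): the new socket's
+ipv6 state is copied wholesale from the listener, so both sockets end up
+holding the same list head, roughly
+
+	newnp->ipv6_fl_list = np->ipv6_fl_list;	/* implicit share */
+
+Whichever socket is closed first frees the list, leaving the other with a
+dangling pointer; the one-line fix below clears the copy instead.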
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index f57a9ba..92f9992 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1453,6 +1453,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ First: no IPv4 options.
+ */
+ newinet->opt = NULL;
++ newnp->ipv6_fl_list = NULL;
+
+ /* Clone RX bits */
+ newnp->rxopt.all = np->rxopt.all;
diff --git a/tags/2.6.18-12/30009_keys-serial-num-collision.patch b/tags/2.6.18-12/30009_keys-serial-num-collision.patch
new file mode 100644
index 0000000..9875900
--- /dev/null
+++ b/tags/2.6.18-12/30009_keys-serial-num-collision.patch
@@ -0,0 +1,92 @@
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 6 Feb 2007 13:45:51 +0000 (+0000)
+Subject: [PATCH] Keys: Fix key serial number collision handling
+X-Git-Tag: v2.6.21-rc2~42^2~22
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=9ad0830f307bcd8dc285cfae58998d43b21727f4
+
+[PATCH] Keys: Fix key serial number collision handling
+
+Fix the key serial number collision avoidance code in key_alloc_serial().
+
+This didn't use to be so much of a problem as the key serial numbers were
+allocated from a simple incremental counter, and it would have to go through
+two billion keys before it could possibly encounter a collision. However, now
+that random numbers are used instead, collisions are much more likely.
+
+This is fixed by finding a hole in the rbtree where the next unused serial
+number ought to be and using it: go almost back to the top of the insertion
+routine and redo the insertion with the new serial number, rather than
+trying to be clever and work out the insertion point pointer directly.
+
+This fixes kernel BZ #7727.
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/security/keys/key.c b/security/keys/key.c
+index ac9326c..700400d 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -188,6 +188,7 @@ static inline void key_alloc_serial(struct key *key)
+
+ spin_lock(&key_serial_lock);
+
++attempt_insertion:
+ parent = NULL;
+ p = &key_serial_tree.rb_node;
+
+@@ -202,39 +203,33 @@ static inline void key_alloc_serial(struct key *key)
+ else
+ goto serial_exists;
+ }
+- goto insert_here;
++
++ /* we've found a suitable hole - arrange for this key to occupy it */
++ rb_link_node(&key->serial_node, parent, p);
++ rb_insert_color(&key->serial_node, &key_serial_tree);
++
++ spin_unlock(&key_serial_lock);
++ return;
+
+ /* we found a key with the proposed serial number - walk the tree from
+ * that point looking for the next unused serial number */
+ serial_exists:
+ for (;;) {
+ key->serial++;
+- if (key->serial < 2)
+- key->serial = 2;
+-
+- if (!rb_parent(parent))
+- p = &key_serial_tree.rb_node;
+- else if (rb_parent(parent)->rb_left == parent)
+- p = &(rb_parent(parent)->rb_left);
+- else
+- p = &(rb_parent(parent)->rb_right);
++ if (key->serial < 3) {
++ key->serial = 3;
++ goto attempt_insertion;
++ }
+
+ parent = rb_next(parent);
+ if (!parent)
+- break;
++ goto attempt_insertion;
+
+ xkey = rb_entry(parent, struct key, serial_node);
+ if (key->serial < xkey->serial)
+- goto insert_here;
++ goto attempt_insertion;
+ }
+
+- /* we've found a suitable hole - arrange for this key to occupy it */
+-insert_here:
+- rb_link_node(&key->serial_node, parent, p);
+- rb_insert_color(&key->serial_node, &key_serial_tree);
+-
+- spin_unlock(&key_serial_lock);
+-
+ } /* end key_alloc_serial() */
+
+ /*****************************************************************************/
diff --git a/tags/2.6.18-12/30010_ipv6_getsockopt_sticky-null-opt.patch b/tags/2.6.18-12/30010_ipv6_getsockopt_sticky-null-opt.patch
new file mode 100644
index 0000000..1a124c2
--- /dev/null
+++ b/tags/2.6.18-12/30010_ipv6_getsockopt_sticky-null-opt.patch
@@ -0,0 +1,42 @@
+From: David S. Miller <davem@sunset.davemloft.net>
+Date: Wed, 7 Mar 2007 20:50:46 +0000 (-0800)
+Subject: [IPV6]: Handle np->opt being NULL in ipv6_getsockopt_sticky().
+X-Git-Tag: v2.6.21-rc4~99^2~7
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=286930797d74b2c9a5beae84836044f6a836235f
+
+[IPV6]: Handle np->opt being NULL in ipv6_getsockopt_sticky().
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 286c867..4e0561a 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -795,11 +795,15 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ EXPORT_SYMBOL(compat_ipv6_setsockopt);
+ #endif
+
+-static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_opt_hdr *hdr,
++static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
+ char __user *optval, int len)
+ {
+- if (!hdr)
++ struct ipv6_opt_hdr *hdr;
++
++ if (!opt || !opt->hopopt)
+ return 0;
++ hdr = opt->hopopt;
++
+ len = min_t(int, len, ipv6_optlen(hdr));
+ if (copy_to_user(optval, hdr, ipv6_optlen(hdr)))
+ return -EFAULT;
+@@ -940,7 +944,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ {
+
+ lock_sock(sk);
+- len = ipv6_getsockopt_sticky(sk, np->opt->hopopt,
++ len = ipv6_getsockopt_sticky(sk, np->opt,
+ optval, len);
+ release_sock(sk);
+ return put_user(len, optlen);
diff --git a/tags/2.6.18-12/30011_ipv6_setsockopt-NULL-deref.patch b/tags/2.6.18-12/30011_ipv6_setsockopt-NULL-deref.patch
new file mode 100644
index 0000000..3e2d3dc
--- /dev/null
+++ b/tags/2.6.18-12/30011_ipv6_setsockopt-NULL-deref.patch
@@ -0,0 +1,28 @@
+From: Olaf Kirch <olaf.kirch@oracle.com>
+Date: Fri, 9 Mar 2007 21:55:38 +0000 (-0800)
+Subject: [IPV6]: Fix for ipv6_setsockopt NULL dereference
+X-Git-Tag: v2.6.21-rc4~50^2~1
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=dfee0a725bb027b749ffdd318eb48b91d564b266
+
+[IPV6]: Fix for ipv6_setsockopt NULL dereference
+
+I came across this bug in http://bugzilla.kernel.org/show_bug.cgi?id=8155
+
+Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
+Acked-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+--- source/net/ipv6/ipv6_sockglue.c.orig 2007-03-22 09:58:17.000000000 -0600
++++ source/net/ipv6/ipv6_sockglue.c 2007-03-22 09:59:22.000000000 -0600
+@@ -408,7 +408,7 @@
+ }
+
+ /* routing header option needs extra check */
+- if (optname == IPV6_RTHDR && opt->srcrt) {
++ if (optname == IPV6_RTHDR && opt && opt->srcrt) {
+ struct ipv6_rt_hdr *rthdr = opt->srcrt;
+ if (rthdr->type)
+ goto sticky_done;
diff --git a/tags/2.6.18-12/30012_ipv6-disallow-RH0-by-default.patch b/tags/2.6.18-12/30012_ipv6-disallow-RH0-by-default.patch
new file mode 100644
index 0000000..9d59779
--- /dev/null
+++ b/tags/2.6.18-12/30012_ipv6-disallow-RH0-by-default.patch
@@ -0,0 +1,166 @@
+From: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Date: Thu, 26 Apr 2007 04:56:57 +0000 (-0700)
+Subject: [PATCH] IPV6: Disallow RH0 by default.
+X-Git-Tag: v2.6.20.9~1
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.20.y.git;a=commitdiff_plain;h=010831ab8436dfd9304b203467566fb6b135c24f
+
+[PATCH] IPV6: Disallow RH0 by default.
+
+[IPV6]: Disallow RH0 by default.
+
+A security issue is emerging. Disallow Routing Header Type 0 by default
+as we have been doing for IPv4.
+Note: We allow RH2 by default because it is harmless.
+
+Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier and Vlad Yasevich
+
+diff -urpN linux-source-2.6.18.orig/Documentation/networking/ip-sysctl.txt linux-source-2.6.18/Documentation/networking/ip-sysctl.txt
+--- linux-source-2.6.18.orig/Documentation/networking/ip-sysctl.txt 2007-05-11 15:09:21.000000000 -0600
++++ linux-source-2.6.18/Documentation/networking/ip-sysctl.txt 2007-05-11 15:10:03.000000000 -0600
+@@ -775,6 +775,14 @@ accept_redirects - BOOLEAN
+ Functional default: enabled if local forwarding is disabled.
+ disabled if local forwarding is enabled.
+
++accept_source_route - INTEGER
++ Accept source routing (routing extension header).
++
++ > 0: Accept routing header.
++ = 0: Do not accept routing header.
++
++ Default: 0
++
+ autoconf - BOOLEAN
+ Autoconfigure addresses using Prefix Information in Router
+ Advertisements.
+diff -urpN linux-source-2.6.18.orig/include/linux/ipv6.h linux-source-2.6.18/include/linux/ipv6.h
+--- linux-source-2.6.18.orig/include/linux/ipv6.h 2007-05-11 15:09:21.000000000 -0600
++++ linux-source-2.6.18/include/linux/ipv6.h 2007-05-11 15:10:03.000000000 -0600
+@@ -153,6 +153,7 @@ struct ipv6_devconf {
+ __s32 accept_ra_rt_info_max_plen;
+ #endif
+ #endif
++ __s32 accept_source_route;
+ void *sysctl;
+ };
+
+@@ -180,6 +181,7 @@ enum {
+ DEVCONF_ACCEPT_RA_RTR_PREF,
+ DEVCONF_RTR_PROBE_INTERVAL,
+ DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
++ DEVCONF_ACCEPT_SOURCE_ROUTE,
+ DEVCONF_MAX
+ };
+
+diff -urpN linux-source-2.6.18.orig/include/linux/sysctl.h linux-source-2.6.18/include/linux/sysctl.h
+--- linux-source-2.6.18.orig/include/linux/sysctl.h 2007-05-11 15:09:21.000000000 -0600
++++ linux-source-2.6.18/include/linux/sysctl.h 2007-05-11 15:10:03.000000000 -0600
+@@ -553,6 +553,7 @@ enum {
+ NET_IPV6_ACCEPT_RA_RTR_PREF=20,
+ NET_IPV6_RTR_PROBE_INTERVAL=21,
+ NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
++ NET_IPV6_ACCEPT_SOURCE_ROUTE=23,
+ __NET_IPV6_MAX
+ };
+
+diff -urpN linux-source-2.6.18.orig/net/ipv6/addrconf.c linux-source-2.6.18/net/ipv6/addrconf.c
+--- linux-source-2.6.18.orig/net/ipv6/addrconf.c 2007-05-11 15:09:21.000000000 -0600
++++ linux-source-2.6.18/net/ipv6/addrconf.c 2007-05-11 15:10:07.000000000 -0600
+@@ -173,6 +173,7 @@ struct ipv6_devconf ipv6_devconf = {
+ .accept_ra_rt_info_max_plen = 0,
+ #endif
+ #endif
++ .accept_source_route = 0, /* we do not accept RH0 by default. */
+ };
+
+ static struct ipv6_devconf ipv6_devconf_dflt = {
+@@ -203,6 +204,7 @@ static struct ipv6_devconf ipv6_devconf_
+ .accept_ra_rt_info_max_plen = 0,
+ #endif
+ #endif
++ .accept_source_route = 0, /* we do not accept RH0 by default. */
+ };
+
+ /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
+@@ -3333,6 +3335,7 @@ static void inline ipv6_store_devconf(st
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
+ #endif
+ #endif
++ array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
+ }
+
+ /* Maximum length of ifinfomsg attributes */
+@@ -3847,6 +3850,14 @@ static struct addrconf_sysctl_table
+ #endif
+ #endif
+ {
++ .ctl_name = NET_IPV6_ACCEPT_SOURCE_ROUTE,
++ .procname = "accept_source_route",
++ .data = &ipv6_devconf.accept_source_route,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
++ {
+ .ctl_name = 0, /* sentinel */
+ }
+ },
+diff -urpN linux-source-2.6.18.orig/net/ipv6/exthdrs.c linux-source-2.6.18/net/ipv6/exthdrs.c
+--- linux-source-2.6.18.orig/net/ipv6/exthdrs.c 2007-05-11 15:09:21.000000000 -0600
++++ linux-source-2.6.18/net/ipv6/exthdrs.c 2007-05-11 15:10:03.000000000 -0600
+@@ -221,10 +221,24 @@ static int ipv6_rthdr_rcv(struct sk_buff
+ struct inet6_skb_parm *opt = IP6CB(skb);
+ struct in6_addr *addr;
+ struct in6_addr daddr;
++ struct inet6_dev *idev;
+ int n, i;
+-
+ struct ipv6_rt_hdr *hdr;
+ struct rt0_hdr *rthdr;
++ int accept_source_route = ipv6_devconf.accept_source_route;
++
++ if (accept_source_route == 0 ||
++ ((idev = in6_dev_get(skb->dev)) == NULL)) {
++ kfree_skb(skb);
++ return -1;
++ }
++ if (idev->cnf.accept_source_route == 0) {
++ in6_dev_put(idev);
++ kfree_skb(skb);
++ return -1;
++ }
++
++ in6_dev_put(idev);
+
+ if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
+ !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
+@@ -235,6 +249,12 @@ static int ipv6_rthdr_rcv(struct sk_buff
+
+ hdr = (struct ipv6_rt_hdr *) skb->h.raw;
+
++ if (hdr->type != IPV6_SRCRT_TYPE_0) {
++ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
++ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
++ return -1;
++ }
++
+ if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
+ skb->pkt_type != PACKET_HOST) {
+ IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+@@ -253,12 +273,6 @@ looped_back:
+ return 1;
+ }
+
+- if (hdr->type != IPV6_SRCRT_TYPE_0) {
+- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
+- return -1;
+- }
+-
+ if (hdr->hdrlen & 0x01) {
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
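
For reference, the knob this patch adds can be flipped from user space once the
patch is applied. A minimal sketch (requires root; the "all" entry is assumed
to exist alongside the per-interface conf/<dev>/ entries, following the usual
addrconf sysctl layout):

/* set_accept_source_route.c: sketch of toggling the new sysctl. */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/proc/sys/net/ipv6/conf/all/accept_source_route";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("0\n", f);        /* 0 = drop packets carrying RH0 */
        return fclose(f) ? 1 : 0;
}
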
diff --git a/tags/2.6.18-12/30013_listxattr-mem-corruption.patch b/tags/2.6.18-12/30013_listxattr-mem-corruption.patch
new file mode 100644
index 0000000..10f37da
--- /dev/null
+++ b/tags/2.6.18-12/30013_listxattr-mem-corruption.patch
@@ -0,0 +1,441 @@
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Sat, 6 Jan 2007 00:36:36 +0000 (-0800)
+Subject: [PATCH] fix memory corruption from misinterpreted bad_inode_ops return values
+X-Git-Tag: v2.6.20-rc4~60
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8;hp=2723f9603a8f8bb2cd8c7b581f7c94b8d75e3837
+
+[PATCH] fix memory corruption from misinterpreted bad_inode_ops return values
+
+CVE-2006-5753 is for a case where an inode can be marked bad, switching
+the ops to bad_inode_ops, which are all connected as:
+
+static int return_EIO(void)
+{
+ return -EIO;
+}
+
+#define EIO_ERROR ((void *) (return_EIO))
+
+static struct inode_operations bad_inode_ops =
+{
+ .create = bad_inode_create
+...etc...
+
+The problem here is that the void cast causes return types not to be
+promoted, and for ops such as listxattr which expect more than 32 bits of
+return value, the 32-bit -EIO is interpreted as a large positive 64-bit
+number, i.e. 0x00000000fffffffb instead of the sign-extended
+0xfffffffffffffffb.
+
+This goes particularly badly when the return value is taken as the number
+of bytes to copy into, say, a user-supplied buffer...
+
+I originally had coded up the fix by creating a return_EIO_<TYPE> macro
+for each return type, like this:
+
+static int return_EIO_int(void)
+{
+ return -EIO;
+}
+#define EIO_ERROR_INT ((void *) (return_EIO_int))
+
+static struct inode_operations bad_inode_ops =
+{
+ .create = EIO_ERROR_INT,
+...etc...
+
+but Al felt that it was probably better to create an EIO-returner for each
+actual op signature. Since so few ops share a signature, I just went ahead
+& created an EIO function for each individual file & inode op that returns
+a value.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+--- linux-source-2.6.18/fs/bad_inode.c.orig 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/bad_inode.c 2007-03-19 20:56:08.000000000 -0600
+@@ -14,61 +14,321 @@
+ #include <linux/time.h>
+ #include <linux/smp_lock.h>
+ #include <linux/namei.h>
++#include <linux/poll.h>
+
+-static int return_EIO(void)
++
++static loff_t bad_file_llseek(struct file *file, loff_t offset, int origin)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_read(struct file *filp, char __user *buf,
++ size_t size, loff_t *ppos)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_write(struct file *filp, const char __user *buf,
++ size_t siz, loff_t *ppos)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_aio_read(struct kiocb *iocb, char __user *buf,
++ size_t siz, loff_t pos)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_aio_write(struct kiocb *iocb, const char __user *buf,
++ size_t siz, loff_t pos)
++{
++ return -EIO;
++}
++
++static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir)
++{
++ return -EIO;
++}
++
++static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
++{
++ return POLLERR;
++}
++
++static int bad_file_ioctl (struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ return -EIO;
++}
++
++static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
++ unsigned long arg)
++{
++ return -EIO;
++}
++
++static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ return -EIO;
++}
++
++static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ return -EIO;
++}
++
++static int bad_file_open(struct inode *inode, struct file *filp)
++{
++ return -EIO;
++}
++
++static int bad_file_flush(struct file *file, fl_owner_t id)
++{
++ return -EIO;
++}
++
++static int bad_file_release(struct inode *inode, struct file *filp)
++{
++ return -EIO;
++}
++
++static int bad_file_fsync(struct file *file, struct dentry *dentry,
++ int datasync)
++{
++ return -EIO;
++}
++
++static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
++{
++ return -EIO;
++}
++
++static int bad_file_fasync(int fd, struct file *filp, int on)
++{
++ return -EIO;
++}
++
++static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_readv(struct file *filp, const struct iovec *iov,
++ unsigned long nr_segs, loff_t *ppos)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_writev(struct file *filp, const struct iovec *iov,
++ unsigned long nr_segs, loff_t *ppos)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_sendfile(struct file *in_file, loff_t *ppos,
++ size_t count, read_actor_t actor, void *target)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_sendpage(struct file *file, struct page *page,
++ int off, size_t len, loff_t *pos, int more)
++{
++ return -EIO;
++}
++
++static unsigned long bad_file_get_unmapped_area(struct file *file,
++ unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags)
+ {
+ return -EIO;
+ }
+
+-#define EIO_ERROR ((void *) (return_EIO))
++static int bad_file_check_flags(int flags)
++{
++ return -EIO;
++}
++
++static int bad_file_dir_notify(struct file *file, unsigned long arg)
++{
++ return -EIO;
++}
++
++static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
++ struct file *out, loff_t *ppos, size_t len,
++ unsigned int flags)
++{
++ return -EIO;
++}
++
++static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags)
++{
++ return -EIO;
++}
+
+ static const struct file_operations bad_file_ops =
+ {
+- .llseek = EIO_ERROR,
+- .aio_read = EIO_ERROR,
+- .read = EIO_ERROR,
+- .write = EIO_ERROR,
+- .aio_write = EIO_ERROR,
+- .readdir = EIO_ERROR,
+- .poll = EIO_ERROR,
+- .ioctl = EIO_ERROR,
+- .mmap = EIO_ERROR,
+- .open = EIO_ERROR,
+- .flush = EIO_ERROR,
+- .release = EIO_ERROR,
+- .fsync = EIO_ERROR,
+- .aio_fsync = EIO_ERROR,
+- .fasync = EIO_ERROR,
+- .lock = EIO_ERROR,
+- .readv = EIO_ERROR,
+- .writev = EIO_ERROR,
+- .sendfile = EIO_ERROR,
+- .sendpage = EIO_ERROR,
+- .get_unmapped_area = EIO_ERROR,
++ .llseek = bad_file_llseek,
++ .read = bad_file_read,
++ .write = bad_file_write,
++ .aio_read = bad_file_aio_read,
++ .aio_write = bad_file_aio_write,
++ .readdir = bad_file_readdir,
++ .poll = bad_file_poll,
++ .ioctl = bad_file_ioctl,
++ .unlocked_ioctl = bad_file_unlocked_ioctl,
++ .compat_ioctl = bad_file_compat_ioctl,
++ .mmap = bad_file_mmap,
++ .open = bad_file_open,
++ .flush = bad_file_flush,
++ .release = bad_file_release,
++ .fsync = bad_file_fsync,
++ .aio_fsync = bad_file_aio_fsync,
++ .fasync = bad_file_fasync,
++ .lock = bad_file_lock,
++ .readv = bad_file_readv,
++ .writev = bad_file_writev,
++ .sendfile = bad_file_sendfile,
++ .sendpage = bad_file_sendpage,
++ .get_unmapped_area = bad_file_get_unmapped_area,
++ .check_flags = bad_file_check_flags,
++ .dir_notify = bad_file_dir_notify,
++ .flock = bad_file_flock,
++ .splice_write = bad_file_splice_write,
++ .splice_read = bad_file_splice_read,
+ };
+
++static int bad_inode_create (struct inode *dir, struct dentry *dentry,
++ int mode, struct nameidata *nd)
++{
++ return -EIO;
++}
++
++static struct dentry *bad_inode_lookup(struct inode *dir,
++ struct dentry *dentry, struct nameidata *nd)
++{
++ return ERR_PTR(-EIO);
++}
++
++static int bad_inode_link (struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry)
++{
++ return -EIO;
++}
++
++static int bad_inode_unlink(struct inode *dir, struct dentry *dentry)
++{
++ return -EIO;
++}
++
++static int bad_inode_symlink (struct inode *dir, struct dentry *dentry,
++ const char *symname)
++{
++ return -EIO;
++}
++
++static int bad_inode_mkdir(struct inode *dir, struct dentry *dentry,
++ int mode)
++{
++ return -EIO;
++}
++
++static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
++{
++ return -EIO;
++}
++
++static int bad_inode_mknod (struct inode *dir, struct dentry *dentry,
++ int mode, dev_t rdev)
++{
++ return -EIO;
++}
++
++static int bad_inode_rename (struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry)
++{
++ return -EIO;
++}
++
++static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
++ int buflen)
++{
++ return -EIO;
++}
++
++static int bad_inode_permission(struct inode *inode, int mask,
++ struct nameidata *nd)
++{
++ return -EIO;
++}
++
++static int bad_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
++ struct kstat *stat)
++{
++ return -EIO;
++}
++
++static int bad_inode_setattr(struct dentry *direntry, struct iattr *attrs)
++{
++ return -EIO;
++}
++
++static int bad_inode_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ return -EIO;
++}
++
++static ssize_t bad_inode_getxattr(struct dentry *dentry, const char *name,
++ void *buffer, size_t size)
++{
++ return -EIO;
++}
++
++static ssize_t bad_inode_listxattr(struct dentry *dentry, char *buffer,
++ size_t buffer_size)
++{
++ return -EIO;
++}
++
++static int bad_inode_removexattr(struct dentry *dentry, const char *name)
++{
++ return -EIO;
++}
++
+ static struct inode_operations bad_inode_ops =
+ {
+- .create = EIO_ERROR,
+- .lookup = EIO_ERROR,
+- .link = EIO_ERROR,
+- .unlink = EIO_ERROR,
+- .symlink = EIO_ERROR,
+- .mkdir = EIO_ERROR,
+- .rmdir = EIO_ERROR,
+- .mknod = EIO_ERROR,
+- .rename = EIO_ERROR,
+- .readlink = EIO_ERROR,
++ .create = bad_inode_create,
++ .lookup = bad_inode_lookup,
++ .link = bad_inode_link,
++ .unlink = bad_inode_unlink,
++ .symlink = bad_inode_symlink,
++ .mkdir = bad_inode_mkdir,
++ .rmdir = bad_inode_rmdir,
++ .mknod = bad_inode_mknod,
++ .rename = bad_inode_rename,
++ .readlink = bad_inode_readlink,
+ /* follow_link must be no-op, otherwise unmounting this inode
+ won't work */
+- .truncate = EIO_ERROR,
+- .permission = EIO_ERROR,
+- .getattr = EIO_ERROR,
+- .setattr = EIO_ERROR,
+- .setxattr = EIO_ERROR,
+- .getxattr = EIO_ERROR,
+- .listxattr = EIO_ERROR,
+- .removexattr = EIO_ERROR,
++ /* put_link returns void */
++ /* truncate returns void */
++ .permission = bad_inode_permission,
++ .getattr = bad_inode_getattr,
++ .setattr = bad_inode_setattr,
++ .setxattr = bad_inode_setxattr,
++ .getxattr = bad_inode_getxattr,
++ .listxattr = bad_inode_listxattr,
++ .removexattr = bad_inode_removexattr,
++ /* truncate_range returns void */
+ };
+
+
+@@ -90,7 +350,7 @@
+ * on it to fail from this point on.
+ */
+
+-void make_bad_inode(struct inode * inode)
++void make_bad_inode(struct inode *inode)
+ {
+ remove_inode_hash(inode);
+
+@@ -115,7 +375,7 @@
+ * Returns true if the inode in question has been marked as bad.
+ */
+
+-int is_bad_inode(struct inode * inode)
++int is_bad_inode(struct inode *inode)
+ {
+ return (inode->i_op == &bad_inode_ops);
+ }
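
To see the mis-promotion the description refers to, here is a stand-alone
user-space sketch, not part of the patch: it reproduces the old EIO_ERROR
cast. Calling through a mismatched function-pointer type is undefined
behavior, which is exactly the bug; on LP64/x86-64 it typically yields the
zero-extended value described above.

/* promotion_bug.c: mimics "#define EIO_ERROR ((void *) (return_EIO))". */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static int return_EIO(void)
{
        return -EIO;                    /* 32-bit 0xfffffffb */
}

typedef ssize_t (*listxattr_op)(void); /* caller expects 64 bits back */

int main(void)
{
        listxattr_op op = (listxattr_op)(void *)return_EIO;
        ssize_t ret = op();

        /* The callee only defines the low 32 bits of the return
         * register, so the caller typically reads the huge positive
         * value 0x00000000fffffffb rather than a negative errno. */
        printf("ret = %zd (%#zx)\n", ret, (size_t)ret);
        return 0;
}
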
diff --git a/tags/2.6.18-12/30014_bluetooth-l2cap-hci-info-leaks.patch b/tags/2.6.18-12/30014_bluetooth-l2cap-hci-info-leaks.patch
new file mode 100644
index 0000000..0c64d1d
--- /dev/null
+++ b/tags/2.6.18-12/30014_bluetooth-l2cap-hci-info-leaks.patch
@@ -0,0 +1,63 @@
+From: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri, 4 May 2007 22:35:59 +0000 (+0200)
+Subject: [Bluetooth] Fix L2CAP and HCI setsockopt() information leaks
+X-Git-Tag: v2.6.22-rc1~822^2~2^2~6
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0878b6667f28772aa7d6b735abff53efc7bf6d91
+
+[Bluetooth] Fix L2CAP and HCI setsockopt() information leaks
+
+The L2CAP and HCI setsockopt() implementations have a small information
+leak that makes it possible to leak kernel stack memory to userspace.
+
+If the optlen parameter is 0, no data will be copied by copy_from_user(),
+but the uninitialized stack buffer will be read and stored later. A call
+to getsockopt() can now retrieve the leaked information.
+
+To fix this problem the stack buffer given to copy_from_user() must be
+initialized with the current settings.
+
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+---
+
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 832b5f4..bfc9a35 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -499,6 +499,15 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
+ break;
+
+ case HCI_FILTER:
++ {
++ struct hci_filter *f = &hci_pi(sk)->filter;
++
++ uf.type_mask = f->type_mask;
++ uf.opcode = f->opcode;
++ uf.event_mask[0] = *((u32 *) f->event_mask + 0);
++ uf.event_mask[1] = *((u32 *) f->event_mask + 1);
++ }
++
+ len = min_t(unsigned int, len, sizeof(uf));
+ if (copy_from_user(&uf, optval, len)) {
+ err = -EFAULT;
+diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
+index a586787..a59b1fb 100644
+--- a/net/bluetooth/l2cap.c
++++ b/net/bluetooth/l2cap.c
+@@ -954,11 +954,17 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
++ opts.imtu = l2cap_pi(sk)->imtu;
++ opts.omtu = l2cap_pi(sk)->omtu;
++ opts.flush_to = l2cap_pi(sk)->flush_to;
++ opts.mode = 0x00;
++
+ len = min_t(unsigned int, sizeof(opts), optlen);
+ if (copy_from_user((char *) &opts, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
++
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ break;
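
The leak pattern, reduced to a hypothetical user-space analogue (the struct
and function names are invented): with optlen 0 the partial copy leaves the
stack buffer untouched, and whatever happened to be on the stack gets stored
where a later query can read it back. The fixed shape seeds the buffer with
the current settings first, as the patch does.

/* infoleak_pattern.c: user-space analogue of the setsockopt() leak. */
#include <stdio.h>
#include <string.h>

struct opts { int imtu, omtu, flush_to, mode; };

static struct opts stored;              /* what getsockopt() reads back */

static void set_opts_vulnerable(const void *user, size_t optlen)
{
        struct opts o;                  /* uninitialized stack buffer */
        size_t len = optlen < sizeof(o) ? optlen : sizeof(o);

        memcpy(&o, user, len);          /* len may be 0: copies nothing */
        stored = o;                     /* stores leftover stack bytes */
}

static void set_opts_fixed(const void *user, size_t optlen)
{
        struct opts o = stored;         /* seed with current settings */
        size_t len = optlen < sizeof(o) ? optlen : sizeof(o);

        memcpy(&o, user, len);          /* a short copy is now harmless */
        stored = o;
}

int main(void)
{
        int junk = 42;

        set_opts_vulnerable(&junk, 0);  /* stored <- stack garbage */
        set_opts_fixed(&junk, 0);       /* stored unchanged: no leak */
        printf("imtu=%d\n", stored.imtu);
        return 0;
}
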
diff --git a/tags/2.6.18-12/30015_usblcd-limit-memory-consumption.patch b/tags/2.6.18-12/30015_usblcd-limit-memory-consumption.patch
new file mode 100644
index 0000000..735810b
--- /dev/null
+++ b/tags/2.6.18-12/30015_usblcd-limit-memory-consumption.patch
@@ -0,0 +1,89 @@
+From: Oliver Neukum <oneukum@suse.de>
+Date: Mon, 11 Jun 2007 13:36:02 +0000 (+0200)
+Subject: USB: usblcd doesn't limit memory consumption during write
+X-Git-Tag: v2.6.22-rc7~49^2~3
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=5afeb104e7901168b21aad0437fb51dc620dfdd3
+
+USB: usblcd doesn't limit memory consumption during write
+
+usblcd currently has no way to limit memory consumption by fast writers.
+This is a security problem, as it allows users with write access to this
+device to drive the system into an OOM condition despite resource limits.
+Here's the fix taken from the modern skeleton driver.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/drivers/usb/misc/usblcd.c linux-source-2.6.18/drivers/usb/misc/usblcd.c
+--- linux-source-2.6.18.orig/drivers/usb/misc/usblcd.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/usb/misc/usblcd.c 2007-08-07 16:12:28.000000000 -0600
+@@ -42,10 +42,14 @@ struct usb_lcd {
+ size_t bulk_in_size; /* the size of the receive buffer */
+ __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
+ __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
+- struct kref kref;
++ struct kref kref;
++ struct semaphore limit_sem; /* to stop writes at full throttle from
++ * using up all RAM */
+ };
+ #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
+
++#define USB_LCD_CONCURRENT_WRITES 5
++
+ static struct usb_driver lcd_driver;
+
+
+@@ -183,12 +187,13 @@ static void lcd_write_bulk_callback(stru
+ /* free up our allocated buffer */
+ usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
++ up(&dev->limit_sem);
+ }
+
+ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, size_t count, loff_t *ppos)
+ {
+ struct usb_lcd *dev;
+- int retval = 0;
++ int retval = 0, r;
+ struct urb *urb = NULL;
+ char *buf = NULL;
+
+@@ -198,10 +203,16 @@ static ssize_t lcd_write(struct file *fi
+ if (count == 0)
+ goto exit;
+
++ r = down_interruptible(&dev->limit_sem);
++ if (r < 0)
++ return -EINTR;
++
+ /* create a urb, and a buffer for it, and copy the data to the urb */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (!urb)
+- return -ENOMEM;
++ if (!urb) {
++ retval = -ENOMEM;
++ goto err_no_buf;
++ }
+
+ buf = usb_buffer_alloc(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
+ if (!buf) {
+@@ -236,6 +247,8 @@ exit:
+ error:
+ usb_buffer_free(dev->udev, count, buf, urb->transfer_dma);
+ usb_free_urb(urb);
++err_no_buf:
++ up(&dev->limit_sem);
+ return retval;
+ }
+
+@@ -274,6 +287,7 @@ static int lcd_probe(struct usb_interfac
+ goto error;
+ }
+ kref_init(&dev->kref);
++ sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+
+ dev->udev = usb_get_dev(interface_to_usbdev(interface));
+ dev->interface = interface;
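
The throttling idiom the patch borrows from the skeleton driver, transplanted
into a hypothetical user-space sketch with POSIX semaphores (buffer allocation
and URB submission are elided): each submission takes the semaphore, each
completion returns it, so at most N writes can hold buffers at once.

/* write_throttle.c: user-space analogue of the usblcd fix. */
#include <semaphore.h>
#include <stdio.h>

#define CONCURRENT_WRITES 5             /* USB_LCD_CONCURRENT_WRITES */

static sem_t limit_sem;

static int submit_write(void)
{
        if (sem_wait(&limit_sem))       /* kernel: down_interruptible() */
                return -1;
        /* ... allocate buffer and start the asynchronous write ... */
        return 0;
}

static void write_completed(void)
{
        sem_post(&limit_sem);           /* kernel: up() in the callback */
}

int main(void)
{
        sem_init(&limit_sem, 0, CONCURRENT_WRITES);

        /* A sixth uncompleted submission would block here instead of
         * pinning yet another buffer in memory. */
        for (int i = 0; i < CONCURRENT_WRITES; i++)
                submit_write();
        write_completed();
        submit_write();                 /* proceeds: a slot freed up */

        sem_destroy(&limit_sem);
        return 0;
}
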
diff --git a/tags/2.6.18-12/30016_pppoe-socket-release-mem-leak.patch b/tags/2.6.18-12/30016_pppoe-socket-release-mem-leak.patch
new file mode 100644
index 0000000..10f833c
--- /dev/null
+++ b/tags/2.6.18-12/30016_pppoe-socket-release-mem-leak.patch
@@ -0,0 +1,42 @@
+From: Florian Zumbiehl <florz@florz.de>
+Date: Fri, 20 Apr 2007 23:58:14 +0000 (-0700)
+Subject: [PPPOE]: memory leak when socket is release()d before PPPIOCGCHAN has been called ...
+X-Git-Tag: v2.6.22-rc1~1128^2~92
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=202a03acf9994076055df40ae093a5c5474ad0bd
+
+[PPPOE]: memory leak when socket is release()d before PPPIOCGCHAN has been called on it
+
+Below is a patch that fixes a memory leak when a PPPoE socket is
+release()d after it has been connect()ed, but before the PPPIOCGCHAN ioctl
+has ever been called on it.
+
+This is somewhat of a security problem, too, since PPPoE sockets can be
+created by any user, so any user can easily allocate all the machine's
+RAM to non-swappable address space and thus DoS the system.
+
+Is there any specific reason for PPPoE sockets being available to any
+unprivileged process, BTW? After all, you need a packet socket for the
+discovery stage anyway, so it's unlikely that any unprivileged process
+will ever need to create a PPPoE socket, no? Allocating all session IDs
+for a known AC is a kind of DoS, too, after all - with Juniper ERXes,
+this is really easy, actually, since they don't ever assign session ids
+above 8000 ...
+
+Signed-off-by: Florian Zumbiehl <florz@florz.de>
+Acked-by: Michal Ostrowski <mostrows@earthlink.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
+index 9315046..3f8115d 100644
+--- a/drivers/net/pppox.c
++++ b/drivers/net/pppox.c
+@@ -58,7 +58,7 @@ void pppox_unbind_sock(struct sock *sk)
+ {
+ /* Clear connection to ppp device, if attached. */
+
+- if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE)) {
++ if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE)) {
+ ppp_unregister_channel(&pppox_sk(sk)->chan);
+ sk->sk_state = PPPOX_DEAD;
+ }
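
A small sketch of the one-line fix (the state constants here are illustrative,
not the real include/linux/if_pppox.h values): a socket left in the
connected-but-never-bound state passed the old mask untested, so its ppp
channel was never unregistered on release.

/* unbind_mask.c: why PPPOX_CONNECTED must be in the unbind mask. */
#include <stdio.h>

#define PPPOX_CONNECTED 1   /* connect()ed, PPPIOCGCHAN never called */
#define PPPOX_BOUND     2
#define PPPOX_ZOMBIE    4
#define PPPOX_DEAD      8

static void unbind(int *state, int mask)
{
        if (*state & mask) {
                /* kernel: ppp_unregister_channel() releases the
                 * channel's resources here */
                *state = PPPOX_DEAD;
        }
}

int main(void)
{
        int before = PPPOX_CONNECTED, after = PPPOX_CONNECTED;

        unbind(&before, PPPOX_BOUND | PPPOX_ZOMBIE);    /* leaks */
        unbind(&after, PPPOX_BOUND | PPPOX_CONNECTED | PPPOX_ZOMBIE);
        printf("old mask: %d, fixed mask: %d\n", before, after);
        return 0;
}
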
diff --git a/tags/2.6.18-12/30017_nf_conntrack_h323-bounds-checking.patch b/tags/2.6.18-12/30017_nf_conntrack_h323-bounds-checking.patch
new file mode 100644
index 0000000..1101b89
--- /dev/null
+++ b/tags/2.6.18-12/30017_nf_conntrack_h323-bounds-checking.patch
@@ -0,0 +1,42 @@
+From: Jing Min Zhao <zhaojingmin@vivecode.com>
+Date: Fri, 6 Jul 2007 00:05:01 +0000 (-0700)
+Subject: [NETFILTER]: nf_conntrack_h323: add checking of out-of-range on choices' index values
+X-Git-Tag: v2.6.22~11^2~2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=25845b5155b55cd77e42655ec24161ba3feffa47
+
+[NETFILTER]: nf_conntrack_h323: add checking of out-of-range on choices' index values
+
+Choices' index values may be out of range while still encoded in the
+fixed-length bit-field. This bug may cause access to undefined types (NULL
+pointers) and thus crashes (Reported by Zhongling Wen).
+
+This patch also adds checking of decode flag when decoding SEQUENCEs.
+
+Signed-off-by: Jing Min Zhao <zhaojingmin@vivecode.com>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c linux-source-2.6.18/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
+--- linux-source-2.6.18.orig/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c 2007-07-11 00:23:22.000000000 -0600
+@@ -518,7 +518,7 @@ int decode_seq(bitstr_t * bs, field_t *
+ CHECK_BOUND(bs, 2);
+ len = get_len(bs);
+ CHECK_BOUND(bs, len);
+- if (!base) {
++ if (!base || !(son->attr & DECODE)) {
+ PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
+ " ", son->name);
+ bs->cur += len;
+@@ -704,6 +704,8 @@ int decode_choice(bitstr_t * bs, field_t
+ } else {
+ ext = 0;
+ type = get_bits(bs, f->sz);
++ if (type >= f->lb)
++ return H323_ERROR_RANGE;
+ }
+
+ /* Write Type */
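
The added range check, in a hypothetical stand-alone form (the decoder table
and names are invented): a 3-bit field can encode eight values even when the
CHOICE defines only two alternatives, and the undefined slots are exactly the
NULL pointers the description mentions.

/* choice_range.c: range-checking a choice index from a bit-field. */
#include <stdio.h>

#define H323_ERROR_RANGE (-2)

typedef int (*decoder_fn)(void);

static int dec_a(void) { return 0; }
static int dec_b(void) { return 0; }

/* Two defined alternatives; indices 2..7 would read past the table. */
static decoder_fn alternatives[] = { dec_a, dec_b };

static int decode_choice(unsigned type, unsigned num_alternatives)
{
        if (type >= num_alternatives)   /* the added range check */
                return H323_ERROR_RANGE;
        return alternatives[type]();
}

int main(void)
{
        printf("%d\n", decode_choice(7, 2));    /* -2, not a crash */
        return 0;
}
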
diff --git a/tags/2.6.18-12/30018_dn_fib-out-of-bounds.patch b/tags/2.6.18-12/30018_dn_fib-out-of-bounds.patch
new file mode 100644
index 0000000..98be43e
--- /dev/null
+++ b/tags/2.6.18-12/30018_dn_fib-out-of-bounds.patch
@@ -0,0 +1,37 @@
+commit a979101106f549f4ed80d6dcbc35077be34d4346
+Author: Thomas Graf <tgraf@suug.ch>
+Date: Sat Mar 24 20:33:27 2007 -0700
+
+ [DECNet] fib: Fix out of bound access of dn_fib_props[]
+
+ Fixes a typo which caused fib_props[] to have the wrong size
+    and makes sure the value used to index the array, which is
+    provided by userspace via netlink, is checked to avoid
+    out-of-bounds access.
+
+ Signed-off-by: Thomas Graf <tgraf@suug.ch>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
+diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
+index 3cbfddc..82d58a9 100644
+--- a/net/decnet/dn_fib.c
++++ b/net/decnet/dn_fib.c
+@@ -63,7 +63,7 @@ static struct
+ {
+ int error;
+ u8 scope;
+-} dn_fib_props[RTA_MAX+1] = {
++} dn_fib_props[RTN_MAX+1] = {
+ [RTN_UNSPEC] = { .error = 0, .scope = RT_SCOPE_NOWHERE },
+ [RTN_UNICAST] = { .error = 0, .scope = RT_SCOPE_UNIVERSE },
+ [RTN_LOCAL] = { .error = 0, .scope = RT_SCOPE_HOST },
+@@ -276,6 +276,9 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
+ struct dn_fib_info *ofi;
+ int nhs = 1;
+
++ if (r->rtm_type > RTN_MAX)
++ goto err_inval;
++
+ if (dn_fib_props[r->rtm_type].scope > r->rtm_scope)
+ goto err_inval;
+
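
Both halves of the fix in one hypothetical sketch: size the table by the
constant that actually bounds rtm_type, and reject userspace-supplied indices
beyond it before any lookup (RTN_MAX's value here is illustrative; the real
one lives in include/linux/rtnetlink.h).

/* rtm_range.c: sizing and guarding a netlink-indexed table. */
#include <stdio.h>

#define RTN_MAX 11

static struct {
        int error;
        unsigned char scope;
} dn_fib_props[RTN_MAX + 1];    /* was sized by the wrong constant */

static int create_info(unsigned rtm_type)
{
        if (rtm_type > RTN_MAX)         /* the added check */
                return -1;              /* err_inval */
        return dn_fib_props[rtm_type].error;
}

int main(void)
{
        printf("%d\n", create_info(200));       /* rejected, no overrun */
        return 0;
}
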
diff --git a/tags/2.6.18-12/30019_random-fix-seeding-with-zero-entropy.patch b/tags/2.6.18-12/30019_random-fix-seeding-with-zero-entropy.patch
new file mode 100644
index 0000000..b61a03e
--- /dev/null
+++ b/tags/2.6.18-12/30019_random-fix-seeding-with-zero-entropy.patch
@@ -0,0 +1,97 @@
+commit 7f397dcdb78d699a20d96bfcfb595a2411a5bbd2
+Author: Matt Mackall <mpm@selenic.com>
+Date: Tue May 29 21:58:10 2007 -0500
+
+ random: fix seeding with zero entropy
+
+ Add data from zero-entropy random_writes directly to output pools to
+ avoid accounting difficulties on machines without entropy sources.
+
+ Tested on lguest with all entropy sources disabled.
+
+ Signed-off-by: Matt Mackall <mpm@selenic.com>
+ Acked-by: "Theodore Ts'o" <tytso@mit.edu>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+# Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+--- linux-source-2.6.18/drivers/char/random.c.orig 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/char/random.c 2007-07-12 23:57:12.000000000 -0600
+@@ -1017,37 +1017,44 @@ random_poll(struct file *file, poll_tabl
+ return mask;
+ }
+
+-static ssize_t
+-random_write(struct file * file, const char __user * buffer,
+- size_t count, loff_t *ppos)
++static int
++write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ {
+- int ret = 0;
+ size_t bytes;
+ __u32 buf[16];
+ const char __user *p = buffer;
+- size_t c = count;
+
+- while (c > 0) {
+- bytes = min(c, sizeof(buf));
++ while (count > 0) {
++ bytes = min(count, sizeof(buf));
++ if (copy_from_user(&buf, p, bytes))
++ return -EFAULT;
+
+- bytes -= copy_from_user(&buf, p, bytes);
+- if (!bytes) {
+- ret = -EFAULT;
+- break;
+- }
+- c -= bytes;
++ count -= bytes;
+ p += bytes;
+
+- add_entropy_words(&input_pool, buf, (bytes + 3) / 4);
+- }
+- if (p == buffer) {
+- return (ssize_t)ret;
+- } else {
+- struct inode *inode = file->f_dentry->d_inode;
+- inode->i_mtime = current_fs_time(inode->i_sb);
+- mark_inode_dirty(inode);
+- return (ssize_t)(p - buffer);
++ add_entropy_words(r, buf, (bytes + 3) / 4);
+ }
++
++ return 0;
++}
++
++static ssize_t
++random_write(struct file * file, const char __user * buffer,
++ size_t count, loff_t *ppos)
++{
++ size_t ret;
++ struct inode *inode = file->f_dentry->d_inode;
++
++ ret = write_pool(&blocking_pool, buffer, count);
++ if (ret)
++ return ret;
++ ret = write_pool(&nonblocking_pool, buffer, count);
++ if (ret)
++ return ret;
++
++ inode->i_mtime = current_fs_time(inode->i_sb);
++ mark_inode_dirty(inode);
++ return (ssize_t)count;
+ }
+
+ static int
+@@ -1086,8 +1093,8 @@ random_ioctl(struct inode * inode, struc
+ return -EINVAL;
+ if (get_user(size, p++))
+ return -EFAULT;
+- retval = random_write(file, (const char __user *) p,
+- size, &file->f_pos);
++ retval = write_pool(&input_pool, (const char __user *)p,
++ size);
+ if (retval < 0)
+ return retval;
+ credit_entropy_store(&input_pool, ent_count);
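
One subtlety worth spelling out: copy_from_user() returns the number of bytes
it could NOT copy. The old loop subtracted that from the byte count and only
errored when nothing at all was copied; the rewritten write_pool() fails fast
on any shortfall. A user-space sketch with a stand-in copier:

/* partial_copy.c: fail-fast handling of a partial user copy. */
#include <stddef.h>
#include <stdio.h>

#define EFAULT 14

/* Stand-in that "fails" to copy the last short_by bytes. */
static size_t fake_copy_from_user(void *dst, const void *src,
                                  size_t n, size_t short_by)
{
        (void)dst; (void)src;
        return short_by < n ? short_by : n;
}

static int write_pool(size_t count, size_t short_by)
{
        char buf[64];

        while (count > 0) {
                size_t bytes = count < sizeof(buf) ? count : sizeof(buf);

                if (fake_copy_from_user(buf, NULL, bytes, short_by))
                        return -EFAULT; /* any shortfall is an error */
                count -= bytes;
                /* ... add_entropy_words(r, buf, (bytes + 3) / 4) ... */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", write_pool(100, 3)); /* -14: partial copy rejected */
        return 0;
}
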
diff --git a/tags/2.6.18-12/30020_random-fix-error-in-entropy-extraction.patch b/tags/2.6.18-12/30020_random-fix-error-in-entropy-extraction.patch
new file mode 100644
index 0000000..8a302fd
--- /dev/null
+++ b/tags/2.6.18-12/30020_random-fix-error-in-entropy-extraction.patch
@@ -0,0 +1,51 @@
+commit 602b6aeefe8932dd8bb15014e8fe6bb25d736361
+Author: Matt Mackall <mpm@selenic.com>
+Date: Tue May 29 21:54:27 2007 -0500
+
+ random: fix error in entropy extraction
+
+ Fix cast error in entropy extraction.
+ Add comments explaining the magic 16.
+ Remove extra confusing loop variable.
+
+ Signed-off-by: Matt Mackall <mpm@selenic.com>
+ Acked-by: "Theodore Ts'o" <tytso@mit.edu>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 46c1b97..9705b43 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -760,7 +760,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+
+ static void extract_buf(struct entropy_store *r, __u8 *out)
+ {
+- int i, x;
++ int i;
+ __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+
+ sha_init(buf);
+@@ -772,9 +772,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * attempts to find previous ouputs), unless the hash
+ * function can be inverted.
+ */
+- for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
+- sha_transform(buf, (__u8 *)r->pool+i, buf + 5);
+- add_entropy_words(r, &buf[x % 5], 1);
++ for (i = 0; i < r->poolinfo->poolwords; i += 16) {
++ /* hash blocks of 16 words = 512 bits */
++ sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
++ /* feed back portion of the resulting hash */
++ add_entropy_words(r, &buf[i % 5], 1);
+ }
+
+ /*
+@@ -782,7 +784,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * portion of the pool while mixing, and hash one
+ * final time.
+ */
+- __add_entropy_words(r, &buf[x % 5], 1, data);
++ __add_entropy_words(r, &buf[i % 5], 1, data);
+ sha_transform(buf, (__u8 *)data, buf + 5);
+
+ /*
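
The cast error is a pointer-arithmetic precedence bug: r->pool is an array of
32-bit words, so the parenthesization decides whether i steps in bytes or in
words, and hashing "blocks of 16 words" needs the latter. A stand-alone
demonstration:

/* pool_cast.c: (u8 *)p + i steps bytes; (u8 *)(p + i) steps words. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pool[32];
        int i = 16;

        uint8_t *wrong = (uint8_t *)pool + i;   /* byte 16 of the pool */
        uint8_t *right = (uint8_t *)(pool + i); /* word 16 = byte 64   */

        printf("offset wrong=%td bytes, right=%td bytes\n",
               wrong - (uint8_t *)pool, right - (uint8_t *)pool);
        /* prints: offset wrong=16 bytes, right=64 bytes */
        return 0;
}
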
diff --git a/tags/2.6.18-12/30021_nf_conntrack_sctp-null-deref.patch b/tags/2.6.18-12/30021_nf_conntrack_sctp-null-deref.patch
new file mode 100644
index 0000000..17117b6
--- /dev/null
+++ b/tags/2.6.18-12/30021_nf_conntrack_sctp-null-deref.patch
@@ -0,0 +1,49 @@
+From: Patrick McHardy <kaber@trash.net>
+Date: Tue, 5 Jun 2007 12:14:22 +0000 (+0200)
+Subject: [UBUNTU] CVE-2007-2876 NETFILTER: {ip, nf}_conntrack_sctp: fix remotely triggerable ...
+X-Git-Url: http://kernel.ubuntu.com/git?p=ubuntu/ubuntu-edgy.git;a=commitdiff;h=3ccb814b91bca2e0a6fe4b5d1c5dbb35a06a848b
+
+[UBUNTU] CVE-2007-2876 NETFILTER: {ip, nf}_conntrack_sctp: fix remotely triggerable NULL ptr dereference
+
+When creating a new connection by sending an unknown chunk type, we
+don't transition to a valid state, causing a NULL pointer dereference in
+sctp_packet when accessing sctp_timeouts[SCTP_CONNTRACK_NONE].
+
+Fix by not creating a new conntrack entry if the initial state is invalid.
+
+Noticed by Vilmos Nebehaj <vilmos.nebehaj@ramsys.hu>
+
+CC: Kiran Kumar Immidi <immidi_kiran@yahoo.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+
+modified: net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+modified: net/netfilter/nf_conntrack_proto_sctp.c
+---
+
+--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
++++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+@@ -461,7 +461,8 @@ static int sctp_new(struct ip_conntrack
+ SCTP_CONNTRACK_NONE, sch->type);
+
+ /* Invalid: delete conntrack */
+- if (newconntrack == SCTP_CONNTRACK_MAX) {
++ if (newconntrack == SCTP_CONNTRACK_NONE ||
++ newconntrack == SCTP_CONNTRACK_MAX) {
+ DEBUGP("ip_conntrack_sctp: invalid new deleting.\n");
+ return 0;
+ }
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -467,7 +467,8 @@ static int sctp_new(struct nf_conn *conn
+ SCTP_CONNTRACK_NONE, sch->type);
+
+ /* Invalid: delete conntrack */
+- if (newconntrack == SCTP_CONNTRACK_MAX) {
++ if (newconntrack == SCTP_CONNTRACK_NONE ||
++ newconntrack == SCTP_CONNTRACK_MAX) {
+ DEBUGP("nf_conntrack_sctp: invalid new deleting.\n");
+ return 0;
+ }
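
A reduced sketch of the guard (enum values and table layout are illustrative):
the pseudo-states at both ends of the enum have no timeout entry of their own,
so a conntrack created in either would later dereference a NULL slot.

/* sctp_new_guard.c: reject conntracks created in a pseudo-state. */
#include <stdio.h>

enum {
        SCTP_CONNTRACK_NONE,
        SCTP_CONNTRACK_ESTABLISHED,
        SCTP_CONNTRACK_MAX
};

static const unsigned established_timeout = 432000;

static const unsigned *sctp_timeouts[SCTP_CONNTRACK_MAX] = {
        [SCTP_CONNTRACK_ESTABLISHED] = &established_timeout,
        /* [SCTP_CONNTRACK_NONE] stays NULL: the old crash site */
};

static int sctp_new(int newconntrack)
{
        if (newconntrack == SCTP_CONNTRACK_NONE ||
            newconntrack == SCTP_CONNTRACK_MAX)
                return 0;               /* invalid: no conntrack entry */
        return sctp_timeouts[newconntrack] != NULL;
}

int main(void)
{
        printf("%d\n", sctp_new(SCTP_CONNTRACK_NONE)); /* 0: rejected */
        return 0;
}
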
diff --git a/tags/2.6.18-12/30022_i965-secure-batchbuffer.patch b/tags/2.6.18-12/30022_i965-secure-batchbuffer.patch
new file mode 100644
index 0000000..0c813c1
--- /dev/null
+++ b/tags/2.6.18-12/30022_i965-secure-batchbuffer.patch
@@ -0,0 +1,67 @@
+From: Dave Airlie <airlied@redhat.com>
+Date: Mon, 6 Aug 2007 23:09:51 +0000 (+1000)
+Subject: drm/i915: Fix i965 secured batchbuffer usage
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=21f16289270447673a7263ccc0b22d562fb01ecb
+
+drm/i915: Fix i965 secured batchbuffer usage
+
+The 965G and above chipsets moved the batch buffer non-secure bits to
+another place. This means that previous DRMs allowed insecure batchbuffers
+to be submitted to the hardware by non-privileged users who are logged
+into X and have access to direct rendering.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
+index 3359cc2..8e7d713 100644
+--- a/drivers/char/drm/i915_dma.c
++++ b/drivers/char/drm/i915_dma.c
+@@ -184,6 +184,8 @@ static int i915_initialize(struct drm_device * dev,
+ * private backbuffer/depthbuffer usage.
+ */
+ dev_priv->use_mi_batchbuffer_start = 0;
++ if (IS_I965G(dev)) /* 965 doesn't support older method */
++ dev_priv->use_mi_batchbuffer_start = 1;
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+@@ -517,8 +519,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+
+ if (dev_priv->use_mi_batchbuffer_start) {
+ BEGIN_LP_RING(2);
+- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
++ if (IS_I965G(dev)) {
++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
++ OUT_RING(batch->start);
++ } else {
++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
++ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
++ }
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+@@ -735,7 +742,8 @@ static int i915_setparam(DRM_IOCTL_ARGS)
+
+ switch (param.param) {
+ case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+- dev_priv->use_mi_batchbuffer_start = param.value;
++ if (!IS_I965G(dev))
++ dev_priv->use_mi_batchbuffer_start = param.value;
+ break;
+ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+ dev_priv->tex_lru_log_granularity = param.value;
+diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
+index fd91856..737088b 100644
+--- a/drivers/char/drm/i915_drv.h
++++ b/drivers/char/drm/i915_drv.h
+@@ -282,6 +282,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+ #define MI_BATCH_BUFFER_START (0x31<<23)
+ #define MI_BATCH_BUFFER_END (0xA<<23)
+ #define MI_BATCH_NON_SECURE (1)
++#define MI_BATCH_NON_SECURE_I965 (1<<8)
+
+ #define MI_WAIT_FOR_EVENT ((0x3<<23))
+ #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
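
To make the flag relocation concrete, here is a sketch using the #defines from
the patch (the GTT offset is an arbitrary example and the ring-buffer emission
itself is elided): on pre-965 parts the non-secure bit rides in the address
dword, while on i965 it moves to bit 8 of the command dword, so code setting
only the old bit no longer has the intended effect.

/* batch_bits.c: where the non-secure flag lives per generation. */
#include <stdio.h>

#define MI_BATCH_BUFFER_START    (0x31 << 23)
#define MI_BATCH_NON_SECURE      (1)
#define MI_BATCH_NON_SECURE_I965 (1 << 8)

int main(void)
{
        unsigned int start = 0x10000;   /* example batch buffer offset */

        /* pre-965: the flag rides in the address dword */
        printf("old: cmd=%#010x addr=%#010x\n",
               MI_BATCH_BUFFER_START | (2 << 6),
               start | MI_BATCH_NON_SECURE);

        /* i965: the flag moves into the command dword */
        printf("965: cmd=%#010x addr=%#010x\n",
               MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965,
               start);
        return 0;
}
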
diff --git a/tags/2.6.18-12/30023_appletalk-endianness-annotations.patch b/tags/2.6.18-12/30023_appletalk-endianness-annotations.patch
new file mode 100644
index 0000000..8c826fe
--- /dev/null
+++ b/tags/2.6.18-12/30023_appletalk-endianness-annotations.patch
@@ -0,0 +1,285 @@
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 27 Sep 2006 04:22:08 +0000 (-0700)
+Subject: [ATALK]: endianness annotations
+X-Git-Tag: v2.6.19~1799^2~155
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=2a50f28c326d20ab4556be1b867ecddf6aefbb88
+
+[ATALK]: endianness annotations
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
+index 7f7dd45..b98592a 100644
+--- a/drivers/net/appletalk/ipddp.c
++++ b/drivers/net/appletalk/ipddp.c
+@@ -145,9 +145,7 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ /* Create the Extended DDP header */
+ ddp = (struct ddpehdr *)skb->data;
+- ddp->deh_len = skb->len;
+- ddp->deh_hops = 1;
+- ddp->deh_pad = 0;
++ ddp->deh_len_hops = htons(skb->len + (1<<10));
+ ddp->deh_sum = 0;
+
+ /*
+@@ -170,7 +168,6 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
+ ddp->deh_sport = 72;
+
+ *((__u8 *)(ddp+1)) = 22; /* ddp type = IP */
+- *((__u16 *)ddp)=ntohs(*((__u16 *)ddp)); /* fix up length field */
+
+ skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */
+
+diff --git a/include/linux/atalk.h b/include/linux/atalk.h
+index 6ba3aa8..75b8bac 100644
+--- a/include/linux/atalk.h
++++ b/include/linux/atalk.h
+@@ -88,15 +88,7 @@ static inline struct atalk_sock *at_sk(struct sock *sk)
+ #include <asm/byteorder.h>
+
+ struct ddpehdr {
+-#ifdef __LITTLE_ENDIAN_BITFIELD
+- __u16 deh_len:10,
+- deh_hops:4,
+- deh_pad:2;
+-#else
+- __u16 deh_pad:2,
+- deh_hops:4,
+- deh_len:10;
+-#endif
++ __be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */
+ __be16 deh_sum;
+ __be16 deh_dnet;
+ __be16 deh_snet;
+@@ -112,36 +104,6 @@ static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb)
+ return (struct ddpehdr *)skb->h.raw;
+ }
+
+-/*
+- * Don't drop the struct into the struct above. You'll get some
+- * surprise padding.
+- */
+-struct ddpebits {
+-#ifdef __LITTLE_ENDIAN_BITFIELD
+- __u16 deh_len:10,
+- deh_hops:4,
+- deh_pad:2;
+-#else
+- __u16 deh_pad:2,
+- deh_hops:4,
+- deh_len:10;
+-#endif
+-};
+-
+-/* Short form header */
+-struct ddpshdr {
+-#ifdef __LITTLE_ENDIAN_BITFIELD
+- __u16 dsh_len:10,
+- dsh_pad:6;
+-#else
+- __u16 dsh_pad:6,
+- dsh_len:10;
+-#endif
+- __u8 dsh_dport;
+- __u8 dsh_sport;
+- /* And netatalk apps expect to stick the type in themselves */
+-};
+-
+ /* AppleTalk AARP headers */
+ struct elapaarp {
+ __be16 hw_type;
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 96dc6bb..708e2e0 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1002,7 +1002,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
+ return sum;
+ }
+
+-static unsigned short atalk_checksum(const struct sk_buff *skb, int len)
++static __be16 atalk_checksum(const struct sk_buff *skb, int len)
+ {
+ unsigned long sum;
+
+@@ -1010,7 +1010,7 @@ static unsigned short atalk_checksum(const struct sk_buff *skb, int len)
+ sum = atalk_sum_skb(skb, 4, len-4, 0);
+
+ /* Use 0xFFFF for 0. 0 itself means none */
+- return sum ? htons((unsigned short)sum) : 0xFFFF;
++ return sum ? htons((unsigned short)sum) : htons(0xFFFF);
+ }
+
+ static struct proto ddp_proto = {
+@@ -1289,7 +1289,7 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
+ #endif
+
+ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
+- struct ddpehdr *ddp, struct ddpebits *ddphv,
++ struct ddpehdr *ddp, __u16 len_hops,
+ int origlen)
+ {
+ struct atalk_route *rt;
+@@ -1317,10 +1317,12 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
+
+ /* Route the packet */
+ rt = atrtr_find(&ta);
+- if (!rt || ddphv->deh_hops == DDP_MAXHOPS)
++ /* increment hops count */
++ len_hops += 1 << 10;
++ if (!rt || !(len_hops & (15 << 10)))
+ goto free_it;
++
+ /* FIXME: use skb->cb to be able to use shared skbs */
+- ddphv->deh_hops++;
+
+ /*
+ * Route goes through another gateway, so set the target to the
+@@ -1335,11 +1337,10 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
+ /* Fix up skb->len field */
+ skb_trim(skb, min_t(unsigned int, origlen,
+ (rt->dev->hard_header_len +
+- ddp_dl->header_length + ddphv->deh_len)));
++ ddp_dl->header_length + (len_hops & 1023))));
+
+- /* Mend the byte order */
+ /* FIXME: use skb->cb to be able to use shared skbs */
+- *((__u16 *)ddp) = ntohs(*((__u16 *)ddphv));
++ ddp->deh_len_hops = htons(len_hops);
+
+ /*
+ * Send the buffer onwards
+@@ -1394,7 +1395,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct atalk_iface *atif;
+ struct sockaddr_at tosat;
+ int origlen;
+- struct ddpebits ddphv;
++ __u16 len_hops;
+
+ /* Don't mangle buffer if shared */
+ if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
+@@ -1406,16 +1407,11 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+
+ ddp = ddp_hdr(skb);
+
+- /*
+- * Fix up the length field [Ok this is horrible but otherwise
+- * I end up with unions of bit fields and messy bit field order
+- * compiler/endian dependencies..]
+- */
+- *((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
++ len_hops = ntohs(ddp->deh_len_hops);
+
+ /* Trim buffer in case of stray trailing data */
+ origlen = skb->len;
+- skb_trim(skb, min_t(unsigned int, skb->len, ddphv.deh_len));
++ skb_trim(skb, min_t(unsigned int, skb->len, len_hops & 1023));
+
+ /*
+ * Size check to see if ddp->deh_len was crap
+@@ -1430,7 +1426,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ * valid for net byte orders all over the networking code...
+ */
+ if (ddp->deh_sum &&
+- atalk_checksum(skb, ddphv.deh_len) != ddp->deh_sum)
++ atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
+ /* Not a valid AppleTalk frame - dustbin time */
+ goto freeit;
+
+@@ -1444,7 +1440,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ /* Not ours, so we route the packet via the correct
+ * AppleTalk iface
+ */
+- atalk_route_packet(skb, dev, ddp, &ddphv, origlen);
++ atalk_route_packet(skb, dev, ddp, len_hops, origlen);
+ goto out;
+ }
+
+@@ -1489,7 +1485,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ /* Find our address */
+ struct atalk_addr *ap = atalk_find_dev_addr(dev);
+
+- if (!ap || skb->len < sizeof(struct ddpshdr))
++ if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
+ goto freeit;
+
+ /* Don't mangle buffer if shared */
+@@ -1519,11 +1515,8 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
+ /*
+ * Not sure about this bit...
+ */
+- ddp->deh_len = skb->len;
+- ddp->deh_hops = DDP_MAXHOPS; /* Non routable, so force a drop
+- if we slip up later */
+- /* Mend the byte order */
+- *((__u16 *)ddp) = htons(*((__u16 *)ddp));
++ /* Non routable, so force a drop if we slip up later */
++ ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
+ }
+ skb->h.raw = skb->data;
+
+@@ -1622,16 +1615,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
+
+ ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr));
+- ddp->deh_pad = 0;
+- ddp->deh_hops = 0;
+- ddp->deh_len = len + sizeof(*ddp);
+- /*
+- * Fix up the length field [Ok this is horrible but otherwise
+- * I end up with unions of bit fields and messy bit field order
+- * compiler/endian dependencies..
+- */
+- *((__u16 *)ddp) = ntohs(*((__u16 *)ddp));
+-
++ ddp->deh_len_hops = htons(len + sizeof(*ddp));
+ ddp->deh_dnet = usat->sat_addr.s_net;
+ ddp->deh_snet = at->src_net;
+ ddp->deh_dnode = usat->sat_addr.s_node;
+@@ -1712,8 +1696,8 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
+ struct ddpehdr *ddp;
+ int copied = 0;
++ int offset = 0;
+ int err = 0;
+- struct ddpebits ddphv;
+ struct sk_buff *skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+@@ -1721,25 +1705,18 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+
+ /* FIXME: use skb->cb to be able to use shared skbs */
+ ddp = ddp_hdr(skb);
+- *((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp));
++ copied = ntohs(ddp->deh_len_hops) & 1023;
+
+- if (sk->sk_type == SOCK_RAW) {
+- copied = ddphv.deh_len;
+- if (copied > size) {
+- copied = size;
+- msg->msg_flags |= MSG_TRUNC;
+- }
++ if (sk->sk_type != SOCK_RAW) {
++ offset = sizeof(*ddp);
++ copied -= offset;
++ }
+
+- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+- } else {
+- copied = ddphv.deh_len - sizeof(*ddp);
+- if (copied > size) {
+- copied = size;
+- msg->msg_flags |= MSG_TRUNC;
+- }
+- err = skb_copy_datagram_iovec(skb, sizeof(*ddp),
+- msg->msg_iov, copied);
++ if (copied > size) {
++ copied = size;
++ msg->msg_flags |= MSG_TRUNC;
+ }
++ err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+
+ if (!err) {
+ if (sat) {
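
The new representation packs the 10-bit length and 4-bit hop count into a
single big-endian 16-bit field instead of compiler-dependent bitfields.
Hypothetical helpers mirroring the "& 1023" and "1 << 10" idioms used
throughout the patch (the htons()/ntohs() conversion is elided; everything
below is host order):

/* len_hops.c: packing and unpacking deh_len_hops. */
#include <stdio.h>

static unsigned short pack_len_hops(unsigned len, unsigned hops)
{
        return (unsigned short)((len & 1023) | (hops << 10));
}

int main(void)
{
        unsigned short lh = pack_len_hops(600, 1);

        lh += 1 << 10;          /* "increment hops count", as above */
        printf("len=%u hops=%u\n", lh & 1023U, (lh >> 10) & 15U);
        return 0;
}
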
diff --git a/tags/2.6.18-12/30024_drm-i965.patch b/tags/2.6.18-12/30024_drm-i965.patch
new file mode 100644
index 0000000..e01faed
--- /dev/null
+++ b/tags/2.6.18-12/30024_drm-i965.patch
@@ -0,0 +1,221 @@
+
+commit 48cb9aceed782a4e9c557f30429e65f845dd777d
+Author: Alan Hourihane <alanh@tungstengraphics.com>
+Date: Tue Aug 8 15:05:54 2006 -0700
+
+ Add drm support for Intel i965G chipsets.
+
+    This is a patch prepared by Guangdeng Liao based on Tungsten Graphics's
+ final code drop.
+
+ backport taken from fedora tree. -maks
+
+---
+ drivers/char/drm/drm_pciids.h | 4 4 + 0 - 0 !
+ drivers/char/drm/i915_dma.c | 47 35 + 12 - 0 !
+ drivers/char/drm/i915_drm.h | 6 6 + 0 - 0 !
+ drivers/char/drm/i915_drv.h | 14 8 + 6 - 0 !
+ drivers/char/drm/i915_irq.c | 18 14 + 4 - 0 !
+ 5 files changed, 67 insertions(+), 22 deletions(-)
+
+Index: linux-2.6.17/drivers/char/drm/i915_dma.c
+===================================================================
+--- linux-2.6.17.orig/drivers/char/drm/i915_dma.c 2006-08-10 11:43:30.000000000 +0200
++++ linux-2.6.17/drivers/char/drm/i915_dma.c 2006-08-10 11:50:44.000000000 +0200
+@@ -31,6 +31,12 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+
++#define IS_I965G(dev) (dev->pdev->device == 0x2972 || \
++ dev->pdev->device == 0x2982 || \
++ dev->pdev->device == 0x2992 || \
++ dev->pdev->device == 0x29A2)
++
++
+ /* Really want an OS-independent resettable timer. Would like to have
+ * this loop run for (eg) 3 sec, but have the timer reset every time
+ * the head pointer changes, so that EBUSY only happens if the ring
+@@ -347,14 +353,15 @@ static int i915_emit_cmds(drm_device_t *
+ if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+ return DRM_ERR(EINVAL);
+
+- BEGIN_LP_RING(((dwords+1)&~1));
++ BEGIN_LP_RING((dwords+1)&~1);
+
+ for (i = 0; i < dwords;) {
+ int cmd, sz;
+
+- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+- return DRM_ERR(EINVAL);
++ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) {
+
++ return DRM_ERR(EINVAL);
++ }
+ if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ return DRM_ERR(EINVAL);
+
+@@ -395,24 +402,40 @@ static int i915_emit_box(drm_device_t *
+ return DRM_ERR(EINVAL);
+ }
+
+- BEGIN_LP_RING(6);
+- OUT_RING(GFX_OP_DRAWRECT_INFO);
+- OUT_RING(DR1);
+- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+- OUT_RING(DR4);
+- OUT_RING(0);
+- ADVANCE_LP_RING();
++ if (IS_I965G(dev)) {
++ BEGIN_LP_RING(4);
++ OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
++ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
++ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++ OUT_RING(DR4);
++ ADVANCE_LP_RING();
++ } else {
++ BEGIN_LP_RING(6);
++ OUT_RING(GFX_OP_DRAWRECT_INFO);
++ OUT_RING(DR1);
++ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
++ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++ OUT_RING(DR4);
++ OUT_RING(0);
++ ADVANCE_LP_RING();
++ }
+
+ return 0;
+ }
+
++/* XXX: Emitting the counter should really be moved to part of the IRQ
++ * emit. For now, do it in both places:
++ */
++
+ static void i915_emit_breadcrumb(drm_device_t *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
++ dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
++
++ if (dev_priv->counter > 0x7FFFFFFFUL)
++ dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(CMD_STORE_DWORD_IDX);
+Index: linux-2.6.17/drivers/char/drm/i915_drm.h
+===================================================================
+--- linux-2.6.17.orig/drivers/char/drm/i915_drm.h 2006-08-10 11:43:30.000000000 +0200
++++ linux-2.6.17/drivers/char/drm/i915_drm.h 2006-08-10 11:46:31.000000000 +0200
+@@ -98,6 +98,12 @@ typedef struct _drm_i915_sarea {
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
++
++ unsigned int front_tiled;
++ unsigned int back_tiled;
++ unsigned int depth_tiled;
++ unsigned int rotated_tiled;
++ unsigned int rotated2_tiled;
+ } drm_i915_sarea_t;
+
+ /* Flags for perf_boxes
+Index: linux-2.6.17/drivers/char/drm/i915_drv.h
+===================================================================
+--- linux-2.6.17.orig/drivers/char/drm/i915_drv.h 2006-08-10 11:43:30.000000000 +0200
++++ linux-2.6.17/drivers/char/drm/i915_drv.h 2006-08-10 11:50:44.000000000 +0200
+@@ -137,14 +137,14 @@ extern void i915_mem_release(drm_device_
+ #define I915_VERBOSE 0
+
+ #define RING_LOCALS unsigned int outring, ringmask, outcount; \
+- volatile char *virt;
++ volatile char *virt;
+
+ #define BEGIN_LP_RING(n) do { \
+ if (I915_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
+- n, __FUNCTION__); \
+- if (dev_priv->ring.space < n*4) \
+- i915_wait_ring(dev, n*4, __FUNCTION__); \
++ (n), __FUNCTION__); \
++ if (dev_priv->ring.space < (n)*4) \
++ i915_wait_ring(dev, (n)*4, __FUNCTION__); \
+ outcount = 0; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+@@ -153,8 +153,8 @@ extern void i915_mem_release(drm_device_
+
+ #define OUT_RING(n) do { \
+ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+- *(volatile unsigned int *)(virt + outring) = n; \
+- outcount++; \
++ *(volatile unsigned int *)(virt + outring) = (n); \
++ outcount++; \
+ outring += 4; \
+ outring &= ringmask; \
+ } while (0)
+@@ -250,6 +250,8 @@ extern int i915_wait_ring(drm_device_t *
+ #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+ #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+
++#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
++
+ #define MI_BATCH_BUFFER ((0x30<<23)|1)
+ #define MI_BATCH_BUFFER_START (0x31<<23)
+ #define MI_BATCH_BUFFER_END (0xA<<23)
+Index: linux-2.6.17/drivers/char/drm/i915_irq.c
+===================================================================
+--- linux-2.6.17.orig/drivers/char/drm/i915_irq.c 2006-08-10 11:43:30.000000000 +0200
++++ linux-2.6.17/drivers/char/drm/i915_irq.c 2006-08-10 11:50:54.000000000 +0200
+@@ -69,22 +69,32 @@ irqreturn_t i915_driver_irq_handler(DRM_
+
+ static int i915_emit_irq(drm_device_t * dev)
+ {
++
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- u32 ret;
+ RING_LOCALS;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+- ret = dev_priv->counter;
++ dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
++
++ if (dev_priv->counter > 0x7FFFFFFFUL)
++ dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
+- BEGIN_LP_RING(2);
++ BEGIN_LP_RING(6);
++ OUT_RING(CMD_STORE_DWORD_IDX);
++ OUT_RING(20);
++ OUT_RING(dev_priv->counter);
++
++ OUT_RING(0);
+ OUT_RING(0);
+ OUT_RING(GFX_OP_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+- return ret;
++ return dev_priv->counter;
++
++
+ }
+
+ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
+Index: linux-2.6.17/drivers/char/drm/drm_pciids.h
+===================================================================
+--- linux-2.6.17.orig/drivers/char/drm/drm_pciids.h 2006-06-18 03:49:35.000000000 +0200
++++ linux-2.6.17/drivers/char/drm/drm_pciids.h 2006-08-10 11:48:36.000000000 +0200
+@@ -285,5 +285,9 @@
+ {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
diff --git a/tags/2.6.18-12/30025_ipv4-fib_props-out-of-bounds.patch b/tags/2.6.18-12/30025_ipv4-fib_props-out-of-bounds.patch
new file mode 100644
index 0000000..3fb091d
--- /dev/null
+++ b/tags/2.6.18-12/30025_ipv4-fib_props-out-of-bounds.patch
@@ -0,0 +1,42 @@
+From: Thomas Graf <tgraf@suug.ch>
+Date: Sun, 25 Mar 2007 03:32:54 +0000 (-0700)
+Subject: [IPv4] fib: Fix out of bound access of fib_props[]
+X-Git-Tag: v2.6.21~241^2~12
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a0ee18b9b7d3847976c6fb315c06a34fb296de0e
+
+[IPv4] fib: Fix out of bound access of fib_props[]
+
+Fixes a typo which caused fib_props[] to have the wrong size
+and makes sure the value used to index the array, which is
+provided by userspace via netlink, is checked to avoid
+out-of-bounds access.
+
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>, heavily
+based upon Tim Gardner's backport for Ubuntu:
+ http://kernel.ubuntu.com/git?p=ubuntu/ubuntu-edgy.git;a=commitdiff;h=6e87288e83ac08e7154980795622efdafd49c9c8
+
+--- linux-source-2.6.18.orig/net/ipv4/fib_semantics.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/fib_semantics.c 2007-08-27 22:15:04.678316443 -0600
+@@ -88,7 +88,7 @@ static const struct
+ {
+ int error;
+ u8 scope;
+-} fib_props[RTA_MAX + 1] = {
++} fib_props[RTN_MAX + 1] = {
+ {
+ .error = 0,
+ .scope = RT_SCOPE_NOWHERE,
+@@ -662,6 +662,9 @@ fib_create_info(const struct rtmsg *r, s
+ u32 mp_alg = IP_MP_ALG_NONE;
+ #endif
+
++ if (r->rtm_type > RTN_MAX)
++ goto err_inval;
++
+ /* Fast check to catch the most weird cases */
+ if (fib_props[r->rtm_type].scope > r->rtm_scope)
+ goto err_inval;
diff --git a/tags/2.6.18-12/30026_cifs-fix-sign-settings.patch b/tags/2.6.18-12/30026_cifs-fix-sign-settings.patch
new file mode 100644
index 0000000..11f8021
--- /dev/null
+++ b/tags/2.6.18-12/30026_cifs-fix-sign-settings.patch
@@ -0,0 +1,179 @@
+From: Steve French <sfrench@us.ibm.com>
+Date: Thu, 28 Jun 2007 18:41:42 +0000 (+0000)
+Subject: [CIFS] Fix sign mount option and sign proc config setting
+X-Git-Tag: v2.6.23-rc1~478^2~20
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=762e5ab77c803c819e45d054518a98efb70b0f60
+
+[CIFS] Fix sign mount option and sign proc config setting
+
+We were checking the wrong (old) global variable to determine
+whether to override the server and force signing on the SMB
+connection.
+
+Acked-by: Dave Kleikamp <shaggy@austin.ibm.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifs_debug.c linux-source-2.6.18/fs/cifs/cifs_debug.c
+--- linux-source-2.6.18.orig/fs/cifs/cifs_debug.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/cifs_debug.c 2007-08-27 23:12:40.666200121 -0600
+@@ -895,90 +895,14 @@ security_flags_write(struct file *file,
+ }
+ /* flags look ok - update the global security flags for cifs module */
+ extended_security = flags;
++ if (extended_security & CIFSSEC_MUST_SIGN) {
++ /* requiring signing implies signing is allowed */
++ extended_security |= CIFSSEC_MAY_SIGN;
++ cFYI(1, ("packet signing now required"));
++ } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) {
++ cFYI(1, ("packet signing disabled"));
++ }
++ /* BB should we turn on MAY flags for other MUST options? */
+ return count;
+ }
+-
+-/* static int
+-ntlmv2_enabled_read(char *page, char **start, off_t off,
+- int count, int *eof, void *data)
+-{
+- int len;
+-
+- len = sprintf(page, "%d\n", ntlmv2_support);
+-
+- len -= off;
+- *start = page + off;
+-
+- if (len > count)
+- len = count;
+- else
+- *eof = 1;
+-
+- if (len < 0)
+- len = 0;
+-
+- return len;
+-}
+-static int
+-ntlmv2_enabled_write(struct file *file, const char __user *buffer,
+- unsigned long count, void *data)
+-{
+- char c;
+- int rc;
+-
+- rc = get_user(c, buffer);
+- if (rc)
+- return rc;
+- if (c == '0' || c == 'n' || c == 'N')
+- ntlmv2_support = 0;
+- else if (c == '1' || c == 'y' || c == 'Y')
+- ntlmv2_support = 1;
+- else if (c == '2')
+- ntlmv2_support = 2;
+-
+- return count;
+-}
+-
+-static int
+-packet_signing_enabled_read(char *page, char **start, off_t off,
+- int count, int *eof, void *data)
+-{
+- int len;
+-
+- len = sprintf(page, "%d\n", sign_CIFS_PDUs);
+-
+- len -= off;
+- *start = page + off;
+-
+- if (len > count)
+- len = count;
+- else
+- *eof = 1;
+-
+- if (len < 0)
+- len = 0;
+-
+- return len;
+-}
+-static int
+-packet_signing_enabled_write(struct file *file, const char __user *buffer,
+- unsigned long count, void *data)
+-{
+- char c;
+- int rc;
+-
+- rc = get_user(c, buffer);
+- if (rc)
+- return rc;
+- if (c == '0' || c == 'n' || c == 'N')
+- sign_CIFS_PDUs = 0;
+- else if (c == '1' || c == 'y' || c == 'Y')
+- sign_CIFS_PDUs = 1;
+- else if (c == '2')
+- sign_CIFS_PDUs = 2;
+-
+- return count;
+-} */
+-
+-
+ #endif
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifssmb.c linux-source-2.6.18/fs/cifs/cifssmb.c
+--- linux-source-2.6.18.orig/fs/cifs/cifssmb.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/cifssmb.c 2007-08-27 23:12:40.678200384 -0600
+@@ -411,11 +411,11 @@ CIFSSMBNegotiate(unsigned int xid, struc
+
+ /* if any of auth flags (ie not sign or seal) are overriden use them */
+ if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+- secFlags = ses->overrideSecFlg;
++ secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */
+ else /* if override flags set only sign/seal OR them with global auth */
+ secFlags = extended_security | ses->overrideSecFlg;
+
+- cFYI(1,("secFlags 0x%x",secFlags));
++ cFYI(1, ("secFlags 0x%x", secFlags));
+
+ pSMB->hdr.Mid = GetNextMid(server);
+ pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
+@@ -582,22 +582,32 @@ CIFSSMBNegotiate(unsigned int xid, struc
+ #ifdef CONFIG_CIFS_WEAK_PW_HASH
+ signing_check:
+ #endif
+- if(sign_CIFS_PDUs == FALSE) {
++ if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
++ /* MUST_SIGN already includes the MAY_SIGN FLAG
++ so if this is zero it means that signing is disabled */
++ cFYI(1, ("Signing disabled"));
+ if(server->secMode & SECMODE_SIGN_REQUIRED)
+- cERROR(1,("Server requires "
+- "/proc/fs/cifs/PacketSigningEnabled to be on"));
++ cERROR(1, ("Server requires "
++ "/proc/fs/cifs/PacketSigningEnabled "
++ "to be on"));
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+- } else if(sign_CIFS_PDUs == 1) {
++ } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
++ /* signing required */
++ cFYI(1, ("Must sign - segFlags 0x%x", secFlags));
++ if ((server->secMode &
++ (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
++ cERROR(1,
++ ("signing required but server lacks support"));
++ } else
++ server->secMode |= SECMODE_SIGN_REQUIRED;
++ } else {
++ /* signing optional ie CIFSSEC_MAY_SIGN */
+ if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+- } else if(sign_CIFS_PDUs == 2) {
+- if((server->secMode &
+- (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
+- cERROR(1,("signing required but server lacks support"));
+- }
+ }
++
+ neg_err_exit:
+ cifs_buf_release(pSMB);
+
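The write handler and negotiate hunks above share one invariant: the MUST flag
contains the MAY bit, so requiring signing implies allowing it, and
(secFlags & CIFSSEC_MAY_SIGN) == 0 can only mean signing is fully off. A
stand-alone sketch of that invariant (the bit values mirror the 2.6.18 cifs
headers but should be treated as illustrative):

    #include <stdio.h>

    #define CIFSSEC_MAY_SIGN  0x00001
    #define CIFSSEC_MUST_SIGN 0x01001   /* deliberately contains MAY_SIGN */

    int main(void)
    {
            unsigned int flags = CIFSSEC_MUST_SIGN;

            if (flags & CIFSSEC_MUST_SIGN)
                    flags |= CIFSSEC_MAY_SIGN;  /* requiring implies allowing */

            /* so a clear MAY bit is an unambiguous "signing disabled" */
            printf("may sign: %d\n", !!(flags & CIFSSEC_MAY_SIGN));
            return 0;
    }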
diff --git a/tags/2.6.18-12/30027_cpuset_tasks-underflow.patch b/tags/2.6.18-12/30027_cpuset_tasks-underflow.patch
new file mode 100644
index 0000000..254ce0f
--- /dev/null
+++ b/tags/2.6.18-12/30027_cpuset_tasks-underflow.patch
@@ -0,0 +1,61 @@
+From: Akinobu Mita <akinobu.mita@gmail.com>
+Date: Wed, 9 May 2007 09:33:33 +0000 (-0700)
+Subject: use simple_read_from_buffer in kernel/
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.20.y.git;a=commitdiff_plain;h=85badbdf5120d246ce2bb3f1a7689a805f9c9006
+
+use simple_read_from_buffer in kernel/
+
+Cleanup using simple_read_from_buffer() for /dev/cpuset/tasks and
+/proc/config.gz.
+
+Cc: Paul Jackson <pj@sgi.com>
+Cc: Randy Dunlap <rdunlap@xenotime.net>
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/kernel/configs.c linux-source-2.6.18/kernel/configs.c
+--- linux-source-2.6.18.orig/kernel/configs.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/kernel/configs.c 2007-08-27 22:30:10.774211736 -0600
+@@ -61,18 +61,9 @@ static ssize_t
+ ikconfig_read_current(struct file *file, char __user *buf,
+ size_t len, loff_t * offset)
+ {
+- loff_t pos = *offset;
+- ssize_t count;
+-
+- if (pos >= kernel_config_data_size)
+- return 0;
+-
+- count = min(len, (size_t)(kernel_config_data_size - pos));
+- if (copy_to_user(buf, kernel_config_data + MAGIC_SIZE + pos, count))
+- return -EFAULT;
+-
+- *offset += count;
+- return count;
++ return simple_read_from_buffer(buf, len, offset,
++ kernel_config_data + MAGIC_SIZE,
++ kernel_config_data_size);
+ }
+
+ static struct file_operations ikconfig_file_ops = {
+diff -urpN linux-source-2.6.18.orig/kernel/cpuset.c linux-source-2.6.18/kernel/cpuset.c
+--- linux-source-2.6.18.orig/kernel/cpuset.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/kernel/cpuset.c 2007-08-27 22:30:10.778211823 -0600
+@@ -1743,12 +1743,7 @@ static ssize_t cpuset_tasks_read(struct
+ {
+ struct ctr_struct *ctr = file->private_data;
+
+- if (*ppos + nbytes > ctr->bufsz)
+- nbytes = ctr->bufsz - *ppos;
+- if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
+- return -EFAULT;
+- *ppos += nbytes;
+- return nbytes;
++ return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
+ }
+
+ static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
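The security payoff of this cleanup is bounds handling: in the removed cpuset
code, a *ppos beyond ctr->bufsz makes ctr->bufsz - *ppos wrap to a huge
unsigned count, and copy_to_user() then leaks kernel memory.
simple_read_from_buffer() centralizes the checks; roughly what the helper
guarantees (a simplified sketch, not the verbatim fs/libfs.c source):

    ssize_t simple_read_from_buffer(void __user *to, size_t count,
                                    loff_t *ppos, const void *from,
                                    size_t available)
    {
            loff_t pos = *ppos;

            if (pos < 0)
                    return -EINVAL;
            if (pos >= available)
                    return 0;       /* EOF: never a wrapped length */
            if (count > available - pos)
                    count = available - pos;
            if (copy_to_user(to, from + pos, count))
                    return -EFAULT;
            *ppos = pos + count;
            return count;
    }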
diff --git a/tags/2.6.18-12/30028_random-bound-check-ordering.patch b/tags/2.6.18-12/30028_random-bound-check-ordering.patch
new file mode 100644
index 0000000..f2e9ab5
--- /dev/null
+++ b/tags/2.6.18-12/30028_random-bound-check-ordering.patch
@@ -0,0 +1,42 @@
+From: Matt Mackall <mpm@selenic.com>
+Date: Thu, 19 Jul 2007 18:30:14 +0000 (-0700)
+Subject: random: fix bound check ordering (CVE-2007-3105)
+X-Git-Tag: v2.6.23-rc1~259
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=5a021e9ffd56c22700133ebc37d607f95be8f7bd
+
+random: fix bound check ordering (CVE-2007-3105)
+
+If root raised the default wakeup threshold over the size of the
+output pool, the pool transfer function could overflow the stack with
+RNG bytes, causing a DoS or potential privilege escalation.
+
+(Bug reported by the PaX Team <pageexec@freemail.hu>)
+
+Cc: Theodore Tso <tytso@mit.edu>
+Cc: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Matt Mackall <mpm@selenic.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 7f52712..397c714 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -693,9 +693,14 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+- int bytes = max_t(int, random_read_wakeup_thresh / 8,
+- min_t(int, nbytes, sizeof(tmp)));
++ /* If we're limited, always leave two wakeup worth's BITS */
+ int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
++ int bytes = nbytes;
++
++ /* pull at least as many as BYTES as wakeup BITS */
++ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
++ /* but never more than the buffer size */
++ bytes = min_t(int, bytes, sizeof(tmp));
+
+ DEBUG_ENT("going to reseed %s with %d bits "
+ "(%d of %d requested)\n",
diff --git a/tags/2.6.18-12/30030_aacraid-ioctl-perm-check.patch b/tags/2.6.18-12/30030_aacraid-ioctl-perm-check.patch
new file mode 100644
index 0000000..cb2a516
--- /dev/null
+++ b/tags/2.6.18-12/30030_aacraid-ioctl-perm-check.patch
@@ -0,0 +1,40 @@
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Date: Mon, 23 Jul 2007 13:51:05 +0000 (+0100)
+Subject: [SCSI] aacraid: Fix security hole
+X-Git-Tag: v2.6.23-rc2~164^2~24
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=60395bb60e0b5e4e0808ac8eb07a92f6c9cdea1f
+
+[SCSI] aacraid: Fix security hole
+
+On the SCSI layer ioctl path there is no implicit permission check for
+ioctls (and indeed other drivers implement unprivileged ioctls). aacraid,
+however, allows all sorts of admin-only operations, so it should check.
+
+Signed-off-by: Alan Cox <alan@redhat.com>
+Acked-by: "Salyzyn, Mark" <mark_salyzyn@adaptec.com>
+Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+--- linux-source-2.6.18.orig/drivers/scsi/aacraid/linit.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/scsi/aacraid/linit.c 2007-08-27 23:27:41.805986591 -0600
+@@ -536,6 +536,8 @@ static int aac_cfg_open(struct inode *in
+ static int aac_cfg_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+ {
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
+ return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+ }
+
+@@ -589,6 +591,8 @@ static int aac_compat_ioctl(struct scsi_
+
+ static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ {
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
+ return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
+ }
+ #endif
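The idiom generalizes to any driver whose ioctls reach management
functionality without passing through a permission-checked path: gate the
entry points on capable() before dispatching. A minimal sketch of the pattern
(the mydev_* names and the dispatch helper are hypothetical):

    #include <linux/capability.h>
    #include <linux/fs.h>

    static long mydev_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
    {
            /* management commands can reconfigure the controller,
             * so demand CAP_SYS_ADMIN before doing anything */
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;
            return mydev_do_ioctl(file->private_data, cmd,
                                  (void __user *)arg); /* hypothetical */
    }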
diff --git a/tags/2.6.18-12/30031_ptrace-handle-bogus-selector.patch b/tags/2.6.18-12/30031_ptrace-handle-bogus-selector.patch
new file mode 100644
index 0000000..2aacd3b
--- /dev/null
+++ b/tags/2.6.18-12/30031_ptrace-handle-bogus-selector.patch
@@ -0,0 +1,86 @@
+From: Roland McGrath <roland@redhat.com>
+Date: Mon, 16 Jul 2007 08:03:16 +0000 (-0700)
+Subject: Handle bogus %cs selector in single-step instruction decoding
+X-Git-Tag: v2.6.23-rc1~492
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=29eb51101c02df517ca64ec472d7501127ad1da8
+
+Handle bogus %cs selector in single-step instruction decoding
+
+The code for LDT segment selectors was not robust in the face of a bogus
+selector set in %cs via ptrace before the single-step was done.
+
+Signed-off-by: Roland McGrath <roland@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/i386/kernel/ptrace.c linux-source-2.6.18/arch/i386/kernel/ptrace.c
+--- linux-source-2.6.18.orig/arch/i386/kernel/ptrace.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/i386/kernel/ptrace.c 2007-09-19 23:45:45.949576125 -0600
+@@ -172,14 +172,22 @@ static unsigned long convert_eip_to_line
+ u32 *desc;
+ unsigned long base;
+
+- down(&child->mm->context.sem);
+- desc = child->mm->context.ldt + (seg & ~7);
+- base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
++ seg &= ~7UL;
+
+- /* 16-bit code segment? */
+- if (!((desc[1] >> 22) & 1))
+- addr &= 0xffff;
+- addr += base;
++ down(&child->mm->context.sem);
++ if (unlikely((seg >> 3) >= child->mm->context.size))
++ addr = -1L; /* bogus selector, access would fault */
++ else {
++ desc = child->mm->context.ldt + seg;
++ base = ((desc[0] >> 16) |
++ ((desc[1] & 0xff) << 16) |
++ (desc[1] & 0xff000000));
++
++ /* 16-bit code segment? */
++ if (!((desc[1] >> 22) & 1))
++ addr &= 0xffff;
++ addr += base;
++ }
+ up(&child->mm->context.sem);
+ }
+ return addr;
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/kernel/ptrace.c linux-source-2.6.18/arch/x86_64/kernel/ptrace.c
+--- linux-source-2.6.18.orig/arch/x86_64/kernel/ptrace.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/kernel/ptrace.c 2007-09-19 23:45:45.953575027 -0600
+@@ -103,16 +103,25 @@ unsigned long convert_rip_to_linear(stru
+ u32 *desc;
+ unsigned long base;
+
+- down(&child->mm->context.sem);
+- desc = child->mm->context.ldt + (seg & ~7);
+- base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
++ seg &= ~7UL;
+
+- /* 16-bit code segment? */
+- if (!((desc[1] >> 22) & 1))
+- addr &= 0xffff;
+- addr += base;
++ down(&child->mm->context.sem);
++ if (unlikely((seg >> 3) >= child->mm->context.size))
++ addr = -1L; /* bogus selector, access would fault */
++ else {
++ desc = child->mm->context.ldt + seg;
++ base = ((desc[0] >> 16) |
++ ((desc[1] & 0xff) << 16) |
++ (desc[1] & 0xff000000));
++
++ /* 16-bit code segment? */
++ if (!((desc[1] >> 22) & 1))
++ addr &= 0xffff;
++ addr += base;
++ }
+ up(&child->mm->context.sem);
+ }
++
+ return addr;
+ }
+
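Both hunks make the same two moves: validate the selector's index against the
LDT size before touching the table, and only then assemble the 32-bit base
from the descriptor words. The descriptor unpacking, as a stand-alone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns the linear base encoded in a two-word descriptor, or -1
     * for an out-of-range selector (the "bogus %cs" case). */
    static int64_t ldt_base(const uint32_t *ldt, unsigned int nr_entries,
                            unsigned int selector)
    {
            unsigned int idx = selector >> 3;   /* selector -> table index */
            const uint32_t *desc;

            if (idx >= nr_entries)
                    return -1;                  /* access would fault */
            desc = ldt + 2 * idx;               /* 8 bytes per entry */
            return (int64_t)((desc[0] >> 16) |          /* base[15:0]  */
                             ((desc[1] & 0xff) << 16) | /* base[23:16] */
                             (desc[1] & 0xff000000));   /* base[31:24] */
    }

    int main(void)
    {
            uint32_t ldt[2] = { 0x12340000, 0xff0000ab }; /* one entry */

            printf("%#llx\n", (long long)ldt_base(ldt, 1, 0x7));  /* 0xffab1234 */
            printf("%lld\n",  (long long)ldt_base(ldt, 1, 0x3f)); /* -1 */
            return 0;
    }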
diff --git a/tags/2.6.18-12/30032_fixup-trace_irq-breakage.patch b/tags/2.6.18-12/30032_fixup-trace_irq-breakage.patch
new file mode 100644
index 0000000..383f5d8
--- /dev/null
+++ b/tags/2.6.18-12/30032_fixup-trace_irq-breakage.patch
@@ -0,0 +1,64 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 18 Jul 2007 18:59:22 +0000 (+0200)
+Subject: i386: fixup TRACE_IRQ breakage
+X-Git-Tag: v2.6.23-rc1~491
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a10d9a71bafd3a283da240d2868e71346d2aef6f
+
+i386: fixup TRACE_IRQ breakage
+
+The TRACE_IRQS_ON function in iret_exc: calls a C function without
+ensuring that the segments are set properly. Move the trace function and
+the enabling of interrupts into the C stub.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/i386/kernel/entry.S linux-source-2.6.18/arch/i386/kernel/entry.S
+--- linux-source-2.6.18.orig/arch/i386/kernel/entry.S 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/i386/kernel/entry.S 2007-09-19 23:53:22.929573806 -0600
+@@ -384,8 +384,6 @@ restore_nocheck_notrace:
+ 1: iret
+ .section .fixup,"ax"
+ iret_exc:
+- TRACE_IRQS_ON
+- sti
+ pushl $0 # no error code
+ pushl $do_iret_error
+ jmp error_code
+diff -urpN linux-source-2.6.18.orig/arch/i386/kernel/traps.c linux-source-2.6.18/arch/i386/kernel/traps.c
+--- linux-source-2.6.18.orig/arch/i386/kernel/traps.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/i386/kernel/traps.c 2007-09-19 23:47:18.209575527 -0600
+@@ -516,10 +516,12 @@ fastcall void do_##name(struct pt_regs *
+ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
+ }
+
+-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
+ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ siginfo_t info; \
++ if (irq) \
++ local_irq_enable(); \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+@@ -559,13 +561,13 @@ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+ #endif
+ DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+ DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
+-DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
+ DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+ DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+ DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+-DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
+
+ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
+ long error_code)
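The C-side half of the fix threads a new irq parameter through the
handler-generating macro, so the one trap that used to rely on the asm sti
(iret_error) now enables interrupts itself once segment state is sane. A
userspace analogue of the macro pattern (printf stands in for
local_irq_enable() and signal delivery):

    #include <stdio.h>

    #define DO_ERROR_INFO(name, irq)                               \
    static void do_##name(void)                                    \
    {                                                              \
            if (irq)                                               \
                    printf("%s: local_irq_enable()\n", #name);     \
            printf("%s: deliver signal\n", #name);                 \
    }

    DO_ERROR_INFO(invalid_op, 0)  /* no irq enable needed */
    DO_ERROR_INFO(iret_error, 1)  /* replaces the sti in iret_exc: */

    int main(void)
    {
            do_invalid_op();
            do_iret_error();
            return 0;
    }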
diff --git a/tags/2.6.18-12/30033_prevent-stack-growth-into-hugetlb-region.patch b/tags/2.6.18-12/30033_prevent-stack-growth-into-hugetlb-region.patch
new file mode 100644
index 0000000..caa7fa7
--- /dev/null
+++ b/tags/2.6.18-12/30033_prevent-stack-growth-into-hugetlb-region.patch
@@ -0,0 +1,47 @@
+From: Adam Litke <agl@us.ibm.com>
+Date: Tue, 30 Jan 2007 22:35:39 +0000 (-0800)
+Subject: [PATCH] Don't allow the stack to grow into hugetlb reserved regions
+X-Git-Tag: v2.6.20-rc7~10
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0d59a01bc461bbab4017ff449b8401151ef44cf6
+
+[PATCH] Don't allow the stack to grow into hugetlb reserved regions
+
+When expanding the stack, we don't currently check if the VMA will cross
+into an area of the address space that is reserved for hugetlb pages.
+Subsequent faults on the expanded portion of such a VMA will confuse the
+low-level MMU code, resulting in an OOPS. Check for this.
+
+Signed-off-by: Adam Litke <agl@us.ibm.com>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Cc: William Lee Irwin III <wli@holomorphy.com>
+Cc: Hugh Dickins <hugh@veritas.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 9717337..cc3a208 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1477,6 +1477,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
+ {
+ struct mm_struct *mm = vma->vm_mm;
+ struct rlimit *rlim = current->signal->rlim;
++ unsigned long new_start;
+
+ /* address space limit tests */
+ if (!may_expand_vm(mm, grow))
+@@ -1496,6 +1497,12 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
+ return -ENOMEM;
+ }
+
++ /* Check to ensure the stack will not grow into a hugetlb-only region */
++ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
++ vma->vm_end - size;
++ if (is_hugepage_only_range(vma->vm_mm, new_start, size))
++ return -EFAULT;
++
+ /*
+ * Overcommit.. This must be the final test, as it will
+ * update security statistics.
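In acct_stack_growth(), size is the prospective total size of the VMA after
growth, so the start of the would-be VMA depends on the growth direction; the
patch hands that whole range to is_hugepage_only_range(). The address
arithmetic in isolation (flag value illustrative):

    #include <stdio.h>

    #define VM_GROWSUP 0x1  /* illustrative flag value */

    /* start of the VMA as it would look after growing to 'size' bytes */
    static unsigned long grown_vma_start(unsigned long vm_start,
                                         unsigned long vm_end,
                                         unsigned long vm_flags,
                                         unsigned long size)
    {
            return (vm_flags & VM_GROWSUP) ? vm_start        /* grows up   */
                                           : vm_end - size;  /* grows down */
    }

    int main(void)
    {
            /* downward stack ending at 0xc0000000, growing to 2 MB */
            printf("%#lx\n", grown_vma_start(0xbff00000UL, 0xc0000000UL,
                                             0, 2UL << 20)); /* 0xbfe00000 */
            return 0;
    }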
diff --git a/tags/2.6.18-12/30034_cifs-honor-umask.patch b/tags/2.6.18-12/30034_cifs-honor-umask.patch
new file mode 100644
index 0000000..bf45500
--- /dev/null
+++ b/tags/2.6.18-12/30034_cifs-honor-umask.patch
@@ -0,0 +1,81 @@
+From: Steve French <sfrench@us.ibm.com>
+Date: Fri, 8 Jun 2007 14:55:14 +0000 (+0000)
+Subject: [CIFS] CIFS should honour umask
+X-Git-Tag: v2.6.22-rc5~50^2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3ce53fc4c57603d99c330a6ee2fe96d94f2d350f
+
+[CIFS] CIFS should honour umask
+
+This patch makes CIFS honour a process' umask like other filesystems.
+Of course the server is still free to munge the permissions if it wants
+to; but the client will send the "right" permissions to begin with.
+
+A few caveats:
+
+1) It only applies to filesystems that have CAP_UNIX (aka support unix
+extensions)
+2) It applies the correct mode to the follow up CIFSSMBUnixSetPerms()
+after remote creation
+
+When mode-to-CIFS/NTFS-ACL mapping is complete, we can do the
+same thing for servers which do not
+support the Unix Extensions.
+
+Signed-off-by: Matt Keenen <matt@opcode-solutions.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/cifs/dir.c linux-source-2.6.18/fs/cifs/dir.c
+--- linux-source-2.6.18.orig/fs/cifs/dir.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/dir.c 2007-09-24 22:49:29.509100350 -0600
+@@ -199,7 +199,8 @@ cifs_create(struct inode *inode, struct
+ /* If Open reported that we actually created a file
+ then we now have to set the mode if possible */
+ if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
+- (oplock & CIFS_CREATE_ACTION))
++ (oplock & CIFS_CREATE_ACTION)) {
++ mode &= ~current->fs->umask;
+ if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ (__u64)current->fsuid,
+@@ -217,7 +218,7 @@ cifs_create(struct inode *inode, struct
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ }
+- else {
++ } else {
+ /* BB implement mode setting via Windows security descriptors */
+ /* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
+ /* could set r/o dos attribute if mode & 0222 == 0 */
+@@ -325,6 +326,7 @@ int cifs_mknod(struct inode *inode, stru
+ if(full_path == NULL)
+ rc = -ENOMEM;
+ else if (pTcon->ses->capabilities & CAP_UNIX) {
++ mode &= ~current->fs->umask;
+ if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+ mode,(__u64)current->fsuid,(__u64)current->fsgid,
+diff -urpN linux-source-2.6.18.orig/fs/cifs/inode.c linux-source-2.6.18/fs/cifs/inode.c
+--- linux-source-2.6.18.orig/fs/cifs/inode.c 2007-09-18 16:46:11.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/inode.c 2007-09-24 22:50:34.825099389 -0600
+@@ -751,7 +751,8 @@ int cifs_mkdir(struct inode *inode, stru
+ d_instantiate(direntry, newinode);
+ if (direntry->d_inode)
+ direntry->d_inode->i_nlink = 2;
+- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
++ if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
++ mode &= ~current->fs->umask;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+ mode,
+@@ -769,7 +770,7 @@ int cifs_mkdir(struct inode *inode, stru
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ }
+- else {
++ } else {
+ /* BB to be implemented via Windows secrty descriptors
+ eg CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
+ -1, -1, local_nls); */
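Each of the three hunks boils down to one line of bit arithmetic applied
before the mode leaves the client. With concrete numbers:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            mode_t mode = 0666;     /* mode the caller asked for */
            mode_t mask = 022;      /* process umask (current->fs->umask) */

            mode &= ~mask;          /* what now goes to the server */
            printf("%04o\n", mode); /* prints 0644 */
            return 0;
    }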
diff --git a/tags/2.6.18-12/30035_amd64-zero-extend-32bit-ptrace.patch b/tags/2.6.18-12/30035_amd64-zero-extend-32bit-ptrace.patch
new file mode 100644
index 0000000..0f2637a
--- /dev/null
+++ b/tags/2.6.18-12/30035_amd64-zero-extend-32bit-ptrace.patch
@@ -0,0 +1,88 @@
+From: Andi Kleen <ak@suse.de>
+Date: Fri, 21 Sep 2007 14:16:18 +0000 (+0200)
+Subject: x86_64: Zero extend all registers after ptrace in 32bit entry path.
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=176df2457ef6207156ca1a40991c54ca01fef567
+
+x86_64: Zero extend all registers after ptrace in 32bit entry path.
+
+Strictly it's only needed for eax.
+
+It actually does a little more than strictly needed -- the other registers
+are already zero extended.
+
+Also remove the now unnecessary and non-functional compat task check
+in ptrace.
+
+This is CVE-2007-4573
+
+Found by Wojciech Purczynski
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/ia32/ia32entry.S linux-source-2.6.18/arch/x86_64/ia32/ia32entry.S
+--- linux-source-2.6.18.orig/arch/x86_64/ia32/ia32entry.S 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/ia32/ia32entry.S 2007-09-25 00:10:16.089100799 -0600
+@@ -38,6 +38,18 @@
+ movq %rax,R8(%rsp)
+ .endm
+
++ .macro LOAD_ARGS32 offset
++ movl \offset(%rsp),%r11d
++ movl \offset+8(%rsp),%r10d
++ movl \offset+16(%rsp),%r9d
++ movl \offset+24(%rsp),%r8d
++ movl \offset+40(%rsp),%ecx
++ movl \offset+48(%rsp),%edx
++ movl \offset+56(%rsp),%esi
++ movl \offset+64(%rsp),%edi
++ movl \offset+72(%rsp),%eax
++ .endm
++
+ .macro CFI_STARTPROC32 simple
+ CFI_STARTPROC \simple
+ CFI_UNDEFINED r8
+@@ -151,7 +163,7 @@ sysenter_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
+- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ movl %ebp, %ebp
+ /* no need to do an access_ok check here because rbp has been
+@@ -253,7 +265,7 @@ cstar_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
+- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ movl RSP-ARGOFFSET(%rsp), %r8d
+ /* no need to do an access_ok check here because r8 has been
+@@ -330,7 +342,7 @@ ia32_tracesys:
+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
+ movq %rsp,%rdi /* &pt_regs -> arg1 */
+ call syscall_trace_enter
+- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ jmp ia32_do_syscall
+ END(ia32_syscall)
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/kernel/ptrace.c linux-source-2.6.18/arch/x86_64/kernel/ptrace.c
+--- linux-source-2.6.18.orig/arch/x86_64/kernel/ptrace.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/kernel/ptrace.c 2007-09-25 00:10:16.089100799 -0600
+@@ -223,10 +223,6 @@ static int putreg(struct task_struct *ch
+ {
+ unsigned long tmp;
+
+- /* Some code in the 64bit emulation may not be 64bit clean.
+- Don't take any chances. */
+- if (test_tsk_thread_flag(child, TIF_IA32))
+- value &= 0xffffffff;
+ switch (regno) {
+ case offsetof(struct user_regs_struct,fs):
+ if (value && (value & 3) != 3)
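LOAD_ARGS32 works because a 32-bit movl on x86-64 zero-extends into the full
64-bit register, so reloading every argument register after
syscall_trace_enter() discards any high bits a tracer planted, most
importantly in the syscall number. The effect of that truncation, modeled in
C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* a tracer stores a 64-bit value into a syscall argument... */
            uint64_t rax = 0xffffffff00000001ULL;

            /* ...the 32-bit entry path must use only the low half;
             * this cast is what movl %eax,%eax achieves in effect */
            uint64_t eax = (uint32_t)rax;

            printf("%#llx -> %#llx\n",
                   (unsigned long long)rax, (unsigned long long)eax);
            /* without the truncation, the value could index far past
             * the 32-bit syscall table (the CVE-2007-4573 condition) */
            return 0;
    }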
diff --git a/tags/2.6.18-12/30036_jffs2-ACL-vs-mode-handling.patch b/tags/2.6.18-12/30036_jffs2-ACL-vs-mode-handling.patch
new file mode 100644
index 0000000..6d52512
--- /dev/null
+++ b/tags/2.6.18-12/30036_jffs2-ACL-vs-mode-handling.patch
@@ -0,0 +1,355 @@
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Wed, 22 Aug 2007 11:39:19 +0000 (+0100)
+Subject: [JFFS2] Fix ACL vs. mode handling.
+X-Git-Url: http://git.infradead.org/?p=mtd-2.6.git;a=commitdiff_plain;h=9ed437c50d89eabae763dd422579f73fdebf288d
+
+[JFFS2] Fix ACL vs. mode handling.
+
+When POSIX ACL support was enabled, we weren't writing correct
+legacy modes to the medium on inode creation, or when the ACL was set.
+This meant that the permissions would be incorrect after the file system
+was remounted.
+
+Signed-off-by: David Woodhouse <dwmw2@infradead.org>
+---
+
+diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
+index 65b3a1b..8ec9323 100644
+--- a/fs/jffs2/acl.c
++++ b/fs/jffs2/acl.c
+@@ -176,7 +176,7 @@ static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct
+ spin_unlock(&inode->i_lock);
+ }
+
+-static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
++struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
+ {
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct posix_acl *acl;
+@@ -247,8 +247,13 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+ if (rc < 0)
+ return rc;
+ if (inode->i_mode != mode) {
+- inode->i_mode = mode;
+- jffs2_dirty_inode(inode);
++ struct iattr attr;
++
++ attr.ia_valid = ATTR_MODE;
++ attr.ia_mode = mode;
++ rc = jffs2_do_setattr(inode, &attr);
++ if (rc < 0)
++ return rc;
+ }
+ if (rc == 0)
+ acl = NULL;
+@@ -307,22 +312,16 @@ int jffs2_permission(struct inode *inode, int mask, struct nameidata *nd)
+ return generic_permission(inode, mask, jffs2_check_acl);
+ }
+
+-int jffs2_init_acl(struct inode *inode, struct inode *dir)
++int jffs2_init_acl(struct inode *inode, struct posix_acl *acl)
+ {
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+- struct posix_acl *acl = NULL, *clone;
++ struct posix_acl *clone;
+ mode_t mode;
+ int rc = 0;
+
+ f->i_acl_access = JFFS2_ACL_NOT_CACHED;
+ f->i_acl_default = JFFS2_ACL_NOT_CACHED;
+- if (!S_ISLNK(inode->i_mode)) {
+- acl = jffs2_get_acl(dir, ACL_TYPE_DEFAULT);
+- if (IS_ERR(acl))
+- return PTR_ERR(acl);
+- if (!acl)
+- inode->i_mode &= ~current->fs->umask;
+- }
++
+ if (acl) {
+ if (S_ISDIR(inode->i_mode)) {
+ rc = jffs2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
+diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
+index c84378c..90a2dbf 100644
+--- a/fs/jffs2/acl.h
++++ b/fs/jffs2/acl.h
+@@ -28,9 +28,10 @@ struct jffs2_acl_header {
+
+ #define JFFS2_ACL_NOT_CACHED ((void *)-1)
+
++extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
+ extern int jffs2_permission(struct inode *, int, struct nameidata *);
+ extern int jffs2_acl_chmod(struct inode *);
+-extern int jffs2_init_acl(struct inode *, struct inode *);
++extern int jffs2_init_acl(struct inode *, struct posix_acl *);
+ extern void jffs2_clear_acl(struct jffs2_inode_info *);
+
+ extern struct xattr_handler jffs2_acl_access_xattr_handler;
+@@ -38,6 +39,7 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
+
+ #else
+
++#define jffs2_get_acl(inode, type) (NULL)
+ #define jffs2_permission NULL
+ #define jffs2_acl_chmod(inode) (0)
+ #define jffs2_init_acl(inode,dir) (0)
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index d293a1f..8353eb9 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -182,6 +182,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+ struct jffs2_inode_info *f, *dir_f;
+ struct jffs2_sb_info *c;
+ struct inode *inode;
++ struct posix_acl *acl;
+ int ret;
+
+ ri = jffs2_alloc_raw_inode();
+@@ -192,7 +193,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+
+ D1(printk(KERN_DEBUG "jffs2_create()\n"));
+
+- inode = jffs2_new_inode(dir_i, mode, ri);
++ inode = jffs2_new_inode(dir_i, mode, ri, &acl);
+
+ if (IS_ERR(inode)) {
+ D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
+@@ -212,12 +213,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+ dentry->d_name.name, dentry->d_name.len);
+
+ if (ret)
+- goto fail;
++ goto fail_acl;
+
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret)
+- goto fail;
+- ret = jffs2_init_acl(inode, dir_i);
++ goto fail_acl;
++ ret = jffs2_init_acl(inode, acl);
+ if (ret)
+ goto fail;
+
+@@ -230,6 +231,8 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+ inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
+ return 0;
+
++ fail_acl:
++ posix_acl_release(acl);
+ fail:
+ make_bad_inode(inode);
+ iput(inode);
+@@ -306,6 +309,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
+ struct jffs2_full_dirent *fd;
+ int namelen;
+ uint32_t alloclen;
++ struct posix_acl *acl;
+ int ret, targetlen = strlen(target);
+
+ /* FIXME: If you care. We'd need to use frags for the target
+@@ -332,7 +336,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
+ return ret;
+ }
+
+- inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri);
++ inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri, &acl);
+
+ if (IS_ERR(inode)) {
+ jffs2_free_raw_inode(ri);
+@@ -362,6 +366,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return PTR_ERR(fn);
+ }
+
+@@ -372,6 +377,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return -ENOMEM;
+ }
+
+@@ -389,9 +395,10 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return ret;
+ }
+- ret = jffs2_init_acl(inode, dir_i);
++ ret = jffs2_init_acl(inode, acl);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+@@ -469,6 +476,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+ struct jffs2_full_dirent *fd;
+ int namelen;
+ uint32_t alloclen;
++ struct posix_acl *acl;
+ int ret;
+
+ mode |= S_IFDIR;
+@@ -491,7 +499,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+ return ret;
+ }
+
+- inode = jffs2_new_inode(dir_i, mode, ri);
++ inode = jffs2_new_inode(dir_i, mode, ri, &acl);
+
+ if (IS_ERR(inode)) {
+ jffs2_free_raw_inode(ri);
+@@ -518,6 +526,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return PTR_ERR(fn);
+ }
+ /* No data here. Only a metadata node, which will be
+@@ -531,9 +540,10 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return ret;
+ }
+- ret = jffs2_init_acl(inode, dir_i);
++ ret = jffs2_init_acl(inode, acl);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+@@ -629,6 +639,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
+ union jffs2_device_node dev;
+ int devlen = 0;
+ uint32_t alloclen;
++ struct posix_acl *acl;
+ int ret;
+
+ if (!new_valid_dev(rdev))
+@@ -655,7 +666,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
+ return ret;
+ }
+
+- inode = jffs2_new_inode(dir_i, mode, ri);
++ inode = jffs2_new_inode(dir_i, mode, ri, &acl);
+
+ if (IS_ERR(inode)) {
+ jffs2_free_raw_inode(ri);
+@@ -684,6 +695,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return PTR_ERR(fn);
+ }
+ /* No data here. Only a metadata node, which will be
+@@ -697,9 +709,10 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
+ ret = jffs2_init_security(inode, dir_i);
+ if (ret) {
+ jffs2_clear_inode(inode);
++ posix_acl_release(acl);
+ return ret;
+ }
+- ret = jffs2_init_acl(inode, dir_i);
++ ret = jffs2_init_acl(inode, acl);
+ if (ret) {
+ jffs2_clear_inode(inode);
+ return ret;
+diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
+index 1d3b7a9..dd64ddc 100644
+--- a/fs/jffs2/fs.c
++++ b/fs/jffs2/fs.c
+@@ -24,7 +24,7 @@
+
+ static int jffs2_flash_setup(struct jffs2_sb_info *c);
+
+-static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
++int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
+ {
+ struct jffs2_full_dnode *old_metadata, *new_metadata;
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+@@ -36,10 +36,8 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
+ unsigned int ivalid;
+ uint32_t alloclen;
+ int ret;
++
+ D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
+- ret = inode_change_ok(inode, iattr);
+- if (ret)
+- return ret;
+
+ /* Special cases - we don't want more than one data node
+ for these types on the medium at any time. So setattr
+@@ -183,9 +181,14 @@ int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
+ {
+ int rc;
+
++ rc = inode_change_ok(dentry->d_inode, iattr);
++ if (rc)
++ return rc;
++
+ rc = jffs2_do_setattr(dentry->d_inode, iattr);
+ if (!rc && (iattr->ia_valid & ATTR_MODE))
+ rc = jffs2_acl_chmod(dentry->d_inode);
++
+ return rc;
+ }
+
+@@ -399,7 +402,8 @@ void jffs2_write_super (struct super_block *sb)
+
+ /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
+ fill in the raw_inode while you're at it. */
+-struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
++struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri,
++ struct posix_acl **acl)
+ {
+ struct inode *inode;
+ struct super_block *sb = dir_i->i_sb;
+@@ -431,7 +435,23 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
+ } else {
+ ri->gid = cpu_to_je16(current->fsgid);
+ }
+- ri->mode = cpu_to_jemode(mode);
++
++ /* POSIX ACLs have to be processed now, at least partly.
++ The umask is only applied if there's no default ACL */
++ if (!S_ISLNK(mode)) {
++ *acl = jffs2_get_acl(dir_i, ACL_TYPE_DEFAULT);
++ if (IS_ERR(*acl)) {
++ make_bad_inode(inode);
++ iput(inode);
++ inode = (void *)*acl;
++ *acl = NULL;
++ return inode;
++ }
++ if (!(*acl))
++ mode &= ~current->fs->umask;
++ } else {
++ *acl = NULL;
++ }
+ ret = jffs2_do_new_inode (c, f, mode, ri);
+ if (ret) {
+ make_bad_inode(inode);
+diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
+index 80daea9..f6743a9 100644
+--- a/fs/jffs2/os-linux.h
++++ b/fs/jffs2/os-linux.h
+@@ -173,12 +173,15 @@ int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+ extern const struct inode_operations jffs2_symlink_inode_operations;
+
+ /* fs.c */
++struct posix_acl;
++
+ int jffs2_setattr (struct dentry *, struct iattr *);
++int jffs2_do_setattr (struct inode *, struct iattr *);
+ void jffs2_read_inode (struct inode *);
+ void jffs2_clear_inode (struct inode *);
+ void jffs2_dirty_inode(struct inode *inode);
+ struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
+- struct jffs2_raw_inode *ri);
++ struct jffs2_raw_inode *ri, struct posix_acl **acl);
+ int jffs2_statfs (struct dentry *, struct kstatfs *);
+ void jffs2_write_super (struct super_block *);
+ int jffs2_remount_fs (struct super_block *, int *, char *);
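The ordering the patch restores is the POSIX rule that a default ACL on the
parent directory replaces the umask, and that decision has to be made before
the legacy mode is first written to the medium. A userspace model of just
that rule (names are illustrative):

    #include <stdio.h>
    #include <sys/types.h>

    /* the parent's default ACL, when present, replaces the umask */
    static mode_t effective_mode(mode_t mode, int dir_has_default_acl,
                                 mode_t umask_bits)
    {
            if (!dir_has_default_acl)
                    mode &= ~umask_bits; /* umask only without a default ACL */
            return mode;                 /* with an ACL, the ACL math decides */
    }

    int main(void)
    {
            printf("%04o\n", effective_mode(0666, 0, 022)); /* 0644 */
            printf("%04o\n", effective_mode(0666, 1, 022)); /* 0666 */
            return 0;
    }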
diff --git a/tags/2.6.18-12/30039_hugetlb-prio_tree-unit-fix.patch b/tags/2.6.18-12/30039_hugetlb-prio_tree-unit-fix.patch
new file mode 100644
index 0000000..2b92bf0
--- /dev/null
+++ b/tags/2.6.18-12/30039_hugetlb-prio_tree-unit-fix.patch
@@ -0,0 +1,85 @@
+From: Hugh Dickins <hugh@veritas.com>
+Date: Sat, 28 Oct 2006 17:38:43 +0000 (-0700)
+Subject: [PATCH] hugetlb: fix prio_tree unit
+X-Git-Tag: v2.6.19-rc4~50
+X-Git-Url: http://git.kernel.org/gitweb.cgi?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=856fc29505556cf263f3dcda2533cf3766c14ab6
+
+[PATCH] hugetlb: fix prio_tree unit
+
+hugetlb_vmtruncate_list was misconverted to prio_tree: its prio_tree is in
+units of PAGE_SIZE (PAGE_CACHE_SIZE) like any other, not HPAGE_SIZE (whereas
+its radix_tree is kept in units of HPAGE_SIZE, otherwise slots would be
+absurdly sparse).
+
+At first I thought the error benign, just calling __unmap_hugepage_range on
+more vmas than necessary; but on 32-bit machines, when the prio_tree is
+searched correctly, it happens to ensure the v_offset calculation won't
+overflow. As it stood, when truncating at or beyond 4GB, it was liable to
+discard pages COWed from lower offsets; or even to clear pmd entries of
+preceding vmas, triggering exit_mmap's BUG_ON(nr_ptes).
+
+Signed-off-by: Hugh Dickins <hugh@veritas.com>
+Cc: Adam Litke <agl@us.ibm.com>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+---
+
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 0b23b96..0bea6a6 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -271,26 +271,24 @@ static void hugetlbfs_drop_inode(struct inode *inode)
+ hugetlbfs_forget_inode(inode);
+ }
+
+-/*
+- * h_pgoff is in HPAGE_SIZE units.
+- * vma->vm_pgoff is in PAGE_SIZE units.
+- */
+ static inline void
+-hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
++hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
+ {
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+
+- vma_prio_tree_foreach(vma, &iter, root, h_pgoff, ULONG_MAX) {
+- unsigned long h_vm_pgoff;
++ vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
+ unsigned long v_offset;
+
+- h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
+- v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
+ /*
+- * Is this VMA fully outside the truncation point?
++ * Can the expression below overflow on 32-bit arches?
++ * No, because the prio_tree returns us only those vmas
++ * which overlap the truncated area starting at pgoff,
++ * and no vma on a 32-bit arch can span beyond the 4GB.
+ */
+- if (h_vm_pgoff >= h_pgoff)
++ if (vma->vm_pgoff < pgoff)
++ v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
++ else
+ v_offset = 0;
+
+ __unmap_hugepage_range(vma,
+@@ -303,14 +301,14 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
+ */
+ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
+ {
+- unsigned long pgoff;
++ pgoff_t pgoff;
+ struct address_space *mapping = inode->i_mapping;
+
+ if (offset > inode->i_size)
+ return -EINVAL;
+
+ BUG_ON(offset & ~HPAGE_MASK);
+- pgoff = offset >> HPAGE_SHIFT;
++ pgoff = offset >> PAGE_SHIFT;
+
+ inode->i_size = offset;
+ spin_lock(&mapping->i_mmap_lock);
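The unit mismatch is easiest to see numerically: prio_tree keys are PAGE_SIZE
units, but the old code searched with an HPAGE_SIZE-unit key, 1024 times too
small on 32-bit x86 with 4 MB huge pages. For instance:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define HPAGE_SHIFT 22   /* 4 MB huge pages, 32-bit x86 without PAE */

    int main(void)
    {
            unsigned long long truncate_at = 8ULL << 20;   /* 8 MB */

            printf("correct key: %llu\n", truncate_at >> PAGE_SHIFT);  /* 2048 */
            printf("buggy key:   %llu\n", truncate_at >> HPAGE_SHIFT); /* 2    */
            /* a key of 2 overlaps every vma mapping file offset >= 8 KB,
             * so vmas far below the truncation point get unmapped */
            return 0;
    }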
diff --git a/tags/2.6.18-12/30040_usb-pwc-disconnect-block.patch b/tags/2.6.18-12/30040_usb-pwc-disconnect-block.patch
new file mode 100644
index 0000000..679c7bc
--- /dev/null
+++ b/tags/2.6.18-12/30040_usb-pwc-disconnect-block.patch
@@ -0,0 +1,124 @@
+From: Oliver Neukum <oneukum@suse.de>
+Date: Tue, 21 Aug 2007 05:10:42 +0000 (+0200)
+Subject: USB: fix DoS in pwc USB video driver
+X-Git-Tag: v2.6.23-rc4~29^2~8
+X-Git-Url: http://git.kernel.org/gitweb.cgi?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=85237f202d46d55c1bffe0c5b1aa3ddc0f1dce4d
+
+USB: fix DoS in pwc USB video driver
+
+The pwc driver has a disconnect method that waits for user space to
+close the device. This opens up an opportunity for a DoS attack,
+blocking the USB subsystem and making khubd's task busy-wait in
+kernel space. This patch shifts the freeing of resources to close() if
+an opened device is disconnected.
+
+Signed-off-by: Oliver Neukum <oneukum@suse.de>
+CC: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/drivers/media/video/pwc/pwc-if.c linux-source-2.6.18/drivers/media/video/pwc/pwc-if.c
+--- linux-source-2.6.18.orig/drivers/media/video/pwc/pwc-if.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/media/video/pwc/pwc-if.c 2007-10-02 10:23:54.471131296 -0600
+@@ -1186,12 +1186,19 @@ static int pwc_video_open(struct inode *
+ return 0;
+ }
+
++
++static void pwc_cleanup(struct pwc_device *pdev)
++{
++ pwc_remove_sysfs_files(pdev->vdev);
++ video_unregister_device(pdev->vdev);
++}
++
+ /* Note that all cleanup is done in the reverse order as in _open */
+ static int pwc_video_close(struct inode *inode, struct file *file)
+ {
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
+- int i;
++ int i, hint;
+
+ PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
+
+@@ -1214,8 +1221,9 @@ static int pwc_video_close(struct inode
+ pwc_isoc_cleanup(pdev);
+ pwc_free_buffers(pdev);
+
++ lock_kernel();
+ /* Turn off LEDS and power down camera, but only when not unplugged */
+- if (pdev->error_status != EPIPE) {
++ if (!pdev->unplugged) {
+ /* Turn LEDs off */
+ if (pwc_set_leds(pdev, 0, 0) < 0)
+ PWC_DEBUG_MODULE("Failed to set LED on/off time.\n");
+@@ -1224,9 +1232,19 @@ static int pwc_video_close(struct inode
+ if (i < 0)
+ PWC_ERROR("Failed to power down camera (%d)\n", i);
+ }
++ pdev->vopen--;
++ PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", i);
++ } else {
++ pwc_cleanup(pdev);
++ /* Free memory (don't set pdev to 0 just yet) */
++ kfree(pdev);
++ /* search device_hint[] table if we occupy a slot, by any chance */
++ for (hint = 0; hint < MAX_DEV_HINTS; hint++)
++ if (device_hint[hint].pdev == pdev)
++ device_hint[hint].pdev = NULL;
+ }
+- pdev->vopen--;
+- PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
++ unlock_kernel();
++
+ return 0;
+ }
+
+@@ -1763,21 +1781,21 @@ static void usb_pwc_disconnect(struct us
+ /* Alert waiting processes */
+ wake_up_interruptible(&pdev->frameq);
+ /* Wait until device is closed */
+- while (pdev->vopen)
+- schedule();
+- /* Device is now closed, so we can safely unregister it */
+- PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
+- pwc_remove_sysfs_files(pdev->vdev);
+- video_unregister_device(pdev->vdev);
+-
+- /* Free memory (don't set pdev to 0 just yet) */
+- kfree(pdev);
++ if(pdev->vopen) {
++ pdev->unplugged = 1;
++ } else {
++ /* Device is closed, so we can safely unregister it */
++ PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
++ pwc_cleanup(pdev);
++ /* Free memory (don't set pdev to 0 just yet) */
++ kfree(pdev);
+
+ disconnect_out:
+- /* search device_hint[] table if we occupy a slot, by any chance */
+- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
+- if (device_hint[hint].pdev == pdev)
+- device_hint[hint].pdev = NULL;
++ /* search device_hint[] table if we occupy a slot, by any chance */
++ for (hint = 0; hint < MAX_DEV_HINTS; hint++)
++ if (device_hint[hint].pdev == pdev)
++ device_hint[hint].pdev = NULL;
++ }
+
+ unlock_kernel();
+ }
+diff -urpN linux-source-2.6.18.orig/drivers/media/video/pwc/pwc.h linux-source-2.6.18/drivers/media/video/pwc/pwc.h
+--- linux-source-2.6.18.orig/drivers/media/video/pwc/pwc.h 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/media/video/pwc/pwc.h 2007-10-02 10:23:54.471131296 -0600
+@@ -198,6 +198,7 @@ struct pwc_device
+ char vsnapshot; /* snapshot mode */
+ char vsync; /* used by isoc handler */
+ char vmirror; /* for ToUCaM series */
++ char unplugged;
+
+ int cmd_len;
+ unsigned char cmd_buf[13];
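The shape of the fix is a general USB-driver pattern: disconnect() must not
wait for userspace, so it either cleans up immediately (device closed) or
flags the device and leaves the cleanup to the final close(). A skeleton of
the split, with locking and error handling elided and all names illustrative:

    struct dev_state {
            int open_count;
            int unplugged;
    };

    static void do_cleanup(struct dev_state *s)
    {
            /* sysfs removal, video_unregister_device(), kfree() live here */
    }

    static void my_disconnect(struct dev_state *s)
    {
            if (s->open_count)
                    s->unplugged = 1;   /* defer: close() will clean up */
            else
                    do_cleanup(s);      /* nobody has it open, free now */
    }

    static void my_close(struct dev_state *s)
    {
            if (--s->open_count == 0 && s->unplugged)
                    do_cleanup(s);      /* last closer after unplug */
    }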
diff --git a/tags/2.6.18-12/30041_ipv6-disallow-RH0-by-default-2.patch b/tags/2.6.18-12/30041_ipv6-disallow-RH0-by-default-2.patch
new file mode 100644
index 0000000..1923828
--- /dev/null
+++ b/tags/2.6.18-12/30041_ipv6-disallow-RH0-by-default-2.patch
@@ -0,0 +1,30 @@
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 95a9eb5..6e9822d 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -249,12 +249,6 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp)
+
+ hdr = (struct ipv6_rt_hdr *) skb->h.raw;
+
+- if (hdr->type != IPV6_SRCRT_TYPE_0) {
+- IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
+- return -1;
+- }
+-
+ if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
+ skb->pkt_type != PACKET_HOST) {
+ IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+@@ -273,6 +267,12 @@ looped_back:
+ return 1;
+ }
+
++ if (hdr->type != IPV6_SRCRT_TYPE_0) {
++ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
++ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
++ return -1;
++ }
++
+ if (hdr->hdrlen & 0x01) {
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
diff --git a/tags/2.6.18-12/30042_reset-pdeathsig-on-suid-upstream.patch b/tags/2.6.18-12/30042_reset-pdeathsig-on-suid-upstream.patch
new file mode 100644
index 0000000..3e092c0
--- /dev/null
+++ b/tags/2.6.18-12/30042_reset-pdeathsig-on-suid-upstream.patch
@@ -0,0 +1,51 @@
+From: Marcel Holtmann <marcel@holtmann.org>
+Date: Fri, 17 Aug 2007 19:47:58 +0000 (+0200)
+Subject: Reset current->pdeath_signal on SUID binary execution
+X-Git-Tag: v2.6.23-rc4~134
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=d2d56c5f51028cb9f3d800882eb6f4cbd3f9099f
+
+Reset current->pdeath_signal on SUID binary execution
+
+This fixes a vulnerability in the "parent process death signal"
+implementation discovered by Wojciech Purczynski of COSEINC PTE Ltd.
+and iSEC Security Research.
+
+http://marc.info/?l=bugtraq&m=118711306802632&w=2
+
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/exec.c linux-source-2.6.18/fs/exec.c
+--- linux-source-2.6.18.orig/fs/exec.c 2007-09-05 14:11:16.000000000 -0600
++++ linux-source-2.6.18/fs/exec.c 2007-09-05 14:17:02.000000000 -0600
+@@ -883,9 +883,12 @@ int flush_old_exec(struct linux_binprm *
+ */
+ current->mm->task_size = TASK_SIZE;
+
+- if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
+- file_permission(bprm->file, MAY_READ) ||
+- (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
++ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
++ suid_keys(current);
++ current->mm->dumpable = suid_dumpable;
++ current->pdeath_signal = 0;
++ } else if (file_permission(bprm->file, MAY_READ) ||
++ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+ suid_keys(current);
+ current->mm->dumpable = suid_dumpable;
+ }
+@@ -977,8 +980,10 @@ void compute_creds(struct linux_binprm *
+ {
+ int unsafe;
+
+- if (bprm->e_uid != current->uid)
++ if (bprm->e_uid != current->uid) {
+ suid_keys(current);
++ current->pdeath_signal = 0;
++ }
+ exec_keys(current);
+
+ task_lock(current);
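pdeath_signal is the knob set with prctl(PR_SET_PDEATHSIG); the vulnerability
was that it survived a privilege-raising execve(), letting the pre-exec
process aim a signal at the now-privileged image. What the knob itself does,
seen from userspace:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            int sig = 0;

            /* ask the kernel to send SIGTERM when the parent dies;
             * the patch clears this state across suid exec */
            prctl(PR_SET_PDEATHSIG, SIGTERM);
            prctl(PR_GET_PDEATHSIG, &sig);
            printf("pdeath_signal = %d\n", sig);
            return 0;
    }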
diff --git a/tags/2.6.18-12/30044_cifs-better-failed-mount-errors.patch b/tags/2.6.18-12/30044_cifs-better-failed-mount-errors.patch
new file mode 100644
index 0000000..d3b7c91
--- /dev/null
+++ b/tags/2.6.18-12/30044_cifs-better-failed-mount-errors.patch
@@ -0,0 +1,234 @@
+From: Steve French <sfrench@us.ibm.com>
+Date: Thu, 18 Oct 2007 21:45:27 +0000 (+0000)
+Subject: [CIFS] log better errors on failed mounts
+X-Git-Tag: v2.6.24-rc1~138^2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a761ac579b89bc1f00212a42401398108deba65c
+
+[CIFS] log better errors on failed mounts
+
+Also returns more accurate errors to mount for the cases of
+account expired and password expired.
+
+Acked-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifsproto.h linux-source-2.6.18/fs/cifs/cifsproto.h
+--- linux-source-2.6.18.orig/fs/cifs/cifsproto.h 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/cifsproto.h 2007-11-25 14:13:04.000000000 -0700
+@@ -49,7 +49,8 @@ extern int SendReceive(const unsigned in
+ int * /* bytes returned */ , const int long_op);
+ extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
+ struct kvec *, int /* nvec to send */,
+- int * /* type of buf returned */ , const int long_op);
++ int * /* type of buf returned */ , const int long_op,
++ const int logError /* whether to log status code*/ );
+ extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *,
+ struct smb_hdr * /* input */ ,
+ struct smb_hdr * /* out */ ,
+@@ -64,7 +65,7 @@ extern unsigned int smbCalcSize_LE(struc
+ extern int decode_negTokenInit(unsigned char *security_blob, int length,
+ enum securityEnum *secType);
+ extern int cifs_inet_pton(int, char * source, void *dst);
+-extern int map_smb_to_linux_error(struct smb_hdr *smb);
++extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
+ extern void header_assemble(struct smb_hdr *, char /* command */ ,
+ const struct cifsTconInfo *, int /* length of
+ fixed section (word count) in two byte units */);
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifssmb.c linux-source-2.6.18/fs/cifs/cifssmb.c
+--- linux-source-2.6.18.orig/fs/cifs/cifssmb.c 2007-10-03 12:38:14.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/cifssmb.c 2007-11-25 14:14:07.000000000 -0700
+@@ -1170,9 +1170,8 @@ CIFSSMBRead(const int xid, struct cifsTc
+
+ iov[0].iov_base = (char *)pSMB;
+ iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
+- rc = SendReceive2(xid, tcon->ses, iov,
+- 1 /* num iovecs */,
+- &resp_buf_type, 0);
++ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
++ &resp_buf_type, 0 /* not long op */, 1 /* log err */ );
+ cifs_stats_inc(&tcon->num_reads);
+ pSMBr = (READ_RSP *)iov[0].iov_base;
+ if (rc) {
+@@ -1389,7 +1388,7 @@ CIFSSMBWrite2(const int xid, struct cifs
+
+
+ rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
+- long_op);
++ long_op, 0 /* do not log STATUS code */ );
+ cifs_stats_inc(&tcon->num_writes);
+ if (rc) {
+ cFYI(1, ("Send error Write2 = %d", rc));
+@@ -2822,7 +2821,8 @@ CIFSSMBGetCIFSACL(const int xid, struct
+ iov[0].iov_base = (char *)pSMB;
+ iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
+
+- rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0);
++ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
++ 0 /* not long op */, 0 /* do not log STATUS codes */ );
+ cifs_stats_inc(&tcon->num_acl_get);
+ if (rc) {
+ cFYI(1, ("Send error in QuerySecDesc = %d", rc));
+diff -urpN linux-source-2.6.18.orig/fs/cifs/netmisc.c linux-source-2.6.18/fs/cifs/netmisc.c
+--- linux-source-2.6.18.orig/fs/cifs/netmisc.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/netmisc.c 2007-11-25 14:16:03.000000000 -0700
+@@ -114,10 +114,16 @@ static const struct smb_to_posix_error m
+ {ERRusempx, -EIO},
+ {ERRusestd, -EIO},
+ {ERR_NOTIFY_ENUM_DIR, -ENOBUFS},
+- {ERRaccountexpired, -EACCES},
++ {ERRnoSuchUser, -EACCES},
++/* {ERRaccountexpired, -EACCES},
+ {ERRbadclient, -EACCES},
+ {ERRbadLogonTime, -EACCES},
+- {ERRpasswordExpired, -EACCES},
++ {ERRpasswordExpired, -EACCES},*/
++ {ERRaccountexpired, -EKEYEXPIRED},
++ {ERRbadclient, -EACCES},
++ {ERRbadLogonTime, -EACCES},
++ {ERRpasswordExpired, -EKEYEXPIRED},
++
+ {ERRnosupport, -EINVAL},
+ {0, 0}
+ };
+@@ -314,7 +320,7 @@ static const struct {
+ from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE
+ during the session setup } */
+ {
+- ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, {
++ ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */
+ ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, {
+ ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, {
+ ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, {
+@@ -329,10 +335,10 @@ static const struct {
+ ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, {
+ ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, {
+ ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, {
+- ERRSRV, 2241, NT_STATUS_INVALID_LOGON_HOURS}, {
+- ERRSRV, 2240, NT_STATUS_INVALID_WORKSTATION}, {
++ ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, {
++ ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, {
+ ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, {
+- ERRSRV, 2239, NT_STATUS_ACCOUNT_DISABLED}, {
++ ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, {
+ ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, {
+ ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {
+ ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, {
+@@ -629,7 +635,7 @@ static const struct {
+ ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, {
+ ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {
+ ERRDOS, ERRnetlogonNotStarted, NT_STATUS_NETLOGON_NOT_STARTED}, {
+- ERRSRV, 2239, NT_STATUS_ACCOUNT_EXPIRED}, {
++ ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, {
+ ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, {
+ ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {
+ ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, {
+@@ -798,7 +804,7 @@ ntstatus_to_dos(__u32 ntstatus, __u8 * e
+ }
+
+ int
+-map_smb_to_linux_error(struct smb_hdr *smb)
++map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
+ {
+ unsigned int i;
+ int rc = -EIO; /* if transport error smb error may not be set */
+@@ -814,7 +820,9 @@ map_smb_to_linux_error(struct smb_hdr *s
+ if (smb->Flags2 & SMBFLG2_ERR_STATUS) {
+ /* translate the newer STATUS codes to old style errors and then to POSIX errors */
+ __u32 err = le32_to_cpu(smb->Status.CifsError);
+- if(cifsFYI & CIFS_RC)
++ if (logErr && (err != (NT_STATUS_MORE_PROCESSING_REQUIRED)))
++ cifs_print_status(err);
++ else if (cifsFYI & CIFS_RC)
+ cifs_print_status(err);
+ ntstatus_to_dos(err, &smberrclass, &smberrcode);
+ } else {
+@@ -854,7 +862,8 @@ map_smb_to_linux_error(struct smb_hdr *s
+ }
+ /* else ERRHRD class errors or junk - return EIO */
+
+- cFYI(1, (" !!Mapping smb error code %d to POSIX err %d !!", smberrcode,rc));
++ cFYI(1, ("Mapping smb error code %d to POSIX err %d",
++ smberrcode, rc));
+
+ /* generic corrective action e.g. reconnect SMB session on ERRbaduid could be added */
+
+diff -urpN linux-source-2.6.18.orig/fs/cifs/sess.c linux-source-2.6.18/fs/cifs/sess.c
+--- linux-source-2.6.18.orig/fs/cifs/sess.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/sess.c 2007-11-25 14:17:16.000000000 -0700
+@@ -482,7 +482,8 @@ CIFS_SessSetup(unsigned int xid, struct
+
+ iov[1].iov_base = str_area;
+ iov[1].iov_len = count;
+- rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
++ rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type,
++ 0 /* not long op */, 1 /* log NT STATUS if any */ );
+ /* SMB request buf freed in SendReceive2 */
+
+ cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
+diff -urpN linux-source-2.6.18.orig/fs/cifs/smberr.h linux-source-2.6.18/fs/cifs/smberr.h
+--- linux-source-2.6.18.orig/fs/cifs/smberr.h 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/smberr.h 2007-11-25 14:12:02.000000000 -0700
+@@ -173,9 +173,10 @@
+ #define ERRusestd 251 /* temporarily unable to use either raw
+ or mpx */
+ #define ERR_NOTIFY_ENUM_DIR 1024
++#define ERRnoSuchUser 2238 /* user account does not exist */
+ #define ERRaccountexpired 2239
+-#define ERRbadclient 2240
+-#define ERRbadLogonTime 2241
++#define ERRbadclient 2240 /* can not logon from this client */
++#define ERRbadLogonTime 2241 /* logon hours do not allow this */
+ #define ERRpasswordExpired 2242
+ #define ERRnetlogonNotStarted 2455
+ #define ERRnosupport 0xFFFF
+diff -urpN linux-source-2.6.18.orig/fs/cifs/transport.c linux-source-2.6.18/fs/cifs/transport.c
+--- linux-source-2.6.18.orig/fs/cifs/transport.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/transport.c 2007-11-25 14:18:15.000000000 -0700
+@@ -419,7 +419,7 @@ static int wait_for_response(struct cifs
+ int
+ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+ struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
+- const int long_op)
++ const int long_op, const int logError)
+ {
+ int rc = 0;
+ unsigned int receive_len;
+@@ -465,7 +465,6 @@ SendReceive2(const unsigned int xid, str
+ wake_up(&ses->server->request_q);
+ return rc;
+ }
+-
+ rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
+
+ midQ->midState = MID_REQUEST_SUBMITTED;
+@@ -568,8 +567,7 @@ SendReceive2(const unsigned int xid, str
+ }
+
+ /* BB special case reconnect tid and uid here? */
+- /* BB special case Errbadpassword and pwdexpired here */
+- rc = map_smb_to_linux_error(midQ->resp_buf);
++ rc = map_smb_to_linux_error(midQ->resp_buf, logError);
+
+ /* convert ByteCount if necessary */
+ if (receive_len >=
+@@ -750,7 +748,7 @@ SendReceive(const unsigned int xid, stru
+ *pbytes_returned = out_buf->smb_buf_length;
+
+ /* BB special case reconnect tid and uid here? */
+- rc = map_smb_to_linux_error(out_buf);
++ rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
+
+ /* convert ByteCount if necessary */
+ if (receive_len >=
+@@ -995,7 +993,7 @@ SendReceiveBlockingLock(const unsigned i
+ *pbytes_returned = out_buf->smb_buf_length;
+
+ /* BB special case reconnect tid and uid here? */
+- rc = map_smb_to_linux_error(out_buf);
++ rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
+
+ /* convert ByteCount if necessary */
+ if (receive_len >=
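The hunks above change map_smb_to_linux_error() to take a logError flag so the transport layer can decide, per call, whether an NT status code is worth printing before it is mapped to a POSIX errno. Below is a hedged userspace sketch of that pattern; the NT constant is the real wire value, but the errno choices are illustrative, not the CIFS mapping tables:

    /* Hedged sketch: map a wire status to a POSIX errno, logging only on
     * request.  The NT constant is real; the mapping choices are not. */
    #include <stdio.h>
    #include <errno.h>

    #define NT_STATUS_MORE_PROCESSING_REQUIRED 0xC0000016u

    static int map_status_to_errno(unsigned int status, int log_error)
    {
        int rc;

        switch (status) {
        case 0:
            rc = 0;
            break;
        case NT_STATUS_MORE_PROCESSING_REQUIRED:
            rc = -EAGAIN;    /* assumption, not the real CIFS mapping */
            break;
        default:
            rc = -EIO;
            break;
        }

        /* Mirror the hunk above: even with logging enabled, stay quiet
         * about the intermediate MORE_PROCESSING_REQUIRED status. */
        if (log_error && status && status != NT_STATUS_MORE_PROCESSING_REQUIRED)
            fprintf(stderr, "status 0x%08x -> POSIX err %d\n", status, rc);
        return rc;
    }

    int main(void)
    {
        map_status_to_errno(0xC0000022u, 1);                        /* logged */
        map_status_to_errno(NT_STATUS_MORE_PROCESSING_REQUIRED, 1); /* silent */
        return 0;
    }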
diff --git a/tags/2.6.18-12/30045_cifs-corrupt-server-response-overflow.patch b/tags/2.6.18-12/30045_cifs-corrupt-server-response-overflow.patch
new file mode 100644
index 0000000..eb79c7b
--- /dev/null
+++ b/tags/2.6.18-12/30045_cifs-corrupt-server-response-overflow.patch
@@ -0,0 +1,694 @@
+From: Steve French <sfrench@us.ibm.com>
+Date: Tue, 13 Nov 2007 22:41:37 +0000 (+0000)
+Subject: [CIFS] Fix buffer overflow if server sends corrupt response to small
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fsfrench%2Fcifs-2.6.git;a=commitdiff_plain;h=133672efbc1085f9af990bdc145e1822ea93bcf3
+
+[CIFS] Fix buffer overflow if server sends corrupt response to small
+request
+
+In the SendReceive() function in transport.c, the message payload is
+memcpy'd into a buffer passed via the out_buf param. The function
+assumes that all buffers are of size (CIFSMaxBufSize +
+MAX_CIFS_HDR_SIZE); unfortunately it is also called with smaller
+(MAX_CIFS_SMALL_BUFFER_SIZE) buffers. There are eight callers
+(SMB worker functions) which are primarily affected by this change:
+
+TreeDisconnect, uLogoff, Close, findClose, SetFileSize, SetFileTimes,
+Lock and PosixLock
+
+CC: Dave Kleikamp <shaggy@austin.ibm.com>
+CC: Przemyslaw Wegrzyn <czajnik@czajsoft.pl>
+Acked-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifsglob.h linux-source-2.6.18/fs/cifs/cifsglob.h
+--- linux-source-2.6.18.orig/fs/cifs/cifsglob.h 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/cifsglob.h 2007-11-25 14:19:26.000000000 -0700
+@@ -437,6 +437,17 @@ struct dir_notify_req {
+ #define CIFS_LARGE_BUFFER 2
+ #define CIFS_IOVEC 4 /* array of response buffers */
+
++/* Type of Request to SendReceive2 */
++#define CIFS_STD_OP 0 /* normal request timeout */
++#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */
++#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */
++#define CIFS_BLOCKING_OP 4 /* operation can block */
++#define CIFS_ASYNC_OP 8 /* do not wait for response */
++#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */
++#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
++#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
++#define CIFS_NO_RESP 0x040 /* no response buffer required */
++
+ /* Security Flags: indicate type of session setup needed */
+ #define CIFSSEC_MAY_SIGN 0x00001
+ #define CIFSSEC_MAY_NTLM 0x00002
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifsproto.h linux-source-2.6.18/fs/cifs/cifsproto.h
+--- linux-source-2.6.18.orig/fs/cifs/cifsproto.h 2007-11-25 14:13:04.000000000 -0700
++++ linux-source-2.6.18/fs/cifs/cifsproto.h 2007-11-25 14:21:47.000000000 -0700
+@@ -47,10 +47,11 @@ extern int SendReceive(const unsigned in
+ struct smb_hdr * /* input */ ,
+ struct smb_hdr * /* out */ ,
+ int * /* bytes returned */ , const int long_op);
++extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
++ struct smb_hdr *in_buf, int flags);
+ extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
+ struct kvec *, int /* nvec to send */,
+- int * /* type of buf returned */ , const int long_op,
+- const int logError /* whether to log status code*/ );
++ int * /* type of buf returned */ , const int flags);
+ extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *,
+ struct smb_hdr * /* input */ ,
+ struct smb_hdr * /* out */ ,
+diff -urpN linux-source-2.6.18.orig/fs/cifs/cifssmb.c linux-source-2.6.18/fs/cifs/cifssmb.c
+--- linux-source-2.6.18.orig/fs/cifs/cifssmb.c 2007-11-25 14:14:07.000000000 -0700
++++ linux-source-2.6.18/fs/cifs/cifssmb.c 2007-11-25 14:26:03.000000000 -0700
+@@ -619,9 +619,7 @@ int
+ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
+ {
+ struct smb_hdr *smb_buffer;
+- struct smb_hdr *smb_buffer_response; /* BB removeme BB */
+ int rc = 0;
+- int length;
+
+ cFYI(1, ("In tree disconnect"));
+ /*
+@@ -658,16 +656,12 @@ CIFSSMBTDis(const int xid, struct cifsTc
+ if (rc) {
+ up(&tcon->tconSem);
+ return rc;
+- } else {
+- smb_buffer_response = smb_buffer; /* BB removeme BB */
+ }
+- rc = SendReceive(xid, tcon->ses, smb_buffer, smb_buffer_response,
+- &length, 0);
++
++ rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
+ if (rc)
+ cFYI(1, ("Tree disconnect failed %d", rc));
+
+- if (smb_buffer)
+- cifs_small_buf_release(smb_buffer);
+ up(&tcon->tconSem);
+
+ /* No need to return error on this operation if tid invalidated and
+@@ -681,10 +675,8 @@ CIFSSMBTDis(const int xid, struct cifsTc
+ int
+ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
+ {
+- struct smb_hdr *smb_buffer_response;
+ LOGOFF_ANDX_REQ *pSMB;
+ int rc = 0;
+- int length;
+
+ cFYI(1, ("In SMBLogoff for session disconnect"));
+ if (ses)
+@@ -703,8 +695,6 @@ CIFSSMBLogoff(const int xid, struct cifs
+ return rc;
+ }
+
+- smb_buffer_response = (struct smb_hdr *)pSMB; /* BB removeme BB */
+-
+ if(ses->server) {
+ pSMB->hdr.Mid = GetNextMid(ses->server);
+
+@@ -716,8 +706,7 @@ CIFSSMBLogoff(const int xid, struct cifs
+ pSMB->hdr.Uid = ses->Suid;
+
+ pSMB->AndXCommand = 0xFF;
+- rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
+- smb_buffer_response, &length, 0);
++ rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
+ if (ses->server) {
+ atomic_dec(&ses->server->socketUseCount);
+ if (atomic_read(&ses->server->socketUseCount) == 0) {
+@@ -728,7 +717,6 @@ CIFSSMBLogoff(const int xid, struct cifs
+ }
+ }
+ up(&ses->sesSem);
+- cifs_small_buf_release(pSMB);
+
+ /* if session dead then we do not need to do ulogoff,
+ since server closed smb session, no sense reporting
+@@ -978,7 +966,7 @@ OldOpenRetry:
+ pSMB->ByteCount = cpu_to_le16(count);
+ /* long_op set to 1 to allow for oplock break timeouts */
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 1);
++ (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+ cifs_stats_inc(&tcon->num_opens);
+ if (rc) {
+ cFYI(1, ("Error in Open = %d", rc));
+@@ -1092,7 +1080,7 @@ openRetry:
+ pSMB->ByteCount = cpu_to_le16(count);
+ /* long_op set to 1 to allow for oplock break timeouts */
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 1);
++ (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+ cifs_stats_inc(&tcon->num_opens);
+ if (rc) {
+ cFYI(1, ("Error in Open = %d", rc));
+@@ -1171,7 +1159,7 @@ CIFSSMBRead(const int xid, struct cifsTc
+ iov[0].iov_base = (char *)pSMB;
+ iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
+ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
+- &resp_buf_type, 0 /* not long op */, 1 /* log err */ );
++ &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
+ cifs_stats_inc(&tcon->num_reads);
+ pSMBr = (READ_RSP *)iov[0].iov_base;
+ if (rc) {
+@@ -1388,7 +1376,7 @@ CIFSSMBWrite2(const int xid, struct cifs
+
+
+ rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
+- long_op, 0 /* do not log STATUS code */ );
++ long_op);
+ cifs_stats_inc(&tcon->num_writes);
+ if (rc) {
+ cFYI(1, ("Send error Write2 = %d", rc));
+@@ -1430,7 +1418,7 @@ CIFSSMBLock(const int xid, struct cifsTc
+ int timeout = 0;
+ __u16 count;
+
+- cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d",waitFlag,numLock));
++ cFYI(1, ("CIFSSMBLock timeout %d numLock %d", waitFlag, numLock));
+ rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
+
+ if (rc)
+@@ -1439,10 +1427,10 @@ CIFSSMBLock(const int xid, struct cifsTc
+ pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */
+
+ if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
+- timeout = -1; /* no response expected */
++ timeout = CIFS_ASYNC_OP; /* no response expected */
+ pSMB->Timeout = 0;
+ } else if (waitFlag == TRUE) {
+- timeout = 3; /* blocking operation, no timeout */
++ timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
+ pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
+ } else {
+ pSMB->Timeout = 0;
+@@ -1472,15 +1460,16 @@ CIFSSMBLock(const int xid, struct cifsTc
+ if (waitFlag) {
+ rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned);
++ cifs_small_buf_release(pSMB);
+ } else {
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
++ timeout);
++ /* SMB buffer freed by function above */
+ }
+ cifs_stats_inc(&tcon->num_locks);
+ if (rc) {
+ cFYI(1, ("Send error in Lock = %d", rc));
+ }
+- cifs_small_buf_release(pSMB);
+
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
+ since file handle passed in no longer valid */
+@@ -1500,7 +1489,9 @@ CIFSSMBPosixLock(const int xid, struct c
+ int rc = 0;
+ int timeout = 0;
+ int bytes_returned = 0;
++ int resp_buf_type = 0;
+ __u16 params, param_offset, offset, byte_count, count;
++ struct kvec iov[1];
+
+ cFYI(1, ("Posix Lock"));
+
+@@ -1544,7 +1535,7 @@ CIFSSMBPosixLock(const int xid, struct c
+
+ parm_data->lock_type = cpu_to_le16(lock_type);
+ if(waitFlag) {
+- timeout = 3; /* blocking operation, no timeout */
++ timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
+ parm_data->lock_flags = cpu_to_le16(1);
+ pSMB->Timeout = cpu_to_le32(-1);
+ } else
+@@ -1564,8 +1555,13 @@ CIFSSMBPosixLock(const int xid, struct c
+ rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned);
+ } else {
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
++ iov[0].iov_base = (char *)pSMB;
++ iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
++ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
++ &resp_buf_type, timeout);
++ pSMB = NULL; /* request buf already freed by SendReceive2. Do
++ not try to free it twice below on exit */
++ pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base;
+ }
+
+ if (rc) {
+@@ -1600,6 +1596,11 @@ plk_err_exit:
+ if (pSMB)
+ cifs_small_buf_release(pSMB);
+
++ if (resp_buf_type == CIFS_SMALL_BUFFER)
++ cifs_small_buf_release(iov[0].iov_base);
++ else if (resp_buf_type == CIFS_LARGE_BUFFER)
++ cifs_buf_release(iov[0].iov_base);
++
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
+ since file handle passed in no longer valid */
+
+@@ -1612,8 +1613,6 @@ CIFSSMBClose(const int xid, struct cifsT
+ {
+ int rc = 0;
+ CLOSE_REQ *pSMB = NULL;
+- CLOSE_RSP *pSMBr = NULL;
+- int bytes_returned;
+ cFYI(1, ("In CIFSSMBClose"));
+
+ /* do not retry on dead session on close */
+@@ -1623,13 +1622,10 @@ CIFSSMBClose(const int xid, struct cifsT
+ if (rc)
+ return rc;
+
+- pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */
+-
+ pSMB->FileID = (__u16) smb_file_id;
+ pSMB->LastWriteTime = 0;
+ pSMB->ByteCount = 0;
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ cifs_stats_inc(&tcon->num_closes);
+ if (rc) {
+ if(rc!=-EINTR) {
+@@ -1638,8 +1634,6 @@ CIFSSMBClose(const int xid, struct cifsT
+ }
+ }
+
+- cifs_small_buf_release(pSMB);
+-
+ /* Since session is dead, file will be closed on server already */
+ if(rc == -EAGAIN)
+ rc = 0;
+@@ -2822,7 +2816,7 @@ CIFSSMBGetCIFSACL(const int xid, struct
+ iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
+
+ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
+- 0 /* not long op */, 0 /* do not log STATUS codes */ );
++ CIFS_STD_OP);
+ cifs_stats_inc(&tcon->num_acl_get);
+ if (rc) {
+ cFYI(1, ("Send error in QuerySecDesc = %d", rc));
+@@ -3444,8 +3438,6 @@ CIFSFindClose(const int xid, struct cifs
+ {
+ int rc = 0;
+ FINDCLOSE_REQ *pSMB = NULL;
+- CLOSE_RSP *pSMBr = NULL; /* BB removeme BB */
+- int bytes_returned;
+
+ cFYI(1, ("In CIFSSMBFindClose"));
+ rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
+@@ -3457,16 +3449,13 @@ CIFSFindClose(const int xid, struct cifs
+ if (rc)
+ return rc;
+
+- pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */
+ pSMB->FileID = searchHandle;
+ pSMB->ByteCount = 0;
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ if (rc) {
+ cERROR(1, ("Send error in FindClose = %d", rc));
+ }
+ cifs_stats_inc(&tcon->num_fclose);
+- cifs_small_buf_release(pSMB);
+
+ /* Since session is dead, search handle closed on server already */
+ if (rc == -EAGAIN)
+@@ -4373,11 +4362,9 @@ CIFSSMBSetFileSize(const int xid, struct
+ __u16 fid, __u32 pid_of_opener, int SetAllocation)
+ {
+ struct smb_com_transaction2_sfi_req *pSMB = NULL;
+- struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+ char *data_offset;
+ struct file_end_of_file_info *parm_data;
+ int rc = 0;
+- int bytes_returned = 0;
+ __u16 params, param_offset, offset, byte_count, count;
+
+ cFYI(1, ("SetFileSize (via SetFileInfo) %lld",
+@@ -4387,8 +4374,6 @@ CIFSSMBSetFileSize(const int xid, struct
+ if (rc)
+ return rc;
+
+- pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
+-
+ pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
+ pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
+
+@@ -4439,17 +4424,13 @@ CIFSSMBSetFileSize(const int xid, struct
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->ByteCount = cpu_to_le16(byte_count);
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ if (rc) {
+ cFYI(1,
+ ("Send error in SetFileInfo (SetFileSize) = %d",
+ rc));
+ }
+
+- if (pSMB)
+- cifs_small_buf_release(pSMB);
+-
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
+ since file handle passed in no longer valid */
+
+@@ -4467,10 +4448,8 @@ CIFSSMBSetFileTimes(const int xid, struc
+ __u16 fid)
+ {
+ struct smb_com_transaction2_sfi_req *pSMB = NULL;
+- struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
+ char *data_offset;
+ int rc = 0;
+- int bytes_returned = 0;
+ __u16 params, param_offset, offset, byte_count, count;
+
+ cFYI(1, ("Set Times (via SetFileInfo)"));
+@@ -4479,8 +4458,6 @@ CIFSSMBSetFileTimes(const int xid, struc
+ if (rc)
+ return rc;
+
+- pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
+-
+ /* At this point there is no need to override the current pid
+ with the pid of the opener, but that could change if we someday
+ use an existing handle (rather than opening one on the fly) */
+@@ -4520,14 +4497,11 @@ CIFSSMBSetFileTimes(const int xid, struc
+ pSMB->hdr.smb_buf_length += byte_count;
+ pSMB->ByteCount = cpu_to_le16(byte_count);
+ memcpy(data_offset,data,sizeof(FILE_BASIC_INFO));
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
+ if (rc) {
+ cFYI(1,("Send error in Set Time (SetFileInfo) = %d",rc));
+ }
+
+- cifs_small_buf_release(pSMB);
+-
+ /* Note: On -EAGAIN error only caller can retry on handle based calls
+ since file handle passed in no longer valid */
+
+@@ -4808,7 +4782,8 @@ int CIFSSMBNotify(const int xid, struct
+ pSMB->ByteCount = 0;
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, -1);
++ (struct smb_hdr *)pSMBr, &bytes_returned,
++ CIFS_ASYNC_OP);
+ if (rc) {
+ cFYI(1, ("Error in Notify = %d", rc));
+ } else {
+diff -urpN linux-source-2.6.18.orig/fs/cifs/connect.c linux-source-2.6.18/fs/cifs/connect.c
+--- linux-source-2.6.18.orig/fs/cifs/connect.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/connect.c 2007-11-25 14:19:26.000000000 -0700
+@@ -2148,7 +2148,7 @@ CIFSSessSetup(unsigned int xid, struct c
+ pSMB->req_no_secext.ByteCount = cpu_to_le16(count);
+
+ rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+- &bytes_returned, 1);
++ &bytes_returned, CIFS_LONG_OP);
+ if (rc) {
+ /* rc = map_smb_to_linux_error(smb_buffer_response); now done in SendReceive */
+ } else if ((smb_buffer_response->WordCount == 3)
+@@ -2434,7 +2434,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned i
+ pSMB->req.ByteCount = cpu_to_le16(count);
+
+ rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+- &bytes_returned, 1);
++ &bytes_returned, CIFS_LONG_OP);
+
+ if (smb_buffer_response->Status.CifsError ==
+ cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))
+@@ -2860,7 +2860,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xi
+ pSMB->req.ByteCount = cpu_to_le16(count);
+
+ rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
+- &bytes_returned, 1);
++ &bytes_returned, CIFS_LONG_OP);
+ if (rc) {
+ /* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */
+ } else if ((smb_buffer_response->WordCount == 3)
+@@ -3131,7 +3131,8 @@ CIFSTCon(unsigned int xid, struct cifsSe
+ pSMB->hdr.smb_buf_length += count;
+ pSMB->ByteCount = cpu_to_le16(count);
+
+- rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0);
++ rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
++ CIFS_STD_OP);
+
+ /* if (rc) rc = map_smb_to_linux_error(smb_buffer_response); */
+ /* above now done in SendReceive */
+diff -urpN linux-source-2.6.18.orig/fs/cifs/file.c linux-source-2.6.18/fs/cifs/file.c
+--- linux-source-2.6.18.orig/fs/cifs/file.c 2007-10-03 12:38:13.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/file.c 2007-11-25 14:20:52.000000000 -0700
+@@ -813,9 +813,9 @@ ssize_t cifs_user_write(struct file *fil
+ }
+
+ if (*poffset > file->f_dentry->d_inode->i_size)
+- long_op = 2; /* writes past end of file can take a long time */
++ long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
+ else
+- long_op = 1;
++ long_op = CIFS_LONG_OP;
+
+ for (total_written = 0; write_size > total_written;
+ total_written += bytes_written) {
+@@ -868,7 +868,7 @@ ssize_t cifs_user_write(struct file *fil
+ }
+ } else
+ *poffset += bytes_written;
+- long_op = FALSE; /* subsequent writes fast -
++ long_op = CIFS_STD_OP; /* subsequent writes fast -
+ 15 seconds is plenty */
+ }
+
+@@ -927,9 +927,9 @@ static ssize_t cifs_write(struct file *f
+ }
+
+ if (*poffset > file->f_dentry->d_inode->i_size)
+- long_op = 2; /* writes past end of file can take a long time */
++ long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
+ else
+- long_op = 1;
++ long_op = CIFS_LONG_OP;
+
+ for (total_written = 0; write_size > total_written;
+ total_written += bytes_written) {
+@@ -1001,7 +1001,7 @@ static ssize_t cifs_write(struct file *f
+ }
+ } else
+ *poffset += bytes_written;
+- long_op = FALSE; /* subsequent writes fast -
++ long_op = CIFS_STD_OP; /* subsequent writes fast -
+ 15 seconds is plenty */
+ }
+
+@@ -1288,7 +1288,7 @@ retry:
+ open_file->netfid,
+ bytes_to_write, offset,
+ &bytes_written, iov, n_iov,
+- 1);
++ CIFS_LONG_OP);
+ atomic_dec(&open_file->wrtPending);
+ if (rc || bytes_written < bytes_to_write) {
+ cERROR(1,("Write2 ret %d, written = %d",
+diff -urpN linux-source-2.6.18.orig/fs/cifs/sess.c linux-source-2.6.18/fs/cifs/sess.c
+--- linux-source-2.6.18.orig/fs/cifs/sess.c 2007-11-25 14:17:16.000000000 -0700
++++ linux-source-2.6.18/fs/cifs/sess.c 2007-11-25 14:19:26.000000000 -0700
+@@ -483,7 +483,7 @@ CIFS_SessSetup(unsigned int xid, struct
+ iov[1].iov_base = str_area;
+ iov[1].iov_len = count;
+ rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type,
+- 0 /* not long op */, 1 /* log NT STATUS if any */ );
++ CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
+ /* SMB request buf freed in SendReceive2 */
+
+ cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
+diff -urpN linux-source-2.6.18.orig/fs/cifs/transport.c linux-source-2.6.18/fs/cifs/transport.c
+--- linux-source-2.6.18.orig/fs/cifs/transport.c 2007-11-25 14:18:15.000000000 -0700
++++ linux-source-2.6.18/fs/cifs/transport.c 2007-11-25 14:30:14.000000000 -0700
+@@ -308,7 +308,7 @@ smb_send2(struct socket *ssocket, struct
+
+ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+ {
+- if(long_op == -1) {
++ if (long_op == CIFS_ASYNC_OP) {
+ /* oplock breaks must not be held up */
+ atomic_inc(&ses->server->inFlight);
+ } else {
+@@ -337,7 +337,7 @@ static int wait_for_free_request(struct
+ they are allowed to block on server */
+
+ /* update # of requests on the wire to server */
+- if (long_op < 3)
++ if (long_op != CIFS_BLOCKING_OP)
+ atomic_inc(&ses->server->inFlight);
+ spin_unlock(&GlobalMid_Lock);
+ break;
+@@ -416,17 +416,48 @@ static int wait_for_response(struct cifs
+ }
+ }
+
++
++/*
++ *
++ * Send an SMB Request. No response info (other than return code)
++ * needs to be parsed.
++ *
++ * flags indicate the type of request buffer and how long to wait
++ * and whether to log NT STATUS code (error) before mapping it to POSIX error
++ *
++ */
++int
++SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
++ struct smb_hdr *in_buf, int flags)
++{
++ int rc;
++ struct kvec iov[1];
++ int resp_buf_type;
++
++ iov[0].iov_base = (char *)in_buf;
++ iov[0].iov_len = in_buf->smb_buf_length + 4;
++ flags |= CIFS_NO_RESP;
++ rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
++#ifdef CONFIG_CIFS_DEBUG2
++ cFYI(1, ("SendRcvNoR flags %d rc %d", flags, rc));
++#endif
++ return rc;
++}
++
+ int
+ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+ struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
+- const int long_op, const int logError)
++ const int flags)
+ {
+ int rc = 0;
++ int long_op;
+ unsigned int receive_len;
+ unsigned long timeout;
+ struct mid_q_entry *midQ;
+ struct smb_hdr *in_buf = iov[0].iov_base;
+
++ long_op = flags & CIFS_TIMEOUT_MASK;
++
+ *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
+
+ if ((ses == NULL) || (ses->server == NULL)) {
+@@ -484,15 +515,22 @@ SendReceive2(const unsigned int xid, str
+ if(rc < 0)
+ goto out;
+
+- if (long_op == -1)
+- goto out;
+- else if (long_op == 2) /* writes past end of file can take loong time */
++ if (long_op == CIFS_STD_OP)
++ timeout = 15 * HZ;
++ else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
+ timeout = 180 * HZ;
+- else if (long_op == 1)
++ else if (long_op == CIFS_LONG_OP)
+ timeout = 45 * HZ; /* should be greater than
+ servers oplock break timeout (about 43 seconds) */
+- else
+- timeout = 15 * HZ;
++ else if (long_op == CIFS_ASYNC_OP)
++ goto out;
++ else if (long_op == CIFS_BLOCKING_OP)
++ timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
++ else {
++ cERROR(1, ("unknown timeout flag %d", long_op));
++ rc = -EIO;
++ goto out;
++ }
+
+ /* wait for 15 seconds or until woken up due to response arriving or
+ due to last connection to this server being unmounted */
+@@ -567,7 +605,8 @@ SendReceive2(const unsigned int xid, str
+ }
+
+ /* BB special case reconnect tid and uid here? */
+- rc = map_smb_to_linux_error(midQ->resp_buf, logError);
++ rc = map_smb_to_linux_error(midQ->resp_buf,
++ flags & CIFS_LOG_ERROR);
+
+ /* convert ByteCount if necessary */
+ if (receive_len >=
+@@ -576,8 +615,10 @@ SendReceive2(const unsigned int xid, str
+ (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
+ BCC(midQ->resp_buf) =
+ le16_to_cpu(BCC_LE(midQ->resp_buf));
+- midQ->resp_buf = NULL; /* mark it so will not be freed
+- by DeleteMidQEntry */
++ if ((flags & CIFS_NO_RESP) == 0)
++ midQ->resp_buf = NULL; /* mark it so buf will
++ not be freed by
++ DeleteMidQEntry */
+ } else {
+ rc = -EIO;
+ cFYI(1,("Bad MID state?"));
+@@ -666,17 +707,25 @@ SendReceive(const unsigned int xid, stru
+ if(rc < 0)
+ goto out;
+
+- if (long_op == -1)
++ if (long_op == CIFS_STD_OP)
++ timeout = 15 * HZ;
++ /* wait for 15 seconds or until woken up due to response arriving or
++ due to last connection to this server being unmounted */
++ else if (long_op == CIFS_ASYNC_OP)
+ goto out;
+- else if (long_op == 2) /* writes past end of file can take loong time */
++ else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
+ timeout = 180 * HZ;
+- else if (long_op == 1)
++ else if (long_op == CIFS_LONG_OP)
+ timeout = 45 * HZ; /* should be greater than
+ servers oplock break timeout (about 43 seconds) */
+- else
+- timeout = 15 * HZ;
+- /* wait for 15 seconds or until woken up due to response arriving or
+- due to last connection to this server being unmounted */
++ else if (long_op == CIFS_BLOCKING_OP)
++ timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
++ else {
++ cERROR(1, ("unknown timeout flag %d", long_op));
++ rc = -EIO;
++ goto out;
++ }
++
+ if (signal_pending(current)) {
+ /* if signal pending do not hold up user for full smb timeout
+ but we still give response a change to complete */
+@@ -817,7 +866,7 @@ send_lock_cancel(const unsigned int xid,
+ pSMB->hdr.Mid = GetNextMid(ses->server);
+
+ return SendReceive(xid, ses, in_buf, out_buf,
+- &bytes_returned, 0);
++ &bytes_returned, CIFS_STD_OP);
+ }
+
+ int
+@@ -849,7 +898,7 @@ SendReceiveBlockingLock(const unsigned i
+ to the same server. We may make this configurable later or
+ use ses->maxReq */
+
+- rc = wait_for_free_request(ses, 3);
++ rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+ if (rc)
+ return rc;
+
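The flags word introduced above packs the old long_op timeout class and the logError boolean into a single integer: the five timeout classes occupy the low nibble (CIFS_TIMEOUT_MASK) and modifiers such as CIFS_LOG_ERROR sit above it. A minimal userspace sketch of the decode step SendReceive2() now performs, reusing the values from the cifsglob.h hunk:

    #include <stdio.h>

    /* Values copied from the cifsglob.h hunk above. */
    #define CIFS_STD_OP       0
    #define CIFS_LONG_OP      1
    #define CIFS_VLONG_OP     2
    #define CIFS_BLOCKING_OP  4
    #define CIFS_ASYNC_OP     8
    #define CIFS_TIMEOUT_MASK 0x00F
    #define CIFS_LOG_ERROR    0x010

    static void decode(int flags)
    {
        int long_op = flags & CIFS_TIMEOUT_MASK; /* one of the five classes */
        int log_err = flags & CIFS_LOG_ERROR;    /* independent modifier bit */

        printf("flags 0x%03x -> timeout class %d, log NT status: %s\n",
               flags, long_op, log_err ? "yes" : "no");
    }

    int main(void)
    {
        decode(CIFS_STD_OP | CIFS_LOG_ERROR); /* what CIFS_SessSetup now passes */
        decode(CIFS_BLOCKING_OP);             /* blocking lock, no logging */
        return 0;
    }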
diff --git a/tags/2.6.18-12/30046_wait_task_stopped-hang.patch b/tags/2.6.18-12/30046_wait_task_stopped-hang.patch
new file mode 100644
index 0000000..de602c4
--- /dev/null
+++ b/tags/2.6.18-12/30046_wait_task_stopped-hang.patch
@@ -0,0 +1,38 @@
+From: Roland McGrath <roland@redhat.com>
+Date: Wed, 14 Nov 2007 06:11:50 +0000 (-0800)
+Subject: wait_task_stopped: Check p->exit_state instead of TASK_TRACED
+X-Git-Tag: v2.6.24-rc3~12
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=a3474224e6a01924be40a8255636ea5522c1023a
+
+wait_task_stopped: Check p->exit_state instead of TASK_TRACED
+
+The original meaning of the old test (p->state > TASK_STOPPED) was
+"not dead", since it was before TASK_TRACED existed and before the
+state/exit_state split. It was a wrong correction in commit
+14bf01bb0599c89fc7f426d20353b76e12555308 to make this test for
+TASK_TRACED instead. It should have been changed when TASK_TRACED
+was introducted and again when exit_state was introduced.
+
+Signed-off-by: Roland McGrath <roland@redhat.com>
+Cc: Oleg Nesterov <oleg@tv-sign.ru>
+Cc: Alexey Dobriyan <adobriyan@sw.ru>
+Cc: Kees Cook <kees@ubuntu.com>
+Acked-by: Scott James Remnant <scott@ubuntu.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/kernel/exit.c linux-source-2.6.18/kernel/exit.c
+--- linux-source-2.6.18.orig/kernel/exit.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/kernel/exit.c 2007-11-25 13:39:32.000000000 -0700
+@@ -1287,8 +1287,7 @@ static int wait_task_stopped(struct task
+ int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
+
+ exit_code = p->exit_code;
+- if (unlikely(!exit_code) ||
+- unlikely(p->state & TASK_TRACED))
++ if (unlikely(!exit_code) || unlikely(p->exit_state))
+ goto bail_ref;
+ return wait_noreap_copyout(p, pid, uid,
+ why, (exit_code << 8) | 0x7f,
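The one-line fix above hinges on the difference between the scheduler state and exit_state: a live ptraced child has TASK_TRACED set in p->state, while p->exit_state becomes nonzero only once the task is actually dead. A toy sketch of the two tests; field names follow the kernel, but treat the constant values as illustrative assumptions:

    #include <stdio.h>

    /* Illustrative constants; only the semantics matter here. */
    #define TASK_STOPPED 4
    #define TASK_TRACED  8
    #define EXIT_ZOMBIE 16

    struct toy_task {
        long state;      /* scheduler state: stopped, traced, ... */
        long exit_state; /* nonzero only once the task is dead */
    };

    int main(void)
    {
        struct toy_task traced_alive = { TASK_TRACED, 0 };
        struct toy_task zombie       = { TASK_STOPPED, EXIT_ZOMBIE };

        /* Old (wrong) test: bails out on a live ptraced task. */
        printf("old test skips live traced task: %d\n",
               !!(traced_alive.state & TASK_TRACED));

        /* New test: only actually-dead tasks are skipped. */
        printf("new test skips live traced task: %d, zombie: %d\n",
               !!traced_alive.exit_state, !!zombie.exit_state);
        return 0;
    }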
diff --git a/tags/2.6.18-12/30047_ieee80211-underflow.patch b/tags/2.6.18-12/30047_ieee80211-underflow.patch
new file mode 100644
index 0000000..53c733a
--- /dev/null
+++ b/tags/2.6.18-12/30047_ieee80211-underflow.patch
@@ -0,0 +1,54 @@
+From: John W. Linville <linville@tuxdriver.com>
+Date: Tue, 2 Oct 2007 04:03:54 +0000 (-0700)
+Subject: [IEEE80211]: avoid integer underflow for runt rx frames
+X-Git-Tag: kvm-47~34^2~42^2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Favi%2Fkvm.git;a=commitdiff_plain;h=04045f98e0457aba7d4e6736f37eed189c48a5f7
+
+[IEEE80211]: avoid integer underflow for runt rx frames
+
+Reported by Chris Evans <scarybeasts@gmail.com>:
+
+> The summary is that an evil 80211 frame can crash out a victim's
+> machine. It only applies to drivers using the 80211 wireless code, and
+> only then to certain drivers (and even then depends on a card's
+> firmware not dropping a dubious packet). I must confess I'm not
+> keeping track of Linux wireless support, and the different protocol
+> stacks etc.
+>
+> Details are as follows:
+>
+> ieee80211_rx() does not explicitly check that "skb->len >= hdrlen".
+> There are other skb->len checks, but not enough to prevent a subtle
+> off-by-two error if the frame has the IEEE80211_STYPE_QOS_DATA flag
+> set.
+>
+> This leads to integer underflow and crash here:
+>
+> if (frag != 0)
+> flen -= hdrlen;
+>
+> (flen is subsequently used as a memcpy length parameter).
+
+How about this?
+
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
+index f2de2e4..6284c99 100644
+--- a/net/ieee80211/ieee80211_rx.c
++++ b/net/ieee80211/ieee80211_rx.c
+@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+ frag = WLAN_GET_SEQ_FRAG(sc);
+ hdrlen = ieee80211_get_hdrlen(fc);
+
++ if (skb->len < hdrlen) {
++ printk(KERN_INFO "%s: invalid SKB length %d\n",
++ dev->name, skb->len);
++ goto rx_dropped;
++ }
++
+ /* Put this code here so that we avoid duplicating it in all
+ * Rx paths. - Jean II */
+ #ifdef CONFIG_WIRELESS_EXT
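The crash described above is plain unsigned underflow: for a QoS data frame the computed header length is 26 bytes, so a runt frame shorter than that makes flen -= hdrlen wrap to an enormous value that is then handed to memcpy. A self-contained sketch of the failure and the added guard:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        /* A runt frame: shorter than the 26-byte header implied by the
         * QoS-data frame control bits (the "off-by-two" from the report). */
        size_t skb_len = 24;
        size_t hdrlen  = 26;

        size_t flen = skb_len;
        flen -= hdrlen;  /* unsigned underflow: wraps to a huge value */

        printf("flen = %zu\n", flen); /* ~2^64 on LP64: a lethal memcpy length */

        /* The added guard: validate before any arithmetic that can wrap. */
        if (skb_len < hdrlen)
            printf("drop: invalid SKB length %zu\n", skb_len);
        return 0;
    }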
diff --git a/tags/2.6.18-12/30048_sysfs_readdir-NULL-deref-1.patch b/tags/2.6.18-12/30048_sysfs_readdir-NULL-deref-1.patch
new file mode 100644
index 0000000..c13fd1b
--- /dev/null
+++ b/tags/2.6.18-12/30048_sysfs_readdir-NULL-deref-1.patch
@@ -0,0 +1,112 @@
+From: Eric Sandeen <sandeen@sandeen.net>
+Date: Mon, 11 Jun 2007 05:02:45 +0000 (+0900)
+Subject: sysfs: store sysfs inode nrs in s_ino to avoid readdir oopses
+X-Git-Tag: v2.6.22-rc5~47
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.22.y.git;a=commitdiff_plain;h=dc351252b33f8fede396d6173dba117bcb933607
+
+sysfs: store sysfs inode nrs in s_ino to avoid readdir oopses
+
+Backport of
+ftp://ftp.kernel.org/pub/linux/kernel/people/akpm/patches/2.6/2.6.22-rc1/2.6.22-rc1-mm1/broken-out/gregkh-driver-sysfs-allocate-inode-number-using-ida.patch
+
+For regular files in sysfs, sysfs_readdir wants to traverse
+sysfs_dirent->s_dentry->d_inode->i_ino to get to the inode number.
+But, the dentry can be reclaimed under memory pressure, and there is
+no synchronization with readdir. This patch follows Tejun's scheme of
+allocating and storing an inode number in the new s_ino member of a
+sysfs_dirent, when dirents are created, and retrieving it from there
+for readdir, so that the pointer chain doesn't have to be traversed.
+
+Tejun's upstream patch uses a new-ish "ida" allocator which brings
+along some extra complexity; this -stable patch has a brain-dead
+incrementing counter which does not guarantee uniqueness, but because
+sysfs doesn't hash inodes as iunique expects, uniqueness wasn't
+guaranteed today anyway.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Tejun Heo <htejun@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@hp.com>
+
+diff -urpN linux-source-2.6.18.orig/fs/sysfs/dir.c linux-source-2.6.18/fs/sysfs/dir.c
+--- linux-source-2.6.18.orig/fs/sysfs/dir.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/sysfs/dir.c 2007-11-07 15:31:11.000000000 -0700
+@@ -29,6 +29,14 @@ static struct dentry_operations sysfs_de
+ .d_iput = sysfs_d_iput,
+ };
+
++static unsigned int sysfs_inode_counter;
++ino_t sysfs_get_inum(void)
++{
++ if (unlikely(sysfs_inode_counter < 3))
++ sysfs_inode_counter = 3;
++ return sysfs_inode_counter++;
++}
++
+ /*
+ * Allocates a new sysfs_dirent and links it to the parent sysfs_dirent
+ */
+@@ -42,6 +50,7 @@ static struct sysfs_dirent * sysfs_new_d
+ return NULL;
+
+ memset(sd, 0, sizeof(*sd));
++ sd->s_ino = sysfs_get_inum();
+ atomic_set(&sd->s_count, 1);
+ atomic_set(&sd->s_event, 0);
+ INIT_LIST_HEAD(&sd->s_children);
+@@ -416,7 +425,7 @@ static int sysfs_readdir(struct file * f
+
+ switch (i) {
+ case 0:
+- ino = dentry->d_inode->i_ino;
++ ino = parent_sd->s_ino;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+@@ -445,10 +454,7 @@ static int sysfs_readdir(struct file * f
+
+ name = sysfs_get_name(next);
+ len = strlen(name);
+- if (next->s_dentry)
+- ino = next->s_dentry->d_inode->i_ino;
+- else
+- ino = iunique(sysfs_sb, 2);
++ ino = next->s_ino;
+
+ if (filldir(dirent, name, len, filp->f_pos, ino,
+ dt_type(next)) < 0)
+diff -urpN linux-source-2.6.18.orig/fs/sysfs/inode.c linux-source-2.6.18/fs/sysfs/inode.c
+--- linux-source-2.6.18.orig/fs/sysfs/inode.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/sysfs/inode.c 2007-11-07 15:30:13.000000000 -0700
+@@ -129,6 +129,7 @@ struct inode * sysfs_new_inode(mode_t mo
+ inode->i_mapping->a_ops = &sysfs_aops;
+ inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
+ inode->i_op = &sysfs_inode_operations;
++ inode->i_ino = sd->s_ino;
+ lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key);
+
+ if (sd->s_iattr) {
+diff -urpN linux-source-2.6.18.orig/fs/sysfs/mount.c linux-source-2.6.18/fs/sysfs/mount.c
+--- linux-source-2.6.18.orig/fs/sysfs/mount.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/sysfs/mount.c 2007-11-07 15:30:13.000000000 -0700
+@@ -29,6 +29,7 @@ static struct sysfs_dirent sysfs_root =
+ .s_element = NULL,
+ .s_type = SYSFS_ROOT,
+ .s_iattr = NULL,
++ .s_ino = 1,
+ };
+
+ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
+diff -urpN linux-source-2.6.18.orig/include/linux/sysfs.h linux-source-2.6.18/include/linux/sysfs.h
+--- linux-source-2.6.18.orig/include/linux/sysfs.h 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/include/linux/sysfs.h 2007-11-07 15:34:16.000000000 -0700
+@@ -72,6 +72,7 @@ struct sysfs_dirent {
+ void * s_element;
+ int s_type;
+ umode_t s_mode;
++ ino_t s_ino;
+ struct dentry * s_dentry;
+ struct iattr * s_iattr;
+ atomic_t s_event;
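The allocator added above is deliberately simple: a bare counter that skips the low inode numbers (the sysfs root is pinned at s_ino = 1 in the mount.c hunk) and hands out 3, 4, 5, ... for dynamic dirents. A userspace rendering of the same shape; as the commit message concedes, a wrapping counter does not guarantee uniqueness, but neither did the old iunique() fallback:

    #include <stdio.h>

    /* Same shape as the sysfs_get_inum() added above. */
    static unsigned int inode_counter;

    static unsigned long get_inum(void)
    {
        if (inode_counter < 3)
            inode_counter = 3;
        return inode_counter++;
    }

    int main(void)
    {
        printf("%lu %lu %lu\n", get_inum(), get_inum(), get_inum()); /* 3 4 5 */
        return 0;
    }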
diff --git a/tags/2.6.18-12/30049_sysfs_readdir-NULL-deref-2.patch b/tags/2.6.18-12/30049_sysfs_readdir-NULL-deref-2.patch
new file mode 100644
index 0000000..e242c17
--- /dev/null
+++ b/tags/2.6.18-12/30049_sysfs_readdir-NULL-deref-2.patch
@@ -0,0 +1,128 @@
+From: Tejun Heo <htejun@gmail.com>
+Date: Mon, 11 Jun 2007 05:04:01 +0000 (+0900)
+Subject: sysfs: fix race condition around sd->s_dentry, take#2
+X-Git-Tag: v2.6.22-rc5~45
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.22.y.git;a=commitdiff_plain;h=dd14cbc994709a1c5a64ed3621f583c49a27e521
+
+sysfs: fix race condition around sd->s_dentry, take#2
+
+Allowing attribute and symlink dentries to be reclaimed means
+sd->s_dentry can change dynamically. However, updates to the field
+are unsynchronized leading to race conditions. This patch adds
+sysfs_lock and use it to synchronize updates to sd->s_dentry.
+
+Due to the locking around ->d_iput, the check in sysfs_drop_dentry()
+is complex. sysfs_lock only protects the sd->s_dentry pointer itself.
+The validity of the dentry is protected by dcache_lock, so whether the
+dentry is alive or not can only be tested while holding both locks.
+
+This is a minimal backport of the sysfs_drop_dentry() rewrite in the
+devel branch.
+
+Signed-off-by: Tejun Heo <htejun@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@hp.com>
+
+diff -urpN linux-source-2.6.18.orig/fs/sysfs/dir.c linux-source-2.6.18/fs/sysfs/dir.c
+--- linux-source-2.6.18.orig/fs/sysfs/dir.c 2007-11-07 15:44:57.000000000 -0700
++++ linux-source-2.6.18/fs/sysfs/dir.c 2007-11-07 15:38:57.000000000 -0700
+@@ -12,14 +12,26 @@
+ #include "sysfs.h"
+
+ DECLARE_RWSEM(sysfs_rename_sem);
++spinlock_t sysfs_lock = SPIN_LOCK_UNLOCKED;
+
+ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
+ {
+ struct sysfs_dirent * sd = dentry->d_fsdata;
+
+ if (sd) {
+- BUG_ON(sd->s_dentry != dentry);
+- sd->s_dentry = NULL;
++ /* sd->s_dentry is protected with sysfs_lock. This
++ * allows sysfs_drop_dentry() to dereference it.
++ */
++ spin_lock(&sysfs_lock);
++
++ /* The dentry might have been deleted or another
++ * lookup could have happened updating sd->s_dentry to
++ * point the new dentry. Ignore if it isn't pointing
++ * to this dentry.
++ */
++ if (sd->s_dentry == dentry)
++ sd->s_dentry = NULL;
++ spin_unlock(&sysfs_lock);
+ sysfs_put(sd);
+ }
+ iput(inode);
+@@ -218,7 +230,10 @@ static int sysfs_attach_attr(struct sysf
+ }
+
+ dentry->d_fsdata = sysfs_get(sd);
++ /* protect sd->s_dentry against sysfs_d_iput */
++ spin_lock(&sysfs_lock);
+ sd->s_dentry = dentry;
++ spin_unlock(&sysfs_lock);
+ error = sysfs_create(dentry, (attr->mode & S_IALLUGO) | S_IFREG, init);
+ if (error) {
+ sysfs_put(sd);
+@@ -240,7 +255,10 @@ static int sysfs_attach_link(struct sysf
+ int err = 0;
+
+ dentry->d_fsdata = sysfs_get(sd);
++ /* protect sd->s_dentry against sysfs_d_iput */
++ spin_lock(&sysfs_lock);
+ sd->s_dentry = dentry;
++ spin_unlock(&sysfs_lock);
+ err = sysfs_create(dentry, S_IFLNK|S_IRWXUGO, init_symlink);
+ if (!err) {
+ dentry->d_op = &sysfs_dentry_ops;
+diff -urpN linux-source-2.6.18.orig/fs/sysfs/inode.c linux-source-2.6.18/fs/sysfs/inode.c
+--- linux-source-2.6.18.orig/fs/sysfs/inode.c 2007-11-07 15:44:57.000000000 -0700
++++ linux-source-2.6.18/fs/sysfs/inode.c 2007-11-07 15:40:19.000000000 -0700
+@@ -217,8 +217,22 @@ const unsigned char * sysfs_get_name(str
+ */
+ void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
+ {
+- struct dentry * dentry = sd->s_dentry;
++ struct dentry *dentry = NULL;
+
++ /* We're not holding a reference to ->s_dentry dentry but the
++ * field will stay valid as long as sysfs_lock is held.
++ */
++ spin_lock(&sysfs_lock);
++ spin_lock(&dcache_lock);
++
++ /* dget dentry if it's still alive */
++ if (sd->s_dentry && sd->s_dentry->d_inode)
++ dentry = dget_locked(sd->s_dentry);
++
++ spin_unlock(&dcache_lock);
++ spin_unlock(&sysfs_lock);
++
++ /* drop dentry */
+ if (dentry) {
+ spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
+@@ -232,6 +246,8 @@ void sysfs_drop_dentry(struct sysfs_dire
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_lock);
+ }
++
++ dput(dentry);
+ }
+ }
+
+diff -urpN linux-source-2.6.18.orig/fs/sysfs/sysfs.h linux-source-2.6.18/fs/sysfs/sysfs.h
+--- linux-source-2.6.18.orig/fs/sysfs/sysfs.h 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/sysfs/sysfs.h 2007-11-07 15:38:57.000000000 -0700
+@@ -20,6 +20,7 @@ extern const unsigned char * sysfs_get_n
+ extern void sysfs_drop_dentry(struct sysfs_dirent *sd, struct dentry *parent);
+ extern int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
+
++extern spinlock_t sysfs_lock;
+ extern struct rw_semaphore sysfs_rename_sem;
+ extern struct super_block * sysfs_sb;
+ extern const struct file_operations sysfs_dir_operations;
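The locking protocol above boils down to: publish sd->s_dentry under sysfs_lock, and in the ->d_iput path clear it only if it still points at the dentry being destroyed, since a newer lookup may already have replaced it. A hedged userspace analogue, with a pthread mutex standing in for sysfs_lock:

    #include <pthread.h>
    #include <stdio.h>

    /* Publish the back-pointer under the lock; on teardown clear it only
     * if it still points at the object going away. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    struct dentry_toy { int unused; };
    struct dirent_toy { struct dentry_toy *s_dentry; };

    static void attach(struct dirent_toy *sd, struct dentry_toy *d)
    {
        pthread_mutex_lock(&lock);
        sd->s_dentry = d;        /* as sysfs_attach_attr()/_link() now do */
        pthread_mutex_unlock(&lock);
    }

    static void d_iput_toy(struct dirent_toy *sd, struct dentry_toy *d)
    {
        pthread_mutex_lock(&lock);
        if (sd->s_dentry == d)   /* ignore if a newer lookup took over */
            sd->s_dentry = NULL;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        struct dirent_toy sd = { NULL };
        struct dentry_toy old_d, new_d;

        attach(&sd, &old_d);
        attach(&sd, &new_d);     /* a second lookup replaces the dentry */
        d_iput_toy(&sd, &old_d); /* late iput of the old one: must be a no-op */
        printf("s_dentry still valid: %d\n", sd.s_dentry == &new_d);
        return 0;
    }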
diff --git a/tags/2.6.18-12/30050_sysfs-fix-condition-check.patch b/tags/2.6.18-12/30050_sysfs-fix-condition-check.patch
new file mode 100644
index 0000000..652065e
--- /dev/null
+++ b/tags/2.6.18-12/30050_sysfs-fix-condition-check.patch
@@ -0,0 +1,29 @@
+From: Tejun Heo <htejun@gmail.com>
+Date: Mon, 11 Jun 2007 05:03:27 +0000 (+0900)
+Subject: sysfs: fix condition check in sysfs_drop_dentry()
+X-Git-Tag: v2.6.22-rc5~46
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=6aa054aadfea613a437ad0b15d38eca2b963fc0a
+
+sysfs: fix condition check in sysfs_drop_dentry()
+
+The condition check doesn't make much sense as it basically always
+succeeds. This causes NULL dereferencing in certain cases. It seems
+that the parentheses were put in the wrong place. Fix it.
+
+Signed-off-by: Tejun Heo <htejun@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@hp.com>
+
+--- linux-source-2.6.18+2.6.22.y/fs/sysfs/inode.c.orig 2007-11-07 15:40:19.000000000 -0700
++++ linux-source-2.6.18+2.6.22.y/fs/sysfs/inode.c 2007-11-07 17:09:33.000000000 -0700
+@@ -236,7 +236,7 @@ void sysfs_drop_dentry(struct sysfs_dire
+ if (dentry) {
+ spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
+- if (!(d_unhashed(dentry) && dentry->d_inode)) {
++ if (!d_unhashed(dentry) && dentry->d_inode) {
+ dget_locked(dentry);
+ __d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
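The bug is pure operator precedence: !(d_unhashed(dentry) && dentry->d_inode) is true whenever either condition fails, so the old code also took the "alive" branch for a hashed dentry with no inode and then went on to dereference d_inode. A small truth-table sketch of the two forms:

    #include <stdio.h>

    int main(void)
    {
        /* {d_unhashed, has d_inode} for three dentries */
        int cases[][2] = { {0, 1}, {0, 0}, {1, 1} };

        for (int i = 0; i < 3; i++) {
            int u = cases[i][0], ino = cases[i][1];
            printf("unhashed=%d inode=%d  buggy=%d fixed=%d\n",
                   u, ino, !(u && ino), !u && ino);
        }
        /* Row {0,0} is the crash: the buggy form says "alive" for a hashed
         * dentry with no inode, and the caller then dereferences d_inode. */
        return 0;
    }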
diff --git a/tags/2.6.18-12/30051_tmpfs-restore-clear_highpage.patch b/tags/2.6.18-12/30051_tmpfs-restore-clear_highpage.patch
new file mode 100644
index 0000000..c4ed61c
--- /dev/null
+++ b/tags/2.6.18-12/30051_tmpfs-restore-clear_highpage.patch
@@ -0,0 +1,44 @@
+commit e84e2e132c9c66d8498e7710d4ea532d1feaaac5
+Author: Hugh Dickins <hugh@veritas.com>
+Date: Wed Nov 28 18:55:10 2007 +0000
+
+ tmpfs: restore missing clear_highpage
+
+ tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in
+ which shmem_getpage receives the page from its caller instead of allocating.
+ We must cover this case by clear_highpage before SetPageUptodate, as before.
+
+ Signed-off-by: Hugh Dickins <hugh@veritas.com>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@hp.com>
+
+diff -urpN linux-source-2.6.18.orig/mm/shmem.c linux-source-2.6.18/mm/shmem.c
+--- linux-source-2.6.18.orig/mm/shmem.c 2007-12-01 15:24:42.000000000 -0700
++++ linux-source-2.6.18/mm/shmem.c 2007-12-17 18:24:57.000000000 -0700
+@@ -972,7 +972,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ pvma.vm_pgoff = idx;
+ pvma.vm_end = PAGE_SIZE;
+- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
++ page = alloc_page_vma(gfp, &pvma, 0);
+ mpol_free(pvma.vm_policy);
+ return page;
+ }
+@@ -992,7 +992,7 @@ shmem_swapin(struct shmem_inode_info *in
+ static inline struct page *
+ shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+ {
+- return alloc_page(gfp | __GFP_ZERO);
++ return alloc_page(gfp);
+ }
+ #endif
+
+@@ -1201,6 +1201,7 @@ repeat:
+
+ info->alloced++;
+ spin_unlock(&info->lock);
++ clear_highpage(filepage);
+ flush_dcache_page(filepage);
+ SetPageUptodate(filepage);
+ }
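The fix restores the invariant that a page must be zeroed before SetPageUptodate publishes it: pages shmem allocates itself can be zeroed at allocation time, but the unusual caller-supplied page arrives with stale contents. A userspace sketch of the same rule, with calloc() standing in for the allocation path and memset() for clear_highpage():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Analogue of the shmem_getpage() case: an allocated page can come
     * back zeroed, but a caller-supplied page must be cleared before it
     * is marked uptodate. */
    static char *get_page_toy(char *supplied, size_t size)
    {
        if (!supplied)
            return calloc(1, size); /* allocation path zeroes for us */
        memset(supplied, 0, size);  /* the restored clear_highpage() step */
        return supplied;
    }

    int main(void)
    {
        char stale[16];
        memset(stale, 0xAA, sizeof(stale)); /* leftover data from page reuse */

        char *page = get_page_toy(stale, sizeof(stale));
        printf("first byte after clear: %02x\n", (unsigned char)page[0]);
        return 0;
    }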
diff --git a/tags/2.6.18-12/30052_minixfs-printk-hang.patch b/tags/2.6.18-12/30052_minixfs-printk-hang.patch
new file mode 100644
index 0000000..b7b205b
--- /dev/null
+++ b/tags/2.6.18-12/30052_minixfs-printk-hang.patch
@@ -0,0 +1,76 @@
+From: Eric Sandeen <sandeen@redhat.com>
+Date: Wed, 17 Oct 2007 06:27:15 +0000 (-0700)
+Subject: minixfs: limit minixfs printks on corrupted dir i_size (CVE-2006-6058)
+X-Git-Tag: v2.6.23.7~3
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.23.y.git;a=commitdiff_plain;h=f0ae3188daf70ed07a4dfbeb133bef3a92838a15
+
+minixfs: limit minixfs printks on corrupted dir i_size (CVE-2006-6058)
+
+patch f44ec6f3f89889a469773b1fd894f8fcc07c29cf upstream.
+
+This attempts to address CVE-2006-6058
+http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2006-6058
+
+first reported at http://projects.info-pull.com/mokb/MOKB-17-11-2006.html
+
+Essentially a corrupted minix dir inode reporting a very large
+i_size will loop for a very long time in minix_readdir, minix_find_entry,
+etc., because on EIO they just move on to try the next page. This is
+under the BKL, printk-storming as well. This can lock up the machine
+for a very long time. Simply ratelimiting the printks gets things back
+under control. Make the message a bit more informative while we're here.
+
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Cc: Bodo Eggert <7eggert@gmx.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/minix/itree_v1.c linux-source-2.6.18/fs/minix/itree_v1.c
+--- linux-source-2.6.18.orig/fs/minix/itree_v1.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/minix/itree_v1.c 2007-12-16 19:13:41.000000000 -0700
+@@ -23,11 +23,16 @@ static inline block_t *i_data(struct ino
+ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+ {
+ int n = 0;
++ char b[BDEVNAME_SIZE];
+
+ if (block < 0) {
+- printk("minix_bmap: block<0\n");
++ printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
++ block, bdevname(inode->i_sb->s_bdev, b));
+ } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
+- printk("minix_bmap: block>big\n");
++ if (printk_ratelimit())
++ printk("MINIX-fs: block_to_path: "
++ "block %ld too big on dev %s\n",
++ block, bdevname(inode->i_sb->s_bdev, b));
+ } else if (block < 7) {
+ offsets[n++] = block;
+ } else if ((block -= 7) < 512) {
+diff -urpN linux-source-2.6.18.orig/fs/minix/itree_v2.c linux-source-2.6.18/fs/minix/itree_v2.c
+--- linux-source-2.6.18.orig/fs/minix/itree_v2.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/minix/itree_v2.c 2007-12-16 19:40:06.000000000 -0700
+@@ -23,11 +23,17 @@ static inline block_t *i_data(struct ino
+ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+ {
+ int n = 0;
++ char b[BDEVNAME_SIZE];
++ struct super_block *sb = inode->i_sb;
+
+ if (block < 0) {
+- printk("minix_bmap: block<0\n");
++ printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
++ block, bdevname(sb->s_bdev, b));
+ } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
+- printk("minix_bmap: block>big\n");
++ if (printk_ratelimit())
++ printk("MINIX-fs: block_to_path: "
++ "block %ld too big on dev %s\n",
++ block, bdevname(sb->s_bdev, b));
+ } else if (block < 7) {
+ offsets[n++] = block;
+ } else if ((block -= 7) < 256) {
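printk_ratelimit() is what turns the unbounded message storm into a trickle. A minimal userspace stand-in showing the effect; the numbers here are assumptions, since the kernel version has a configurable interval and burst:

    #include <stdio.h>
    #include <time.h>

    /* Minimal stand-in for printk_ratelimit(): allow a small burst per
     * one-second window, drop the rest. */
    static int ratelimit(void)
    {
        static time_t window;
        static int budget;
        time_t now = time(NULL);

        if (now != window) { /* new window: refill the burst budget */
            window = now;
            budget = 10;
        }
        if (budget > 0) {
            budget--;
            return 1;        /* ok to print */
        }
        return 0;            /* suppressed */
    }

    int main(void)
    {
        for (int block = 0; block < 100000; block++)
            if (ratelimit())
                printf("MINIX-fs: block %d too big\n", block);
        return 0;
    }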
diff --git a/tags/2.6.18-12/30053_hrtimer-large-relative-timeouts-overflow.patch b/tags/2.6.18-12/30053_hrtimer-large-relative-timeouts-overflow.patch
new file mode 100644
index 0000000..57c2b94
--- /dev/null
+++ b/tags/2.6.18-12/30053_hrtimer-large-relative-timeouts-overflow.patch
@@ -0,0 +1,45 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 7 Dec 2007 18:16:17 +0000 (+0100)
+Subject: hrtimers: avoid overflow for large relative timeouts
+X-Git-Tag: v2.6.24-rc5~49^2~2
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=62f0f61e6673e67151a7c8c0f9a09c7ea43fe2b5;hp=f194d132e4971111f85c18c96067acffb13cee6d
+
+hrtimers: avoid overflow for large relative timeouts
+
+Relative hrtimers with a large timeout value might end up as negative
+timer values when the current time is added in hrtimer_start().
+
+This in turn causes the clockevents_set_next() function to set a
+huge timeout and sleep for quite a long time when we have a clock
+source which is capable of long sleeps like HPET. With PIT this almost
+goes unnoticed as the maximum delta is ~27ms. The non-hrt/nohz code
+sorts this out in the next timer interrupt, so we never noticed that
+problem which has been there since the first day of hrtimers.
+
+This bug became more apparent in 2.6.24 which activates HPET on more
+hardware.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/kernel/hrtimer.c linux-source-2.6.18/kernel/hrtimer.c
+--- linux-source-2.6.18.orig/kernel/hrtimer.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/kernel/hrtimer.c 2007-12-16 18:43:03.000000000 -0700
+@@ -443,6 +443,14 @@ hrtimer_start(struct hrtimer *timer, kti
+ #ifdef CONFIG_TIME_LOW_RES
+ tim = ktime_add(tim, base->resolution);
+ #endif
++ /*
++ * Careful here: User space might have asked for a
++ * very long sleep, so the add above might result in a
++ * negative number, which enqueues the timer in front
++ * of the queue.
++ */
++ if (tim.tv64 < 0)
++ tim.tv64 = KTIME_MAX;
+ }
+ timer->expires = tim;
+
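The overflow is easy to reproduce outside the kernel: adding a large relative timeout to the current time can exceed the signed 64-bit range, producing a negative expiry that sorts to the front of the timer queue. A sketch of the failure and the KTIME_MAX clamp; the addition is done in unsigned arithmetic so the wraparound is well defined in C (an assumption about how the kernel's ktime_add() behaves in practice), and the magnitudes are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define KTIME_MAX INT64_MAX

    int main(void)
    {
        int64_t now = INT64_MAX - 1000; /* illustrative "current time" in ns */
        int64_t rel = 5000;             /* a large relative timeout */

        /* Add in unsigned arithmetic so the wraparound is well defined. */
        int64_t expires = (int64_t)((uint64_t)now + (uint64_t)rel);

        printf("expires = %lld\n", (long long)expires); /* negative: in the past */

        /* The fix: a negative sum means overflow, so clamp to "never". */
        if (expires < 0)
            expires = KTIME_MAX;
        printf("clamped = %lld\n", (long long)expires);
        return 0;
    }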
diff --git a/tags/2.6.18-12/30054_coredump-only-to-same-uid.patch b/tags/2.6.18-12/30054_coredump-only-to-same-uid.patch
new file mode 100644
index 0000000..74af052
--- /dev/null
+++ b/tags/2.6.18-12/30054_coredump-only-to-same-uid.patch
@@ -0,0 +1,38 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Wed, 28 Nov 2007 12:59:18 +0000 (+0100)
+Subject: vfs: coredumping fix
+X-Git-Tag: v2.6.24-rc4~82
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=c46f739dd39db3b07ab5deb4e3ec81e1c04a91af
+
+vfs: coredumping fix
+
+fix: http://bugzilla.kernel.org/show_bug.cgi?id=3043
+
+only allow coredumping to the same uid that the coredumping
+task runs under.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Acked-by: Alan Cox <alan@redhat.com>
+Acked-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Al Viro <viro@ftp.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/exec.c linux-source-2.6.18/fs/exec.c
+--- linux-source-2.6.18.orig/fs/exec.c 2007-10-03 12:38:15.000000000 -0600
++++ linux-source-2.6.18/fs/exec.c 2007-12-05 23:41:00.000000000 -0700
+@@ -1524,6 +1524,12 @@ int do_coredump(long signr, int exit_cod
+
+ if (!S_ISREG(inode->i_mode))
+ goto close_fail;
++ /*
++ * Dont allow local users get cute and trick others to coredump
++ * into their pre-created files:
++ */
++ if (inode->i_uid != current->fsuid)
++ goto close_fail;
+ if (!file->f_op)
+ goto close_fail;
+ if (!file->f_op->write)
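A userspace analogue of the added check: before writing a dump into a pre-existing file, verify that the file is owned by the uid we are running as, with geteuid() standing in for current->fsuid:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/stat.h>

    /* Refuse to write into a pre-created file owned by someone else. */
    int main(void)
    {
        int fd = open("core", O_WRONLY);
        if (fd < 0)
            return 1;

        struct stat st;
        if (fstat(fd, &st) == 0 && st.st_uid != geteuid()) {
            fprintf(stderr, "refusing to dump into another user's file\n");
            close(fd);
            return 1;
        }
        /* ... safe to write the dump here ... */
        close(fd);
        return 0;
    }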
diff --git a/tags/2.6.18-12/30055_isdn-net-overflow.patch b/tags/2.6.18-12/30055_isdn-net-overflow.patch
new file mode 100644
index 0000000..9ea5f60
--- /dev/null
+++ b/tags/2.6.18-12/30055_isdn-net-overflow.patch
@@ -0,0 +1,54 @@
+From: Karsten Keil <kkeil@suse.de>
+Date: Thu, 22 Nov 2007 11:43:13 +0000 (+0100)
+Subject: isdn: avoid copying overly-long strings
+X-Git-Tag: v2.6.24-rc4~110
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0f13864e5b24d9cbe18d125d41bfa4b726a82e40
+
+isdn: avoid copying overly-long strings
+
+Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9416
+
+Signed-off-by: Karsten Keil <kkeil@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff -urpN linux-source-2.6.18.orig/drivers/isdn/i4l/isdn_net.c linux-source-2.6.18/drivers/isdn/i4l/isdn_net.c
+--- linux-source-2.6.18.orig/drivers/isdn/i4l/isdn_net.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/isdn/i4l/isdn_net.c 2007-12-04 09:39:24.000000000 -0700
+@@ -2125,7 +2125,7 @@ isdn_net_find_icall(int di, int ch, int
+ u_long flags;
+ isdn_net_dev *p;
+ isdn_net_phone *n;
+- char nr[32];
++ char nr[ISDN_MSNLEN];
+ char *my_eaz;
+
+ /* Search name in netdev-chain */
+@@ -2134,7 +2134,7 @@ isdn_net_find_icall(int di, int ch, int
+ nr[1] = '\0';
+ printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n");
+ } else
+- strcpy(nr, setup->phone);
++ strlcpy(nr, setup->phone, ISDN_MSNLEN);
+ si1 = (int) setup->si1;
+ si2 = (int) setup->si2;
+ if (!setup->eazmsn[0]) {
+@@ -2803,7 +2803,7 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg
+ chidx = -1;
+ }
+ }
+- strcpy(lp->msn, cfg->eaz);
++ strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
+ lp->pre_device = drvidx;
+ lp->pre_channel = chidx;
+ lp->onhtime = cfg->onhtime;
+@@ -2952,7 +2952,7 @@ isdn_net_addphone(isdn_net_ioctl_phone *
+ if (p) {
+ if (!(n = (isdn_net_phone *) kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
+ return -ENOMEM;
+- strcpy(n->num, phone->phone);
++ strlcpy(n->num, phone->phone, sizeof(n->num));
+ n->next = p->local->phone[phone->outgoing & 1];
+ p->local->phone[phone->outgoing & 1] = n;
+ return 0;
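The conversion from strcpy() to strlcpy() is the whole fix: the ISDN ioctl paths copy caller-supplied phone-number strings into fixed-size fields, and an over-long string runs off the end of the buffer. strlcpy() is not in every libc, so this sketch carries a minimal equivalent:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy(): copy at most size-1 bytes, always NUL-terminate. */
    static size_t strlcpy_toy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len; /* length tried, so callers can detect truncation */
    }

    int main(void)
    {
        char nr[8]; /* stands in for the fixed-size ISDN_MSNLEN field */

        /* strcpy(nr, "0123456789012345") would run off the end of nr. */
        strlcpy_toy(nr, "0123456789012345", sizeof(nr));
        printf("stored: '%s'\n", nr); /* '0123456' - truncated but safe */
        return 0;
    }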
diff --git a/tags/2.6.18-12/30056_proc-snd-page-alloc-mem-leak.patch b/tags/2.6.18-12/30056_proc-snd-page-alloc-mem-leak.patch
new file mode 100644
index 0000000..f11dbcf
--- /dev/null
+++ b/tags/2.6.18-12/30056_proc-snd-page-alloc-mem-leak.patch
@@ -0,0 +1,169 @@
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 17 Sep 2007 19:55:10 +0000 (+0200)
+Subject: Convert snd-page-alloc proc file to use seq_file
+X-Git-Tag: v2.6.23-rc8~3
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=ccec6e2c4a74adf76ed4e2478091a311b1806212;hp=7bae705ef2c2daac1993de03e5be93b5c300fc5e
+
+Convert snd-page-alloc proc file to use seq_file
+
+Use seq_file for the proc file read/write of snd-page-alloc module.
+This automatically fixes bugs in the old proc code.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/sound/core/memalloc.c linux-source-2.6.18/sound/core/memalloc.c
+--- linux-source-2.6.18.orig/sound/core/memalloc.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/sound/core/memalloc.c 2007-09-25 17:53:01.000000000 -0600
+@@ -27,6 +27,7 @@
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/mm.h>
++#include <linux/seq_file.h>
+ #include <asm/uaccess.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/moduleparam.h>
+@@ -483,10 +484,8 @@ static void free_all_reserved_pages(void
+ #define SND_MEM_PROC_FILE "driver/snd-page-alloc"
+ static struct proc_dir_entry *snd_mem_proc;
+
+-static int snd_mem_proc_read(char *page, char **start, off_t off,
+- int count, int *eof, void *data)
++static int snd_mem_proc_read(struct seq_file *seq, void *offset)
+ {
+- int len = 0;
+ long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
+ struct list_head *p;
+ struct snd_mem_list *mem;
+@@ -494,44 +493,47 @@ static int snd_mem_proc_read(char *page,
+ static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
+
+ mutex_lock(&list_mutex);
+- len += snprintf(page + len, count - len,
+- "pages : %li bytes (%li pages per %likB)\n",
+- pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
++ seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
++ pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+ devno = 0;
+ list_for_each(p, &mem_list_head) {
+ mem = list_entry(p, struct snd_mem_list, list);
+ devno++;
+- len += snprintf(page + len, count - len,
+- "buffer %d : ID %08x : type %s\n",
+- devno, mem->id, types[mem->buffer.dev.type]);
+- len += snprintf(page + len, count - len,
+- " addr = 0x%lx, size = %d bytes\n",
+- (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
++ seq_printf(seq, "buffer %d : ID %08x : type %s\n",
++ devno, mem->id, types[mem->buffer.dev.type]);
++ seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
++ (unsigned long)mem->buffer.addr,
++ (int)mem->buffer.bytes);
+ }
+ mutex_unlock(&list_mutex);
+- return len;
++ return 0;
++}
++
++static int snd_mem_proc_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, snd_mem_proc_read, NULL);
+ }
+
+ /* FIXME: for pci only - other bus? */
+ #ifdef CONFIG_PCI
+ #define gettoken(bufp) strsep(bufp, " \t\n")
+
+-static int snd_mem_proc_write(struct file *file, const char __user *buffer,
+- unsigned long count, void *data)
++static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
++ size_t count, loff_t * ppos)
+ {
+ char buf[128];
+ char *token, *p;
+
+- if (count > ARRAY_SIZE(buf) - 1)
+- count = ARRAY_SIZE(buf) - 1;
++ if (count > sizeof(buf) - 1)
++ return -EINVAL;
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+- buf[ARRAY_SIZE(buf) - 1] = '\0';
++ buf[count] = '\0';
+
+ p = buf;
+ token = gettoken(&p);
+ if (! token || *token == '#')
+- return (int)count;
++ return count;
+ if (strcmp(token, "add") == 0) {
+ char *endp;
+ int vendor, device, size, buffers;
+@@ -552,7 +554,7 @@ static int snd_mem_proc_write(struct fil
+ (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
+ buffers > 4) {
+ printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
+- return (int)count;
++ return count;
+ }
+ vendor &= 0xffff;
+ device &= 0xffff;
+@@ -564,7 +566,7 @@ static int snd_mem_proc_write(struct fil
+ if (pci_set_dma_mask(pci, mask) < 0 ||
+ pci_set_consistent_dma_mask(pci, mask) < 0) {
+ printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
+- return (int)count;
++ return count;
+ }
+ }
+ for (i = 0; i < buffers; i++) {
+@@ -574,7 +576,7 @@ static int snd_mem_proc_write(struct fil
+ size, &dmab) < 0) {
+ printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
+ pci_dev_put(pci);
+- return (int)count;
++ return count;
+ }
+ snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
+ }
+@@ -600,9 +602,21 @@ static int snd_mem_proc_write(struct fil
+ free_all_reserved_pages();
+ else
+ printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
+- return (int)count;
++ return count;
+ }
+ #endif /* CONFIG_PCI */
++
++static const struct file_operations snd_mem_proc_fops = {
++ .owner = THIS_MODULE,
++ .open = snd_mem_proc_open,
++ .read = seq_read,
++#ifdef CONFIG_PCI
++ .write = snd_mem_proc_write,
++#endif
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
+ #endif /* CONFIG_PROC_FS */
+
+ /*
+@@ -613,12 +627,8 @@ static int __init snd_mem_init(void)
+ {
+ #ifdef CONFIG_PROC_FS
+ snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
+- if (snd_mem_proc) {
+- snd_mem_proc->read_proc = snd_mem_proc_read;
+-#ifdef CONFIG_PCI
+- snd_mem_proc->write_proc = snd_mem_proc_write;
+-#endif
+- }
++ if (snd_mem_proc)
++ snd_mem_proc->proc_fops = &snd_mem_proc_fops;
+ #endif
+ return 0;
+ }
diff --git a/tags/2.6.18-12/30057_fat-move-ioctl-compat-code.patch b/tags/2.6.18-12/30057_fat-move-ioctl-compat-code.patch
new file mode 100644
index 0000000..cde2538
--- /dev/null
+++ b/tags/2.6.18-12/30057_fat-move-ioctl-compat-code.patch
@@ -0,0 +1,167 @@
+From: David Howells <dhowells@redhat.com>
+Date: Thu, 31 Aug 2006 10:50:04 +0000 (+0200)
+Subject: [PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6]
+X-Git-Tag: v2.6.19~1581^2~9
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=188f83dfe0eeecd1427d0d255cc97dbf7ef6b4b7
+
+[PATCH] BLOCK: Move the msdos device ioctl compat stuff to the msdos driver [try #6]
+
+Move the msdos device ioctl compat stuff from fs/compat_ioctl.c to the msdos
+driver so that the msdos header file doesn't need to be included.
+
+Signed-Off-By: David Howells <dhowells@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
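+For background on why a separate 32-bit path is needed at all: struct
+dirent embeds two longs, so its layout differs between a 32-bit caller
+and a 64-bit kernel. A userspace sketch (field types only approximate
+the kernel's dirent/compat_dirent):
+
+#include <stdio.h>
+
+struct dirent64ish {
+        long d_ino;
+        long d_off;
+        unsigned short d_reclen;
+        char d_name[256];
+};
+
+struct compat_direntish {
+        unsigned int d_ino;
+        int d_off;
+        unsigned short d_reclen;
+        char d_name[256];
+};
+
+int main(void)
+{
+        /* on x86-64, long is 8 bytes, so the two layouts disagree */
+        printf("64-bit dirent: %zu bytes, 32-bit dirent: %zu bytes\n",
+               sizeof(struct dirent64ish), sizeof(struct compat_direntish));
+        return 0;
+}
+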
+diff -urpN linux-source-2.6.18.orig/fs/compat_ioctl.c linux-source-2.6.18/fs/compat_ioctl.c
+--- linux-source-2.6.18.orig/fs/compat_ioctl.c 2006-09-20 04:42:06.000000000 +0100
++++ linux-source-2.6.18/fs/compat_ioctl.c 2007-06-22 15:57:42.000000000 +0100
+@@ -113,7 +113,6 @@
+ #include <linux/nbd.h>
+ #include <linux/random.h>
+ #include <linux/filter.h>
+-#include <linux/msdos_fs.h>
+ #include <linux/pktcdvd.h>
+
+ #include <linux/hiddev.h>
+@@ -2052,51 +2051,6 @@ static int mtd_rw_oob(unsigned int fd, u
+ return err;
+ }
+
+-#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
+-#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
+-
+-static long
+-put_dirent32 (struct dirent *d, struct compat_dirent __user *d32)
+-{
+- if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent)))
+- return -EFAULT;
+-
+- __put_user(d->d_ino, &d32->d_ino);
+- __put_user(d->d_off, &d32->d_off);
+- __put_user(d->d_reclen, &d32->d_reclen);
+- if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen))
+- return -EFAULT;
+-
+- return 0;
+-}
+-
+-static int vfat_ioctl32(unsigned fd, unsigned cmd, unsigned long arg)
+-{
+- struct compat_dirent __user *p = compat_ptr(arg);
+- int ret;
+- mm_segment_t oldfs = get_fs();
+- struct dirent d[2];
+-
+- switch(cmd)
+- {
+- case VFAT_IOCTL_READDIR_BOTH32:
+- cmd = VFAT_IOCTL_READDIR_BOTH;
+- break;
+- case VFAT_IOCTL_READDIR_SHORT32:
+- cmd = VFAT_IOCTL_READDIR_SHORT;
+- break;
+- }
+-
+- set_fs(KERNEL_DS);
+- ret = sys_ioctl(fd,cmd,(unsigned long)&d);
+- set_fs(oldfs);
+- if (ret >= 0) {
+- ret |= put_dirent32(&d[0], p);
+- ret |= put_dirent32(&d[1], p + 1);
+- }
+- return ret;
+-}
+-
+ #define REISERFS_IOC_UNPACK32 _IOW(0xCD,1,int)
+
+ static int reiserfs_ioctl32(unsigned fd, unsigned cmd, unsigned long ptr)
+@@ -2866,9 +2820,6 @@ HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_io
+ HANDLE_IOCTL(BLKBSZGET_32, do_blkbszget)
+ HANDLE_IOCTL(BLKBSZSET_32, do_blkbszset)
+ HANDLE_IOCTL(BLKGETSIZE64_32, do_blkgetsize64)
+-/* vfat */
+-HANDLE_IOCTL(VFAT_IOCTL_READDIR_BOTH32, vfat_ioctl32)
+-HANDLE_IOCTL(VFAT_IOCTL_READDIR_SHORT32, vfat_ioctl32)
+ HANDLE_IOCTL(REISERFS_IOC_UNPACK32, reiserfs_ioctl32)
+ /* Raw devices */
+ HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
+diff -urpN linux-source-2.6.18.orig/fs/fat/dir.c linux-source-2.6.18/fs/fat/dir.c
+--- linux-source-2.6.18.orig/fs/fat/dir.c 2006-09-20 04:42:06.000000000 +0100
++++ linux-source-2.6.18/fs/fat/dir.c 2007-06-22 15:55:53.000000000 +0100
+@@ -20,6 +20,7 @@
+ #include <linux/dirent.h>
+ #include <linux/smp_lock.h>
+ #include <linux/buffer_head.h>
++#include <linux/compat.h>
+ #include <asm/uaccess.h>
+
+ static inline loff_t fat_make_i_pos(struct super_block *sb,
+@@ -741,10 +742,65 @@ static int fat_dir_ioctl(struct inode *
+ return ret;
+ }
+
++#ifdef CONFIG_COMPAT
++#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
++#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
++
++static long fat_compat_put_dirent32(struct dirent *d,
++ struct compat_dirent __user *d32)
++{
++ if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent)))
++ return -EFAULT;
++
++ __put_user(d->d_ino, &d32->d_ino);
++ __put_user(d->d_off, &d32->d_off);
++ __put_user(d->d_reclen, &d32->d_reclen);
++ if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen))
++ return -EFAULT;
++
++ return 0;
++}
++
++static long fat_compat_dir_ioctl(struct file *file, unsigned cmd,
++ unsigned long arg)
++{
++ struct compat_dirent __user *p = compat_ptr(arg);
++ int ret;
++ mm_segment_t oldfs = get_fs();
++ struct dirent d[2];
++
++ switch (cmd) {
++ case VFAT_IOCTL_READDIR_BOTH32:
++ cmd = VFAT_IOCTL_READDIR_BOTH;
++ break;
++ case VFAT_IOCTL_READDIR_SHORT32:
++ cmd = VFAT_IOCTL_READDIR_SHORT;
++ break;
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ set_fs(KERNEL_DS);
++ lock_kernel();
++ ret = fat_dir_ioctl(file->f_dentry->d_inode, file,
++ cmd, (unsigned long) &d);
++ unlock_kernel();
++ set_fs(oldfs);
++ if (ret >= 0) {
++ ret |= fat_compat_put_dirent32(&d[0], p);
++ ret |= fat_compat_put_dirent32(&d[1], p + 1);
++ }
++ return ret;
++}
++#endif /* CONFIG_COMPAT */
++
+ const struct file_operations fat_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = fat_readdir,
+ .ioctl = fat_dir_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = fat_compat_dir_ioctl,
++#endif
+ .fsync = file_fsync,
+ };
+
diff --git a/tags/2.6.18-12/30058_fat-fix-compat-ioctls.patch b/tags/2.6.18-12/30058_fat-fix-compat-ioctls.patch
new file mode 100644
index 0000000..f98c7d1
--- /dev/null
+++ b/tags/2.6.18-12/30058_fat-fix-compat-ioctls.patch
@@ -0,0 +1,311 @@
+From: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
+Date: Tue, 8 May 2007 07:31:28 +0000 (-0700)
+Subject: fat: fix VFAT compat ioctls on 64-bit systems
+X-Git-Tag: v2.6.22-rc1~614
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=c483bab099cb89e92b7cad94a52fcdaf37e56657
+
+fat: fix VFAT compat ioctls on 64-bit systems
+
+If you compile and run the below test case in an msdos or vfat directory on
+an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct
+followed by a SIGSEGV.
+
+The patch fixes this.
+
+Reported and initial fix by Bart Oldeman
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <dirent.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+struct kernel_dirent {
+ long d_ino;
+ long d_off;
+ unsigned short d_reclen;
+ char d_name[256]; /* We must not include limits.h! */
+};
+#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct kernel_dirent [2])
+#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2])
+
+int main(void)
+{
+ int fd = open(".", O_RDONLY);
+ struct kernel_dirent de[2];
+
+ while (1) {
+ int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de);
+ if (i == -1) break;
+ if (de[0].d_reclen == 0) break;
+ printf("SFN: reclen=%2d off=%d ino=%d, %-12s",
+ de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name);
+ if (de[1].d_reclen)
+ printf("\tLFN: reclen=%2d off=%d ino=%d, %s",
+ de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name);
+ printf("\n");
+ }
+ return 0;
+}
+
+Signed-off-by: Bart Oldeman <bartoldeman@users.sourceforge.net>
+Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/fat/dir.c linux-source-2.6.18/fs/fat/dir.c
+--- linux-source-2.6.18.orig/fs/fat/dir.c 2007-06-22 21:48:00.000000000 -0600
++++ linux-source-2.6.18/fs/fat/dir.c 2007-06-22 21:48:42.000000000 -0600
+@@ -422,7 +422,7 @@ EODir:
+ EXPORT_SYMBOL_GPL(fat_search_long);
+
+ struct fat_ioctl_filldir_callback {
+- struct dirent __user *dirent;
++ void __user *dirent;
+ int result;
+ /* for dir ioctl */
+ const char *longname;
+@@ -647,62 +647,85 @@ static int fat_readdir(struct file *filp
+ return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
+ }
+
+-static int fat_ioctl_filldir(void *__buf, const char *name, int name_len,
+- loff_t offset, ino_t ino, unsigned int d_type)
++#define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \
++static int func(void *__buf, const char *name, int name_len, \
++ loff_t offset, ino_t ino, unsigned int d_type) \
++{ \
++ struct fat_ioctl_filldir_callback *buf = __buf; \
++ struct dirent_type __user *d1 = buf->dirent; \
++ struct dirent_type __user *d2 = d1 + 1; \
++ \
++ if (buf->result) \
++ return -EINVAL; \
++ buf->result++; \
++ \
++ if (name != NULL) { \
++ /* dirent has only short name */ \
++ if (name_len >= sizeof(d1->d_name)) \
++ name_len = sizeof(d1->d_name) - 1; \
++ \
++ if (put_user(0, d2->d_name) || \
++ put_user(0, &d2->d_reclen) || \
++ copy_to_user(d1->d_name, name, name_len) || \
++ put_user(0, d1->d_name + name_len) || \
++ put_user(name_len, &d1->d_reclen)) \
++ goto efault; \
++ } else { \
++ /* dirent has short and long name */ \
++ const char *longname = buf->longname; \
++ int long_len = buf->long_len; \
++ const char *shortname = buf->shortname; \
++ int short_len = buf->short_len; \
++ \
++ if (long_len >= sizeof(d1->d_name)) \
++ long_len = sizeof(d1->d_name) - 1; \
++ if (short_len >= sizeof(d1->d_name)) \
++ short_len = sizeof(d1->d_name) - 1; \
++ \
++ if (copy_to_user(d2->d_name, longname, long_len) || \
++ put_user(0, d2->d_name + long_len) || \
++ put_user(long_len, &d2->d_reclen) || \
++ put_user(ino, &d2->d_ino) || \
++ put_user(offset, &d2->d_off) || \
++ copy_to_user(d1->d_name, shortname, short_len) || \
++ put_user(0, d1->d_name + short_len) || \
++ put_user(short_len, &d1->d_reclen)) \
++ goto efault; \
++ } \
++ return 0; \
++efault: \
++ buf->result = -EFAULT; \
++ return -EFAULT; \
++}
++
++FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, dirent)
++
++static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
++ void __user *dirent, filldir_t filldir,
++ int short_only, int both)
+ {
+- struct fat_ioctl_filldir_callback *buf = __buf;
+- struct dirent __user *d1 = buf->dirent;
+- struct dirent __user *d2 = d1 + 1;
+-
+- if (buf->result)
+- return -EINVAL;
+- buf->result++;
+-
+- if (name != NULL) {
+- /* dirent has only short name */
+- if (name_len >= sizeof(d1->d_name))
+- name_len = sizeof(d1->d_name) - 1;
+-
+- if (put_user(0, d2->d_name) ||
+- put_user(0, &d2->d_reclen) ||
+- copy_to_user(d1->d_name, name, name_len) ||
+- put_user(0, d1->d_name + name_len) ||
+- put_user(name_len, &d1->d_reclen))
+- goto efault;
+- } else {
+- /* dirent has short and long name */
+- const char *longname = buf->longname;
+- int long_len = buf->long_len;
+- const char *shortname = buf->shortname;
+- int short_len = buf->short_len;
+-
+- if (long_len >= sizeof(d1->d_name))
+- long_len = sizeof(d1->d_name) - 1;
+- if (short_len >= sizeof(d1->d_name))
+- short_len = sizeof(d1->d_name) - 1;
+-
+- if (copy_to_user(d2->d_name, longname, long_len) ||
+- put_user(0, d2->d_name + long_len) ||
+- put_user(long_len, &d2->d_reclen) ||
+- put_user(ino, &d2->d_ino) ||
+- put_user(offset, &d2->d_off) ||
+- copy_to_user(d1->d_name, shortname, short_len) ||
+- put_user(0, d1->d_name + short_len) ||
+- put_user(short_len, &d1->d_reclen))
+- goto efault;
++ struct fat_ioctl_filldir_callback buf;
++ int ret;
++
++ buf.dirent = dirent;
++ buf.result = 0;
++ mutex_lock(&inode->i_mutex);
++ ret = -ENOENT;
++ if (!IS_DEADDIR(inode)) {
++ ret = __fat_readdir(inode, filp, &buf, filldir,
++ short_only, both);
+ }
+- return 0;
+-efault:
+- buf->result = -EFAULT;
+- return -EFAULT;
++ mutex_unlock(&inode->i_mutex);
++ if (ret >= 0)
++ ret = buf.result;
++ return ret;
+ }
+
+-static int fat_dir_ioctl(struct inode * inode, struct file * filp,
+- unsigned int cmd, unsigned long arg)
++static int fat_dir_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
+ {
+- struct fat_ioctl_filldir_callback buf;
+- struct dirent __user *d1;
+- int ret, short_only, both;
++ struct dirent __user *d1 = (struct dirent __user *)arg;
++ int short_only, both;
+
+ switch (cmd) {
+ case VFAT_IOCTL_READDIR_SHORT:
+@@ -717,7 +740,6 @@ static int fat_dir_ioctl(struct inode *
+ return fat_generic_ioctl(inode, filp, cmd, arg);
+ }
+
+- d1 = (struct dirent __user *)arg;
+ if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2])))
+ return -EFAULT;
+ /*
+@@ -728,69 +750,48 @@ static int fat_dir_ioctl(struct inode *
+ if (put_user(0, &d1->d_reclen))
+ return -EFAULT;
+
+- buf.dirent = d1;
+- buf.result = 0;
+- mutex_lock(&inode->i_mutex);
+- ret = -ENOENT;
+- if (!IS_DEADDIR(inode)) {
+- ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
+- short_only, both);
+- }
+- mutex_unlock(&inode->i_mutex);
+- if (ret >= 0)
+- ret = buf.result;
+- return ret;
++ return fat_ioctl_readdir(inode, filp, d1, fat_ioctl_filldir,
++ short_only, both);
+ }
+
+ #ifdef CONFIG_COMPAT
+ #define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
+ #define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
+
+-static long fat_compat_put_dirent32(struct dirent *d,
+- struct compat_dirent __user *d32)
+-{
+- if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent)))
+- return -EFAULT;
++FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent)
+
+- __put_user(d->d_ino, &d32->d_ino);
+- __put_user(d->d_off, &d32->d_off);
+- __put_user(d->d_reclen, &d32->d_reclen);
+- if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen))
+- return -EFAULT;
+-
+- return 0;
+-}
+-
+-static long fat_compat_dir_ioctl(struct file *file, unsigned cmd,
++static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
+ unsigned long arg)
+ {
+- struct compat_dirent __user *p = compat_ptr(arg);
+- int ret;
+- mm_segment_t oldfs = get_fs();
+- struct dirent d[2];
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct compat_dirent __user *d1 = compat_ptr(arg);
++ int short_only, both;
+
+ switch (cmd) {
+- case VFAT_IOCTL_READDIR_BOTH32:
+- cmd = VFAT_IOCTL_READDIR_BOTH;
+- break;
+ case VFAT_IOCTL_READDIR_SHORT32:
+- cmd = VFAT_IOCTL_READDIR_SHORT;
++ short_only = 1;
++ both = 0;
++ break;
++ case VFAT_IOCTL_READDIR_BOTH32:
++ short_only = 0;
++ both = 1;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+- set_fs(KERNEL_DS);
+- lock_kernel();
+- ret = fat_dir_ioctl(file->f_dentry->d_inode, file,
+- cmd, (unsigned long) &d);
+- unlock_kernel();
+- set_fs(oldfs);
+- if (ret >= 0) {
+- ret |= fat_compat_put_dirent32(&d[0], p);
+- ret |= fat_compat_put_dirent32(&d[1], p + 1);
+- }
+- return ret;
++ if (!access_ok(VERIFY_WRITE, d1, sizeof(struct compat_dirent[2])))
++ return -EFAULT;
++ /*
++ * Yes, we don't need this put_user() absolutely. However old
++ * code didn't return the right value. So, app use this value,
++ * in order to check whether it is EOF.
++ */
++ if (put_user(0, &d1->d_reclen))
++ return -EFAULT;
++
++ return fat_ioctl_readdir(inode, filp, d1, fat_compat_ioctl_filldir,
++ short_only, both);
+ }
+ #endif /* CONFIG_COMPAT */
+
diff --git a/tags/2.6.18-12/30059_vfs-use-access-mode-flag.patch b/tags/2.6.18-12/30059_vfs-use-access-mode-flag.patch
new file mode 100644
index 0000000..ef47e1a
--- /dev/null
+++ b/tags/2.6.18-12/30059_vfs-use-access-mode-flag.patch
@@ -0,0 +1,52 @@
+From: Linus Torvalds <torvalds@woody.linux-foundation.org>
+Date: Sat, 12 Jan 2008 22:06:34 +0000 (-0800)
+Subject: Use access mode instead of open flags to determine needed permissions
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=974a9f0b47da74e28f68b9c8645c3786aa5ace1a
+
+Use access mode instead of open flags to determine needed permissions
+
+Way back when (in commit 834f2a4a1554dc5b2598038b3fe8703defcbe467, aka
+"VFS: Allow the filesystem to return a full file pointer on open intent"
+to be exact), Trond changed the open logic to keep track of the original
+flags to a file open, in order to pass down the intent of a dentry
+lookup to the low-level filesystem.
+
+However, when doing that reorganization, it changed the meaning of
+namei_flags, and thus inadvertently changed the test of access mode for
+directories (and RO filesystems) to use the wrong flag. So fix those
+tests back to use the access mode ("acc_mode") rather than the open flag
+("flag").
+
+Issue noticed by Bill Roman at Datalight.
+
+Reported-and-tested-by: Bill Roman <bill.roman@datalight.com>
+Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@hp.com>
+
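+As a rough userspace illustration of why the two tests differ (the
+ACC_MODE table, FMODE_WRITE and MAY_WRITE values below are taken from
+this era's kernel headers):
+
+#include <stdio.h>
+
+#define O_ACCMODE   3
+#define FMODE_WRITE 2
+#define MAY_WRITE   2
+#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
+
+int main(void)
+{
+        const char *name[] = { "O_RDONLY", "O_WRONLY", "O_RDWR" };
+        int flag;
+
+        for (flag = 0; flag < 3; flag++)
+                printf("%-8s flag & FMODE_WRITE -> %d, acc_mode & MAY_WRITE -> %d\n",
+                       name[flag], !!(flag & FMODE_WRITE),
+                       !!(ACC_MODE(flag) & MAY_WRITE));
+        return 0;
+}
+
+O_WRONLY (flag value 1) has no bit in common with FMODE_WRITE, so the old
+test missed write-only opens in both the directory and read-only
+filesystem checks; the access-mode test catches them.
+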
+diff -urpN linux-source-2.6.18.orig/fs/namei.c linux-source-2.6.18/fs/namei.c
+--- linux-source-2.6.18.orig/fs/namei.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/namei.c 2008-01-15 16:42:10.000000000 -0700
+@@ -1500,7 +1500,7 @@ int may_open(struct nameidata *nd, int a
+ if (S_ISLNK(inode->i_mode))
+ return -ELOOP;
+
+- if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE))
++ if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
+ return -EISDIR;
+
+ error = vfs_permission(nd, acc_mode);
+@@ -1519,7 +1519,7 @@ int may_open(struct nameidata *nd, int a
+ return -EACCES;
+
+ flag &= ~O_TRUNC;
+- } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
++ } else if (IS_RDONLY(inode) && (acc_mode & MAY_WRITE))
+ return -EROFS;
+ /*
+ * An append-only file must be opened in append mode for writing.
diff --git a/tags/2.6.18-12/30060_i4l-isdn_ioctl-mem-overrun.patch b/tags/2.6.18-12/30060_i4l-isdn_ioctl-mem-overrun.patch
new file mode 100644
index 0000000..a44bb5b
--- /dev/null
+++ b/tags/2.6.18-12/30060_i4l-isdn_ioctl-mem-overrun.patch
@@ -0,0 +1,56 @@
+From: Karsten Keil <kkeil@suse.de>
+Date: Sat, 1 Dec 2007 20:16:15 +0000 (-0800)
+Subject: I4L: fix isdn_ioctl memory overrun vulnerability
+X-Git-Tag: v2.6.24-rc4~16
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=eafe1aa37e6ec2d56f14732b5240c4dd09f0613a
+
+I4L: fix isdn_ioctl memory overrun vulnerability
+
+Fix possible memory overrun issue in the isdn ioctl code.
+
+Found by ADLAB <adlab@venustech.com.cn>
+
+Signed-off-by: Karsten Keil <kkeil@suse.de>
+Cc: ADLAB <adlab@venustech.com.cn>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
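+The pattern being applied: a fixed-size string copied in from userspace
+is not guaranteed to be NUL-terminated, so it must be terminated before
+strlen()/strchr() may touch it. A userspace sketch of the hazard (the
+field size here is illustrative, not the real isdn_ioctl_struct layout):
+
+#include <stdio.h>
+#include <string.h>
+
+struct req {
+        char drvid[8];
+};
+
+int main(void)
+{
+        char user_buf[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
+        struct req r;
+
+        memcpy(&r, user_buf, sizeof(r));  /* stands in for copy_from_user() */
+        r.drvid[sizeof(r.drvid) - 1] = 0; /* the clamp the patch adds */
+        printf("len = %zu\n", strlen(r.drvid)); /* now bounded; without the
+                                                   clamp strlen() reads past
+                                                   the end of drvid */
+        return 0;
+}
+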
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index c6df292..d695295 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1515,6 +1515,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ if (copy_from_user(&iocts, argp,
+ sizeof(isdn_ioctl_struct)))
+ return -EFAULT;
++ iocts.drvid[sizeof(iocts.drvid)-1] = 0;
+ if (strlen(iocts.drvid)) {
+ if ((p = strchr(iocts.drvid, ',')))
+ *p = 0;
+@@ -1599,6 +1600,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ if (copy_from_user(&iocts, argp,
+ sizeof(isdn_ioctl_struct)))
+ return -EFAULT;
++ iocts.drvid[sizeof(iocts.drvid)-1] = 0;
+ if (strlen(iocts.drvid)) {
+ drvidx = -1;
+ for (i = 0; i < ISDN_MAX_DRIVERS; i++)
+@@ -1643,7 +1645,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ } else {
+ p = (char __user *) iocts.arg;
+ for (i = 0; i < 10; i++) {
+- sprintf(bname, "%s%s",
++ snprintf(bname, sizeof(bname), "%s%s",
+ strlen(dev->drv[drvidx]->msn2eaz[i]) ?
+ dev->drv[drvidx]->msn2eaz[i] : "_",
+ (i < 9) ? "," : "\0");
+@@ -1673,6 +1675,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+ char *p;
+ if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct)))
+ return -EFAULT;
++ iocts.drvid[sizeof(iocts.drvid)-1] = 0;
+ if (strlen(iocts.drvid)) {
+ if ((p = strchr(iocts.drvid, ',')))
+ *p = 0;
diff --git a/tags/2.6.18-12/30061_vmsplice-security.patch b/tags/2.6.18-12/30061_vmsplice-security.patch
new file mode 100644
index 0000000..248bdba
--- /dev/null
+++ b/tags/2.6.18-12/30061_vmsplice-security.patch
@@ -0,0 +1,28 @@
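+The change below adds the missing access_ok() checks in fs/splice.c's
+get_iovec_page_array(): both the iovec array itself and each iov_base it
+contains arrive straight from userspace via vmsplice(2), so each pointer
+must be validated before it is read or handed to the page-mapping code;
+without the checks a crafted iovec can smuggle kernel addresses through.
+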
+diff --git a/fs/splice.c b/fs/splice.c
+index 684bca3..2d7e598 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1122,6 +1122,11 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+ size_t len;
+ int i;
+
++ if (!access_ok(VERIFY_READ, iov, sizeof(struct iovec))) {
++ error = -EFAULT;
++ break;
++ }
++
+ /*
+ * Get user address base and length for this iovec.
+ */
+@@ -1141,6 +1146,11 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+ if (unlikely(!base))
+ break;
+
++ if (!access_ok(VERIFY_READ, base, len)) {
++ error = -EFAULT;
++ break;
++ }
++
+ /*
+ * Get this base offset and number of pages, then map
+ * in the user pages.
diff --git a/tags/2.6.18-12/30062_clear-spurious-irq.patch b/tags/2.6.18-12/30062_clear-spurious-irq.patch
new file mode 100644
index 0000000..6873d8c
--- /dev/null
+++ b/tags/2.6.18-12/30062_clear-spurious-irq.patch
@@ -0,0 +1,34 @@
+From: Linus Torvalds <torvalds@woody.linux-foundation.org>
+Date: Tue, 23 Jan 2007 22:16:31 +0000 (-0800)
+Subject: Clear spurious irq stat information when adding irq handler
+X-Git-Tag: v2.6.20-rc6~15
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=8528b0f1de1101c6002036fd53638fb21111d0ea
+
+Clear spurious irq stat information when adding irq handler
+
+Any newly added irq handler may obviously make any old spurious irq
+status invalid, since the new handler may well be the thing that is
+supposed to handle any interrupts that came in.
+
+So just clear the statistics when adding handlers.
+
+Pointed-out-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index b385878..8b961ad 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -315,6 +315,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
+ /* Undo nested disables: */
+ desc->depth = 1;
+ }
++ /* Reset broken irq detection when installing new handler */
++ desc->irq_count = 0;
++ desc->irqs_unhandled = 0;
+ spin_unlock_irqrestore(&desc->lock, flags);
+
+ new->irq = irq;
diff --git a/tags/2.6.18-12/30063_mmap-VM_DONTEXPAND.patch b/tags/2.6.18-12/30063_mmap-VM_DONTEXPAND.patch
new file mode 100644
index 0000000..9c285a2
--- /dev/null
+++ b/tags/2.6.18-12/30063_mmap-VM_DONTEXPAND.patch
@@ -0,0 +1,120 @@
+From: Nick Piggin <npiggin@suse.de>
+Date: Sat, 2 Feb 2008 02:08:53 +0000 (+0100)
+Subject: vm audit: add VM_DONTEXPAND to mmap for drivers that need it (CVE-2008-0007)
+X-Git-Tag: v2.6.22.17~1
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fstable%2Flinux-2.6.22.y.git;a=commitdiff_plain;h=83af8eda68a3f0c227d0eb05348e58ae27a62e7e
+
+vm audit: add VM_DONTEXPAND to mmap for drivers that need it (CVE-2008-0007)
+
+Drivers that register a ->fault handler, but do not range-check the
+offset argument, must set VM_DONTEXPAND in the vm_flags in order to
+prevent an expanding mremap from overflowing the resource.
+
+I've audited the tree and attempted to fix these problems (usually by
+adding VM_DONTEXPAND where it is not obvious).
+
+Signed-off-by: Nick Piggin <npiggin@suse.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
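+A userspace sketch of the expansion the flag forbids (anonymous memory is
+used here because it needs no driver; for a driver mapping that lacks
+VM_DONTEXPAND, the grown range reaches the fault/nopage handler with
+offsets the driver never validated):
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <sys/mman.h>
+
+int main(void)
+{
+        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+        void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
+
+        /* on a vma with VM_DONTEXPAND set, mremap() refuses to grow it */
+        printf("mapping grew: %p -> %p\n", p, q);
+        return 0;
+}
+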
+diff -urpN linux-source-2.6.18.orig/drivers/char/drm/drm_vm.c linux-source-2.6.18/drivers/char/drm/drm_vm.c
+--- linux-source-2.6.18.orig/drivers/char/drm/drm_vm.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/char/drm/drm_vm.c 2008-02-13 22:17:32.000000000 -0700
+@@ -479,6 +479,7 @@ static int drm_mmap_dma(struct file *fil
+ vma->vm_ops = &drm_vm_dma_ops;
+
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
++ vma->vm_flags |= VM_DONTEXPAND;
+
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open(vma);
+@@ -656,6 +657,7 @@ int drm_mmap(struct file *filp, struct v
+ return -EINVAL; /* This should never happen. */
+ }
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
++ vma->vm_flags |= VM_DONTEXPAND;
+
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open(vma);
+diff -urpN linux-source-2.6.18.orig/fs/ncpfs/mmap.c linux-source-2.6.18/fs/ncpfs/mmap.c
+--- linux-source-2.6.18.orig/fs/ncpfs/mmap.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/ncpfs/mmap.c 2008-02-13 22:17:35.000000000 -0700
+@@ -47,9 +47,6 @@ static struct page* ncp_file_mmap_nopage
+ pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
+
+ count = PAGE_SIZE;
+- if (address + PAGE_SIZE > area->vm_end) {
+- count = area->vm_end - address;
+- }
+ /* what we can read in one go */
+ bufsize = NCP_SERVER(inode)->buffer_size;
+
+diff -urpN linux-source-2.6.18.orig/kernel/relay.c linux-source-2.6.18/kernel/relay.c
+--- linux-source-2.6.18.orig/kernel/relay.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/kernel/relay.c 2008-02-13 22:17:35.000000000 -0700
+@@ -84,6 +84,7 @@ int relay_mmap_buf(struct rchan_buf *buf
+ return -EINVAL;
+
+ vma->vm_ops = &relay_file_mmap_ops;
++ vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_private_data = buf;
+ buf->chan->cb->buf_mapped(buf, filp);
+
+diff -urpN linux-source-2.6.18.orig/sound/oss/via82cxxx_audio.c linux-source-2.6.18/sound/oss/via82cxxx_audio.c
+--- linux-source-2.6.18.orig/sound/oss/via82cxxx_audio.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/sound/oss/via82cxxx_audio.c 2008-02-13 22:17:35.000000000 -0700
+@@ -2104,6 +2104,7 @@ static struct page * via_mm_nopage (stru
+ {
+ struct via_info *card = vma->vm_private_data;
+ struct via_channel *chan = &card->ch_out;
++ unsigned long max_bufs;
+ struct page *dmapage;
+ unsigned long pgoff;
+ int rd, wr;
+@@ -2127,14 +2128,11 @@ static struct page * via_mm_nopage (stru
+ rd = card->ch_in.is_mapped;
+ wr = card->ch_out.is_mapped;
+
+-#ifndef VIA_NDEBUG
+- {
+- unsigned long max_bufs = chan->frag_number;
+- if (rd && wr) max_bufs *= 2;
+- /* via_dsp_mmap() should ensure this */
+- assert (pgoff < max_bufs);
+- }
+-#endif
++ max_bufs = chan->frag_number;
++ if (rd && wr)
++ max_bufs *= 2;
++ if (pgoff >= max_bufs)
++ return NOPAGE_SIGBUS;
+
+ /* if full-duplex (read+write) and we have two sets of bufs,
+ * then the playback buffers come first, sez soundcard.c */
+diff -urpN linux-source-2.6.18.orig/sound/usb/usx2y/usX2Yhwdep.c linux-source-2.6.18/sound/usb/usx2y/usX2Yhwdep.c
+--- linux-source-2.6.18.orig/sound/usb/usx2y/usX2Yhwdep.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/sound/usb/usx2y/usX2Yhwdep.c 2008-02-13 22:17:35.000000000 -0700
+@@ -88,7 +88,7 @@ static int snd_us428ctls_mmap(struct snd
+ us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
+ }
+ area->vm_ops = &us428ctls_vm_ops;
+- area->vm_flags |= VM_RESERVED;
++ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ area->vm_private_data = hw->private_data;
+ return 0;
+ }
+diff -urpN linux-source-2.6.18.orig/sound/usb/usx2y/usx2yhwdeppcm.c linux-source-2.6.18/sound/usb/usx2y/usx2yhwdeppcm.c
+--- linux-source-2.6.18.orig/sound/usb/usx2y/usx2yhwdeppcm.c 2008-02-10 10:48:45.000000000 -0700
++++ linux-source-2.6.18/sound/usb/usx2y/usx2yhwdeppcm.c 2008-02-13 22:17:35.000000000 -0700
+@@ -728,7 +728,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(stru
+ return -ENODEV;
+ }
+ area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
+- area->vm_flags |= VM_RESERVED;
++ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ area->vm_private_data = hw->private_data;
+ return 0;
+ }
diff --git a/tags/2.6.18-12/30064_RLIMIT_CPU-earlier-checking.patch b/tags/2.6.18-12/30064_RLIMIT_CPU-earlier-checking.patch
new file mode 100644
index 0000000..af498f1
--- /dev/null
+++ b/tags/2.6.18-12/30064_RLIMIT_CPU-earlier-checking.patch
@@ -0,0 +1,80 @@
+commit 9926e4c74300c4b31dee007298c6475d33369df0
+Author: Tom Alsberg <alsbergt@cs.huji.ac.il>
+Date: Tue May 8 00:30:31 2007 -0700
+
+ CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
+
+ As discovered here today, the change in Kernel 2.6.17 intended to inhibit
+ users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
+ "cheating" and setting it to 1 in such a case, does not make a difference,
+ as the check is done in the wrong place (too late), and only applies to the
+ profiling code.
+
+ On all systems I checked running kernels above 2.6.17, no matter what the
+ hard and soft CPU time limits were before, a user could escape them by
+ issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's
+ process was not ever killed.
+
+ Attached is a trivial patch to fix that. Simply moving the check to a
+ slightly earlier location (specifically, before the line that actually
+ assigns the limit - *old_rlim = new_rlim), does the trick.
+
+ Do note that at least the zsh (but not ash, dash, or bash) shell has the
+ problem of "caching" the limits set by the ulimit command, so when running
+ zsh the fix will not immediately be evident - after entering "ulimit -t 0",
+ "ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
+ limit as returned by getrlimit(...) will be 1. It can be verified by
+ opening a subshell (which will not have the values of the parent shell in
+ cache) and checking in it, or just by running a CPU intensive command like
+ "echo '65536^1048576' | bc" and verifying that it dumps core after one
+ second.
+
+ Regardless of whether that is a misfeature in the shell, perhaps it would
+ be better to return -EINVAL from setrlimit in such a case instead of
+ cheating and setting to 1, as that does not really reflect the actual state
+ of the process anymore. I do not however know what the ground for that
+ decision was in the original 2.6.17 change, and whether there would be any
+ "backward" compatibility issues, so I preferred not to touch that right
+ now.
+
+ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
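+A quick userspace check of the fixed behaviour (on a patched kernel the
+zero is silently promoted to one second, so the spin below is terminated
+by SIGXCPU after roughly a second of CPU time):
+
+#include <stdio.h>
+#include <sys/resource.h>
+
+int main(void)
+{
+        struct rlimit rl = { 0, RLIM_INFINITY };
+
+        setrlimit(RLIMIT_CPU, &rl);
+        getrlimit(RLIMIT_CPU, &rl);
+        printf("rlim_cur after asking for 0: %lu\n",
+               (unsigned long)rl.rlim_cur);
+        for (;;)
+                ;  /* burn CPU until the limit fires */
+}
+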
+diff -urpN linux-source-2.6.18.orig/kernel/sys.c linux-source-2.6.18/kernel/sys.c
+--- linux-source-2.6.18.orig/kernel/sys.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/kernel/sys.c 2008-04-04 18:04:09.000000000 -0600
+@@ -1807,6 +1807,16 @@ asmlinkage long sys_setrlimit(unsigned i
+ if (retval)
+ return retval;
+
++ if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
++ /*
++ * The caller is asking for an immediate RLIMIT_CPU
++ * expiry. But we use the zero value to mean "it was
++ * never set". So let's cheat and make it one second
++ * instead
++ */
++ new_rlim.rlim_cur = 1;
++ }
++
+ task_lock(current->group_leader);
+ *old_rlim = new_rlim;
+ task_unlock(current->group_leader);
+@@ -1828,15 +1838,6 @@ asmlinkage long sys_setrlimit(unsigned i
+ unsigned long rlim_cur = new_rlim.rlim_cur;
+ cputime_t cputime;
+
+- if (rlim_cur == 0) {
+- /*
+- * The caller is asking for an immediate RLIMIT_CPU
+- * expiry. But we use the zero value to mean "it was
+- * never set". So let's cheat and make it one second
+- * instead
+- */
+- rlim_cur = 1;
+- }
+ cputime = secs_to_cputime(rlim_cur);
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&current->sighand->siglock);
diff --git a/tags/2.6.18-12/30065_dnotify-race.patch b/tags/2.6.18-12/30065_dnotify-race.patch
new file mode 100644
index 0000000..ab637bc
--- /dev/null
+++ b/tags/2.6.18-12/30065_dnotify-race.patch
@@ -0,0 +1,22 @@
+diff -urpN linux-source-2.6.18.orig/fs/dnotify.c linux-source-2.6.18/fs/dnotify.c
+--- linux-source-2.6.18.orig/fs/dnotify.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/dnotify.c 2008-04-09 13:50:42.000000000 -0600
+@@ -20,6 +20,7 @@
+ #include <linux/init.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
++#include <linux/file.h>
+
+ int dir_notify_enable __read_mostly = 1;
+
+@@ -92,6 +93,10 @@ int fcntl_dirnotify(int fd, struct file
+ prev = &odn->dn_next;
+ }
+
++ /* we'd lost the race with close(), sod off silently */
++ if (fcheck(fd) != filp)
++ goto out_free;
++
+ error = f_setown(filp, current->pid, 0);
+ if (error)
+ goto out_free;
diff --git a/tags/2.6.18-12/30066_fcntl_setlk-close-race.patch b/tags/2.6.18-12/30066_fcntl_setlk-close-race.patch
new file mode 100644
index 0000000..9292f22
--- /dev/null
+++ b/tags/2.6.18-12/30066_fcntl_setlk-close-race.patch
@@ -0,0 +1,76 @@
+commit 0b2bac2f1ea0d33a3621b27ca68b9ae760fca2e9
+Author: Al Viro <viro@zeniv.linux.org.uk>
+Date: Tue May 6 13:58:34 2008 -0400
+
+ [PATCH] fix SMP ordering hole in fcntl_setlk()
+
+ fcntl_setlk()/close() race prevention has a subtle hole - we need to
+ make sure that if we *do* have an fcntl/close race on an SMP box, the
+ access to the descriptor table and inode->i_flock won't get reordered.
+
+ As it is, we get STORE inode->i_flock, LOAD descriptor table entry vs.
+ STORE descriptor table entry, LOAD inode->i_flock with not a single
+ lock in common on both sides. We do have BKL around the first STORE,
+ but check in locks_remove_posix() is outside of BKL and for a good
+ reason - we don't want BKL on common path of close(2).
+
+ Solution is to hold ->file_lock around fcheck() in there; that orders
+ us wrt removal from descriptor table that preceded locks_remove_posix()
+ on close path and we either come first (in which case eviction will be
+ handled by the close side) or we'll see the effect of close and do
+ eviction ourselves. Note that even though it's read-only access,
+ we do need ->file_lock here - rcu_read_lock() won't be enough to
+ order the things.
+
+ Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@hp.com>
+
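+Schematically, the two racing sequences (no lock in common, so each LOAD
+may complete before the other CPU's STORE becomes visible):
+
+ CPU A: fcntl(F_SETLK)             CPU B: close(fd)
+ =============================     =============================
+ STORE inode->i_flock (add lock)   STORE descriptor table entry
+ LOAD  descriptor table (fcheck)   LOAD  inode->i_flock
+
+If both LOADs observe the pre-STORE values, each side concludes the other
+will do the eviction and the just-acquired lock is never released. Holding
+->file_lock around fcheck() forces an ordering between the two accesses.
+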
+diff -urpN linux-source-2.6.18.orig/fs/locks.c linux-source-2.6.18/fs/locks.c
+--- linux-source-2.6.18.orig/fs/locks.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/locks.c 2008-05-06 17:02:29.000000000 -0600
+@@ -1680,6 +1680,7 @@ int fcntl_setlk(unsigned int fd, struct
+ struct file_lock *file_lock = locks_alloc_lock();
+ struct flock flock;
+ struct inode *inode;
++ struct file *f;
+ int error;
+
+ if (file_lock == NULL)
+@@ -1754,7 +1755,15 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
++ /*
++ * we need that spin_lock here - it prevents reordering between
++ * update of inode->i_flock and check for it done in close().
++ * rcu_read_lock() wouldn't do.
++ */
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (!error && f != filp && flock.l_type != F_UNLCK) {
+ flock.l_type = F_UNLCK;
+ goto again;
+ }
+@@ -1823,6 +1832,7 @@ int fcntl_setlk64(unsigned int fd, struc
+ struct file_lock *file_lock = locks_alloc_lock();
+ struct flock64 flock;
+ struct inode *inode;
++ struct file *f;
+ int error;
+
+ if (file_lock == NULL)
+@@ -1897,7 +1907,10 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (!error && f != filp && flock.l_type != F_UNLCK) {
+ flock.l_type = F_UNLCK;
+ goto again;
+ }
diff --git a/tags/2.6.18-12/30067_sit-missing-kfree_skb-on-pskb_may_pull.patch b/tags/2.6.18-12/30067_sit-missing-kfree_skb-on-pskb_may_pull.patch
new file mode 100644
index 0000000..cffb4b1
--- /dev/null
+++ b/tags/2.6.18-12/30067_sit-missing-kfree_skb-on-pskb_may_pull.patch
@@ -0,0 +1,26 @@
+commit 36ca34cc3b8335eb1fe8bd9a1d0a2592980c3f02
+Author: David S. Miller <davem@davemloft.net>
+Date: Thu May 8 23:40:26 2008 -0700
+
+ sit: Add missing kfree_skb() on pskb_may_pull() failure.
+
+ Noticed by Paul Marks <paul@pmarks.net>.
+
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Adjusted to apply to Debian's 2.6.24 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.24.orig/net/ipv6/sit.c linux-source-2.6.24/net/ipv6/sit.c
+--- linux-source-2.6.24.orig/net/ipv6/sit.c 2008-01-24 15:58:37.000000000 -0700
++++ linux-source-2.6.24/net/ipv6/sit.c 2008-05-21 00:00:08.000000000 -0600
+@@ -395,9 +395,9 @@ static int ipip6_rcv(struct sk_buff *skb
+ }
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+- kfree_skb(skb);
+ read_unlock(&ipip6_lock);
+ out:
++ kfree_skb(skb);
+ return 0;
+ }
+
diff --git a/tags/2.6.18-12/30068_hrtimer-prevent-overrun.patch b/tags/2.6.18-12/30068_hrtimer-prevent-overrun.patch
new file mode 100644
index 0000000..d8c622e
--- /dev/null
+++ b/tags/2.6.18-12/30068_hrtimer-prevent-overrun.patch
@@ -0,0 +1,38 @@
+commit 13788ccc41ceea5893f9c747c59bc0b28f2416c2
+Author: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri Mar 16 13:38:20 2007 -0800
+
+ [PATCH] hrtimer: prevent overrun DoS in hrtimer_forward()
+
+ hrtimer_forward() does not check for the possible overflow of
+ timer->expires. This can happen on 64 bit machines with large interval
+ values and results currently in an endless loop in the softirq because the
+ expiry value becomes negative and therefore the timer is expired all the
+ time.
+
+ Check for this condition and set the expiry value to the max. expiry time
+ in the future. The fix should be applied to stable kernel series as well.
+
+ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ Acked-by: Ingo Molnar <mingo@elte.hu>
+ Cc: <stable@kernel.org>
+ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index ec4cb9f..5e7122d 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -644,6 +644,12 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+ orun++;
+ }
+ timer->expires = ktime_add(timer->expires, interval);
++ /*
++ * Make sure, that the result did not wrap with a very large
++ * interval.
++ */
++ if (timer->expires.tv64 < 0)
++ timer->expires = ktime_set(KTIME_SEC_MAX, 0);
+
+ return orun;
+ }
diff --git a/tags/2.6.18-12/30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch b/tags/2.6.18-12/30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch
new file mode 100644
index 0000000..6bd6bd1
--- /dev/null
+++ b/tags/2.6.18-12/30069_ktime-fix-MTIME_SEC_MAX-on-32-bit.patch
@@ -0,0 +1,29 @@
+commit 5379058b718ac6354ba99cc74d10c28d632dc28a
+Author: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri Mar 16 14:15:57 2007 -0800
+
+ [PATCH] fix KTIME_SEC_MAX on 32-bit
+
+ The maximum seconds value we can handle on 32-bit is LONG_MAX.
+
+ Cc: Ingo Molnar <mingo@elte.hu>
+ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
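+The arithmetic, as a standalone check (KTIME_MAX here mirrors the 64-bit
+definition in the hunk below):
+
+#include <stdio.h>
+
+int main(void)
+{
+        long long ktime_max = ~(1ULL << 63);  /* (s64)~((u64)1 << 63) */
+
+        printf("KTIME_MAX / NSEC_PER_SEC = %lld\n",
+               ktime_max / 1000000000LL);     /* ~9.2e9 seconds */
+        printf("LONG_MAX with a 32-bit long = %d\n", 2147483647);
+        return 0;
+}
+
+Seconds values end up in C longs (e.g. timespec.tv_sec), so on 32-bit the
+largest usable value is LONG_MAX, not the ~9.2e9 the 64-bit division gives.
+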
+diff --git a/include/linux/ktime.h b/include/linux/ktime.h
+index c68c7ac..248305b 100644
+--- a/include/linux/ktime.h
++++ b/include/linux/ktime.h
+@@ -57,7 +57,11 @@ typedef union {
+ } ktime_t;
+
+ #define KTIME_MAX ((s64)~((u64)1 << 63))
+-#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
++#if (BITS_PER_LONG == 64)
++# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
++#else
++# define KTIME_SEC_MAX LONG_MAX
++#endif
+
+ /*
+ * ktime_t definitions when using the 64-bit scalar representation:
diff --git a/tags/2.6.18-12/30070_amd64-cs-corruption.patch b/tags/2.6.18-12/30070_amd64-cs-corruption.patch
new file mode 100644
index 0000000..da24cd3
--- /dev/null
+++ b/tags/2.6.18-12/30070_amd64-cs-corruption.patch
@@ -0,0 +1,12 @@
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/kernel/entry.S linux-source-2.6.18/arch/x86_64/kernel/entry.S
+--- linux-source-2.6.18.orig/arch/x86_64/kernel/entry.S 2008-04-23 21:53:06.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/kernel/entry.S 2008-05-08 17:19:58.000000000 -0600
+@@ -776,7 +776,7 @@ paranoid_swapgs\trace:
+ swapgs
+ paranoid_restore\trace:
+ RESTORE_ALL 8
+- iretq
++ jmp iret_label
+ paranoid_userspace\trace:
+ GET_THREAD_INFO(%rcx)
+ movl threadinfo_flags(%rcx),%ebx
diff --git a/tags/2.6.18-12/30071_dccp-feature-length-check.patch b/tags/2.6.18-12/30071_dccp-feature-length-check.patch
new file mode 100644
index 0000000..9ceb18c
--- /dev/null
+++ b/tags/2.6.18-12/30071_dccp-feature-length-check.patch
@@ -0,0 +1,15 @@
+diff -urpN linux-source-2.6.18.orig/net/dccp/feat.c linux-source-2.6.18/net/dccp/feat.c
+--- linux-source-2.6.18.orig/net/dccp/feat.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/dccp/feat.c 2008-06-05 19:57:08.000000000 -0600
+@@ -25,6 +25,11 @@ int dccp_feat_change(struct dccp_minisoc
+
+ dccp_pr_debug("feat change type=%d feat=%d\n", type, feature);
+
++ if (len > 3) {
++ if (net_ratelimit())
++ printk("%s: invalid length %d\n", __func__, len);
++ return -EINVAL;
++ }
+ /* XXX sanity check feat change request */
+
+ /* check if that feature is already being negotiated */
diff --git a/tags/2.6.18-12/30072_asn1-ber-decoding-checks.patch b/tags/2.6.18-12/30072_asn1-ber-decoding-checks.patch
new file mode 100644
index 0000000..2b512fe
--- /dev/null
+++ b/tags/2.6.18-12/30072_asn1-ber-decoding-checks.patch
@@ -0,0 +1,103 @@
+From: Chris Wright <chrisw@sous-sol.org>
+Date: Wed, 4 Jun 2008 16:16:33 +0000 (-0700)
+Subject: asn1: additional sanity checking during BER decoding
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=ddb2c43594f22843e9f3153da151deaba1a834c5
+
+asn1: additional sanity checking during BER decoding
+
+- Don't trust a length which is greater than the working buffer.
+ An invalid length could cause overflow when calculating buffer size
+ for decoding the oid.
+
+- An oid length of zero is invalid and allows for an off-by-one error when
+ decoding the oid, because the first subid actually encodes the first two
+ subids.
+
+- A primitive encoding may not have an indefinite length.
+
+Thanks to Wei Wang from McAfee for the report.
+
+Cc: Steven French <sfrench@us.ibm.com>
+Cc: stable@kernel.org
+Acked-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
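+The overflow the first check prevents, shown standalone (the decoder
+computes its allocation as size * sizeof(unsigned long)):
+
+#include <stdio.h>
+#include <limits.h>
+
+int main(void)
+{
+        /* one more than ULONG_MAX / sizeof(unsigned long) */
+        unsigned long size = ULONG_MAX / sizeof(unsigned long) + 1;
+        unsigned long bytes = size * sizeof(unsigned long); /* wraps to 0 */
+
+        printf("size %lu -> alloc length %lu\n", size, bytes);
+        return 0;
+}
+
+The wrapped length makes kmalloc() return a buffer far smaller than the
+number of subids the decoder goes on to write, hence the bound checked
+before the allocation.
+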
+diff -urpN linux-source-2.6.18.orig/fs/cifs/asn1.c linux-source-2.6.18/fs/cifs/asn1.c
+--- linux-source-2.6.18.orig/fs/cifs/asn1.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/cifs/asn1.c 2008-06-05 21:52:32.000000000 -0600
+@@ -182,6 +182,11 @@ asn1_length_decode(struct asn1_ctx *ctx,
+ }
+ }
+ }
++
++ /* don't trust len bigger than ctx buffer */
++ if (*len > ctx->end - ctx->pointer)
++ return 0;
++
+ return 1;
+ }
+
+@@ -199,6 +204,10 @@ asn1_header_decode(struct asn1_ctx *ctx,
+ if (!asn1_length_decode(ctx, &def, &len))
+ return 0;
+
++ /* primitive shall be definite, indefinite shall be constructed */
++ if (*con == ASN1_PRI && !def)
++ return 0;
++
+ if (def)
+ *eoc = ctx->pointer + len;
+ else
+@@ -385,6 +394,11 @@ asn1_oid_decode(struct asn1_ctx *ctx,
+ unsigned long *optr;
+
+ size = eoc - ctx->pointer + 1;
++
++ /* first subid actually encodes first two subids */
++ if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
++ return 0;
++
+ *oid = kmalloc(size * sizeof (unsigned long), GFP_ATOMIC);
+ if (*oid == NULL) {
+ return 0;
+diff -urpN linux-source-2.6.18.orig/net/ipv4/netfilter/ip_nat_snmp_basic.c linux-source-2.6.18/net/ipv4/netfilter/ip_nat_snmp_basic.c
+--- linux-source-2.6.18.orig/net/ipv4/netfilter/ip_nat_snmp_basic.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/netfilter/ip_nat_snmp_basic.c 2008-06-05 21:53:29.000000000 -0600
+@@ -235,6 +235,11 @@ static unsigned char asn1_length_decode(
+ }
+ }
+ }
++
++ /* don't trust len bigger than ctx buffer */
++ if (*len > ctx->end - ctx->pointer)
++ return 0;
++
+ return 1;
+ }
+
+@@ -253,6 +258,10 @@ static unsigned char asn1_header_decode(
+ if (!asn1_length_decode(ctx, &def, &len))
+ return 0;
+
++ /* primitive shall be definite, indefinite shall be constructed */
++ if (*con == ASN1_PRI && !def)
++ return 0;
++
+ if (def)
+ *eoc = ctx->pointer + len;
+ else
+@@ -437,6 +446,11 @@ static unsigned char asn1_oid_decode(str
+ unsigned long *optr;
+
+ size = eoc - ctx->pointer + 1;
++
++ /* first subid actually encodes first two subids */
++ if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
++ return 0;
++
+ *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
+ if (*oid == NULL) {
+ if (net_ratelimit())
diff --git a/tags/2.6.18-12/30073_nfs-write-corruption.patch b/tags/2.6.18-12/30073_nfs-write-corruption.patch
new file mode 100644
index 0000000..a657673
--- /dev/null
+++ b/tags/2.6.18-12/30073_nfs-write-corruption.patch
@@ -0,0 +1,76 @@
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Thu, 7 Feb 2008 22:24:07 +0000 (-0500)
+Subject: NFS: Fix a potential file corruption issue when writing
+X-Git-Tag: v2.6.25-rc1~286^2~1
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=5d47a35600270e7115061cb1320ee60ae9bcb6b8
+
+NFS: Fix a potential file corruption issue when writing
+
+If the inode is flagged as having an invalid mapping, then we can't rely on
+the PageUptodate() flag. Ensure that we don't use the "anti-fragmentation"
+write optimisation in nfs_updatepage(), since that will cause NFS to write
+out areas of the page that are no longer guaranteed to be up to date.
+
+A potential corruption could occur in the following scenario:
+
+client 1 client 2
+=============== ===============
+ fd=open("f",O_CREAT|O_WRONLY,0644);
+ write(fd,"fubar\n",6); // cache last page
+ close(fd);
+fd=open("f",O_WRONLY|O_APPEND);
+write(fd,"foo\n",4);
+close(fd);
+
+ fd=open("f",O_WRONLY|O_APPEND);
+ write(fd,"bar\n",4);
+ close(fd);
+-----
+The bug may lead to the file "f" reading 'fubar\n\0\0\0\nbar\n' because
+client 2 does not update the cached page after re-opening the file for
+write. Instead it keeps it marked as PageUptodate() until someone calls
+invalidate_inode_pages2() (typically by calling read()).
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+---
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/fs/nfs/write.c linux-source-2.6.18/fs/nfs/write.c
+--- linux-source-2.6.18.orig/fs/nfs/write.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/fs/nfs/write.c 2008-03-13 01:16:30.000000000 -0600
+@@ -805,6 +805,17 @@ int nfs_flush_incompatible(struct file *
+ }
+
+ /*
++ * If the page cache is marked as unsafe or invalid, then we can't rely on
++ * the PageUptodate() flag. In this case, we will need to turn off
++ * write optimisations that depend on the page contents being correct.
++ */
++static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
++{
++ return PageUptodate(page) &&
++ !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
++}
++
++/*
+ * Update and possibly write a cached page of an NFS file.
+ *
+ * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
+@@ -836,10 +847,13 @@ int nfs_updatepage(struct file *file, st
+ }
+
+ /* If we're not using byte range locks, and we know the page
+- * is entirely in cache, it may be more efficient to avoid
+- * fragmenting write requests.
++ * is up to date, it may be more efficient to extend the write
++ * to cover the entire page in order to avoid fragmentation
++ * inefficiencies.
+ */
+- if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
++ if (nfs_write_pageuptodate(page, inode) &&
++ inode->i_flock == NULL &&
++ !(file->f_flags & O_SYNC)) {
+ loff_t end_offs = i_size_read(inode) - 1;
+ unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
+
diff --git a/tags/2.6.18-12/30074_x86-clear-df-before-calling-signal-handler.patch b/tags/2.6.18-12/30074_x86-clear-df-before-calling-signal-handler.patch
new file mode 100644
index 0000000..76481fb
--- /dev/null
+++ b/tags/2.6.18-12/30074_x86-clear-df-before-calling-signal-handler.patch
@@ -0,0 +1,57 @@
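+Background for the change below: the i386 and x86-64 ABIs require the
+direction flag to be clear on function entry, and compiled code relies on
+that (memcpy/memset may be emitted as rep movs/stos). If a signal is
+delivered while DF happens to be set and the kernel does not clear it
+when building the signal frame, string operations inside the handler run
+backwards and corrupt memory. The fix clears X86_EFLAGS_DF along with the
+trap flag in every signal-frame setup path.
+
+A contrived userspace sketch that merely makes the DF=1 window visible to
+a signal (assumes gcc inline asm):
+
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+
+static void handler(int sig)
+{
+        char buf[64];
+
+        memset(buf, 0, sizeof(buf));  /* compiled code assumes DF=0 here */
+        write(1, "in handler\n", 11);
+        _exit(0);
+}
+
+int main(void)
+{
+        signal(SIGALRM, handler);
+        alarm(1);
+        for (;;)
+                __asm__ volatile("std; nop; cld");  /* brief DF=1 windows */
+}
+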
+Index: linux-2.6.18.8/arch/i386/kernel/signal.c
+===================================================================
+--- linux-2.6.18.8.orig/arch/i386/kernel/signal.c
++++ linux-2.6.18.8/arch/i386/kernel/signal.c
+@@ -391,7 +391,7 @@ static int setup_frame(int sig, struct k
+ * The tracer may want to single-step inside the
+ * handler too.
+ */
+- regs->eflags &= ~TF_MASK;
++ regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+@@ -485,7 +485,7 @@ static int setup_rt_frame(int sig, struc
+ * The tracer may want to single-step inside the
+ * handler too.
+ */
+- regs->eflags &= ~TF_MASK;
++ regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+Index: linux-2.6.18.8/arch/x86_64/ia32/ia32_signal.c
+===================================================================
+--- linux-2.6.18.8.orig/arch/x86_64/ia32/ia32_signal.c
++++ linux-2.6.18.8/arch/x86_64/ia32/ia32_signal.c
+@@ -493,7 +493,7 @@ int ia32_setup_frame(int sig, struct k_s
+ regs->ss = __USER32_DS;
+
+ set_fs(USER_DS);
+- regs->eflags &= ~TF_MASK;
++ regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+@@ -589,7 +589,7 @@ int ia32_setup_rt_frame(int sig, struct
+ regs->ss = __USER32_DS;
+
+ set_fs(USER_DS);
+- regs->eflags &= ~TF_MASK;
++ regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
+Index: linux-2.6.18.8/arch/x86_64/kernel/signal.c
+===================================================================
+--- linux-2.6.18.8.orig/arch/x86_64/kernel/signal.c
++++ linux-2.6.18.8/arch/x86_64/kernel/signal.c
+@@ -302,7 +302,7 @@ static int setup_rt_frame(int sig, struc
+ see include/asm-x86_64/uaccess.h for details. */
+ set_fs(USER_DS);
+
+- regs->eflags &= ~TF_MASK;
++ regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+ #ifdef DEBUG_SIG
diff --git a/tags/2.6.18-12/30075_3w-xxxx-bigmem-corruption.patch b/tags/2.6.18-12/30075_3w-xxxx-bigmem-corruption.patch
new file mode 100644
index 0000000..400fcdf
--- /dev/null
+++ b/tags/2.6.18-12/30075_3w-xxxx-bigmem-corruption.patch
@@ -0,0 +1,42 @@
+Changes extracted from:
+ http://www.3ware.com/KB/attachments/183-3w-xxxx-2.6.18-GUIDa2c1e9da5ebd4284a41f7be6cee86ff1.tgz
+Described at:
+ http://www.3ware.com/KB/article.aspx?id=15243
+
+Recent upstream kernels have been converted to the new
+sg list accessor API, so this fix isn't applicable there.
+
+diff -urpN linux-source-2.6.18.orig/drivers/scsi/3w-xxxx.c linux-source-2.6.18/drivers/scsi/3w-xxxx.c
+--- linux-source-2.6.18.orig/drivers/scsi/3w-xxxx.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/scsi/3w-xxxx.c 2008-04-21 21:34:10.000000000 -0600
+@@ -1286,7 +1286,7 @@ static int tw_map_scsi_sg_data(struct pc
+ if (cmd->use_sg == 0)
+ return 0;
+
+- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
++ use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, cmd->sc_data_direction);
+
+ if (use_sg == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
+@@ -1308,7 +1308,7 @@ static u32 tw_map_scsi_single_data(struc
+ if (cmd->request_bufflen == 0)
+ return 0;
+
+- mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, DMA_BIDIRECTIONAL);
++ mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, cmd->sc_data_direction);
+
+ if (mapping == 0) {
+ printk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data(): pci_map_page() failed.\n");
+@@ -1327,10 +1327,10 @@ static void tw_unmap_scsi_data(struct pc
+
+ switch(cmd->SCp.phase) {
+ case TW_PHASE_SINGLE:
+- pci_unmap_page(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
++ pci_unmap_page(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, cmd->sc_data_direction);
+ break;
+ case TW_PHASE_SGLIST:
+- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
++ pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, cmd->sc_data_direction);
+ break;
+ }
+ } /* End tw_unmap_scsi_data() */
diff --git a/tags/2.6.18-12/30076_dnotify-race-locking.patch b/tags/2.6.18-12/30076_dnotify-race-locking.patch
new file mode 100644
index 0000000..a186b18
--- /dev/null
+++ b/tags/2.6.18-12/30076_dnotify-race-locking.patch
@@ -0,0 +1,29 @@
+Index: linux-2.6.18.8/fs/dnotify.c
+===================================================================
+--- linux-2.6.18.8.orig/fs/dnotify.c
++++ linux-2.6.18.8/fs/dnotify.c
+@@ -67,6 +67,9 @@ int fcntl_dirnotify(int fd, struct file
+ struct dnotify_struct **prev;
+ struct inode *inode;
+ fl_owner_t id = current->files;
++#ifndef __GENKSYMS__
++ struct file *f;
++#endif
+ int error = 0;
+
+ if ((arg & ~DN_MULTISHOT) == 0) {
+@@ -93,8 +96,13 @@ int fcntl_dirnotify(int fd, struct file
+ prev = &odn->dn_next;
+ }
+
++ rcu_read_lock();
++ f = fcheck(fd);
++ rcu_read_unlock();
+ /* we'd lost the race with close(), sod off silently */
+- if (fcheck(fd) != filp)
++ /* note that inode->i_lock prevents reordering problems
++ * between accesses to descriptor table and ->i_dnotify */
++ if (f != filp)
+ goto out_free;
+
+ error = f_setown(filp, current->pid, 0);
diff --git a/tags/2.6.18-12/30077_sctp-make-sure-n-sizeof-does-not-overflow.patch b/tags/2.6.18-12/30077_sctp-make-sure-n-sizeof-does-not-overflow.patch
new file mode 100644
index 0000000..a36c02d
--- /dev/null
+++ b/tags/2.6.18-12/30077_sctp-make-sure-n-sizeof-does-not-overflow.patch
@@ -0,0 +1,29 @@
+commit 735ce972fbc8a65fb17788debd7bbe7b4383cc62
+Author: David S. Miller <davem@davemloft.net>
+Date: Fri Jun 20 22:04:34 2008 -0700
+
+ sctp: Make sure N * sizeof(union sctp_addr) does not overflow.
+
+ As noticed by Gabriel Campana, the kmalloc() length arg
+ passed in by sctp_getsockopt_local_addrs_old() can overflow
+ if ->addr_num is large enough.
+
+ Therefore, enforce an appropriate limit.
+
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
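+Standalone illustration of the wrap on a 32-bit kernel (uint32_t stands in
+for the 32-bit size_t, and 28 bytes is an illustrative stand-in for
+sizeof(union sctp_addr)):
+
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void)
+{
+        uint32_t addr_num = 153391690;  /* just over 2^32 / 28 */
+        uint32_t len = addr_num * 28u;  /* wraps modulo 2^32 to 24 */
+
+        printf("addr_num %u -> kmalloc length %u\n", addr_num, len);
+        return 0;
+}
+
+The undersized buffer can then be overrun as the addresses are copied in,
+hence the INT_MAX / sizeof(union sctp_addr) bound the fix enforces.
+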
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e7e3baf..0dbcde6 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4401,7 +4401,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
+ if (copy_from_user(&getaddrs, optval, len))
+ return -EFAULT;
+
+- if (getaddrs.addr_num <= 0) return -EINVAL;
++ if (getaddrs.addr_num <= 0 ||
++ getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
++ return -EINVAL;
+ /*
+ * For UDP-style sockets, id specifies the association to query.
+ * If the id field is set to the value '0' then the locally bound
diff --git a/tags/2.6.18-12/30078_esp-iv-in-linear-part-of-skb.patch b/tags/2.6.18-12/30078_esp-iv-in-linear-part-of-skb.patch
new file mode 100644
index 0000000..59fb682
--- /dev/null
+++ b/tags/2.6.18-12/30078_esp-iv-in-linear-part-of-skb.patch
@@ -0,0 +1,48 @@
+From: Thomas Graf <tgraf@suug.ch>
+Date: Thu, 27 Mar 2008 23:08:03 +0000 (-0700)
+Subject: (CVE-2007-6282) [ESP]: Ensure IV is in linear part of the skb to avoid BUG() due ...
+X-Git-Url: http://kernel.ubuntu.com/git?p=ubuntu-security%2Fubuntu-dapper.git;a=commitdiff_plain;h=3f83e11fbd494f5e40db1a7bbd2b780118b712a1
+
+(CVE-2007-6282) [ESP]: Ensure IV is in linear part of the skb to avoid BUG() due to OOB access
+
+[linux-2.6: 920fc941a9617f95ccb283037fe6f8a38d95bb69]
+
+ESP does not account for the IV size when calling pskb_may_pull() to
+ensure everything it accesses directly is within the linear part of a
+potential fragment. This results in a BUG() being triggered when
+both the IPv4 and IPv6 ESP stacks are fed an skb where the first
+fragment ends between the end of the ESP header and the end of the IV.
+
+This bug was found by Dirk Nehring <dnehring@gmx.net>.
+
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Kees Cook <kees.cook@canonical.com>
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
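+A minimal sketch, assuming a kernel context and the 2.6.18-era skb API,
+not part of the patch: the essence of the fix is widening the
+pskb_may_pull() request so that the IV, and not just the ESP header,
+is guaranteed to sit in the linear area before it is dereferenced.
+
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/esp.h>	/* struct ip_esp_hdr; header location assumed */
+
+/* Fail cleanly instead of BUG()ing when the IV straddles a fragment. */
+static int esp_pull_header_and_iv(struct sk_buff *skb, unsigned int ivlen)
+{
+	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
+		return -EINVAL;
+	return 0;
+}
+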
+diff -urpN linux-source-2.6.18.orig/net/ipv4/esp4.c linux-source-2.6.18/net/ipv4/esp4.c
+--- linux-source-2.6.18.orig/net/ipv4/esp4.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/esp4.c 2008-06-24 22:47:45.000000000 -0600
+@@ -147,7 +147,7 @@ static int esp_input(struct xfrm_state *
+ struct scatterlist *sg;
+ int padlen;
+
+- if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
++ if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen))
+ goto out;
+
+ if (elen <= 0 || (elen & (blksize-1)))
+diff -urpN linux-source-2.6.18.orig/net/ipv6/esp6.c linux-source-2.6.18/net/ipv6/esp6.c
+--- linux-source-2.6.18.orig/net/ipv6/esp6.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/net/ipv6/esp6.c 2008-06-24 22:47:45.000000000 -0600
+@@ -143,7 +143,7 @@ static int esp6_input(struct xfrm_state
+ int nfrags;
+ int ret = 0;
+
+- if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
++ if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen)) {
+ ret = -EINVAL;
+ goto out;
+ }
diff --git a/tags/2.6.18-12/30079a_amd64-fix-zeroing-on-exception-in-copy_user-pre.patch b/tags/2.6.18-12/30079a_amd64-fix-zeroing-on-exception-in-copy_user-pre.patch
new file mode 100644
index 0000000..17e1346
--- /dev/null
+++ b/tags/2.6.18-12/30079a_amd64-fix-zeroing-on-exception-in-copy_user-pre.patch
@@ -0,0 +1,798 @@
+commit 8d379dad8f1670d233ac67b76b1c5a42ad3714a3
+Author: Jan Beulich <jbeulich@novell.com>
+Date: Tue Sep 26 10:52:32 2006 +0200
+
+ [PATCH] annotate arch/x86_64/lib/*.S
+
+ Add unwind annotations to arch/x86_64/lib/*.S, and also use the macros
+ provided by linux/linkage.h where-ever possible.
+
+ Some of the alternative instructions handling needed to be adjusted so
+ that the replacement code would also have valid unwind information.
+
+ Signed-off-by: Jan Beulich <jbeulich@novell.com>
+ Signed-off-by: Andi Kleen <ak@suse.de>
+
+diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S
+index 1f81b79..9a10a78 100644
+--- a/arch/x86_64/lib/clear_page.S
++++ b/arch/x86_64/lib/clear_page.S
+@@ -1,10 +1,22 @@
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ /*
+ * Zero a page.
+ * rdi page
+ */
+- .globl clear_page
+- .p2align 4
+-clear_page:
++ ALIGN
++clear_page_c:
++ CFI_STARTPROC
++ movl $4096/8,%ecx
++ xorl %eax,%eax
++ rep stosq
++ ret
++ CFI_ENDPROC
++ENDPROC(clear_page)
++
++ENTRY(clear_page)
++ CFI_STARTPROC
+ xorl %eax,%eax
+ movl $4096/64,%ecx
+ .p2align 4
+@@ -23,28 +35,25 @@ clear_page:
+ jnz .Lloop
+ nop
+ ret
+-clear_page_end:
++ CFI_ENDPROC
++.Lclear_page_end:
++ENDPROC(clear_page)
+
+ /* Some CPUs run faster using the string instructions.
+ It is also a lot simpler. Use this when possible */
+
+ #include <asm/cpufeature.h>
+
++ .section .altinstr_replacement,"ax"
++1: .byte 0xeb /* jmp <disp8> */
++ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
++2:
++ .previous
+ .section .altinstructions,"a"
+ .align 8
+- .quad clear_page
+- .quad clear_page_c
+- .byte X86_FEATURE_REP_GOOD
+- .byte clear_page_end-clear_page
+- .byte clear_page_c_end-clear_page_c
+- .previous
+-
+- .section .altinstr_replacement,"ax"
+-clear_page_c:
+- movl $4096/8,%ecx
+- xorl %eax,%eax
+- rep
+- stosq
+- ret
+-clear_page_c_end:
++ .quad clear_page
++ .quad 1b
++ .byte X86_FEATURE_REP_GOOD
++ .byte .Lclear_page_end - clear_page
++ .byte 2b - 1b
+ .previous
+diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S
+index 8fa19d9..0ebb03b 100644
+--- a/arch/x86_64/lib/copy_page.S
++++ b/arch/x86_64/lib/copy_page.S
+@@ -1,17 +1,33 @@
+ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
+
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
++ ALIGN
++copy_page_c:
++ CFI_STARTPROC
++ movl $4096/8,%ecx
++ rep movsq
++ ret
++ CFI_ENDPROC
++ENDPROC(copy_page_c)
++
+ /* Don't use streaming store because it's better when the target
+ ends up in cache. */
+
+ /* Could vary the prefetch distance based on SMP/UP */
+
+- .globl copy_page
+- .p2align 4
+-copy_page:
++ENTRY(copy_page)
++ CFI_STARTPROC
+ subq $3*8,%rsp
++ CFI_ADJUST_CFA_OFFSET 3*8
+ movq %rbx,(%rsp)
++ CFI_REL_OFFSET rbx, 0
+ movq %r12,1*8(%rsp)
++ CFI_REL_OFFSET r12, 1*8
+ movq %r13,2*8(%rsp)
++ CFI_REL_OFFSET r13, 2*8
+
+ movl $(4096/64)-5,%ecx
+ .p2align 4
+@@ -72,30 +88,33 @@ copy_page:
+ jnz .Loop2
+
+ movq (%rsp),%rbx
++ CFI_RESTORE rbx
+ movq 1*8(%rsp),%r12
++ CFI_RESTORE r12
+ movq 2*8(%rsp),%r13
++ CFI_RESTORE r13
+ addq $3*8,%rsp
++ CFI_ADJUST_CFA_OFFSET -3*8
+ ret
++.Lcopy_page_end:
++ CFI_ENDPROC
++ENDPROC(copy_page)
+
+ /* Some CPUs run faster using the string copy instructions.
+ It is also a lot simpler. Use this when possible */
+
+ #include <asm/cpufeature.h>
+
++ .section .altinstr_replacement,"ax"
++1: .byte 0xeb /* jmp <disp8> */
++ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
++2:
++ .previous
+ .section .altinstructions,"a"
+ .align 8
+- .quad copy_page
+- .quad copy_page_c
+- .byte X86_FEATURE_REP_GOOD
+- .byte copy_page_c_end-copy_page_c
+- .byte copy_page_c_end-copy_page_c
+- .previous
+-
+- .section .altinstr_replacement,"ax"
+-copy_page_c:
+- movl $4096/8,%ecx
+- rep
+- movsq
+- ret
+-copy_page_c_end:
++ .quad copy_page
++ .quad 1b
++ .byte X86_FEATURE_REP_GOOD
++ .byte .Lcopy_page_end - copy_page
++ .byte 2b - 1b
+ .previous
+diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
+index f64569b..962f3a6 100644
+--- a/arch/x86_64/lib/copy_user.S
++++ b/arch/x86_64/lib/copy_user.S
+@@ -4,6 +4,9 @@
+ * Functions to copy from and to user space.
+ */
+
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ #define FIX_ALIGNMENT 1
+
+ #include <asm/current.h>
+@@ -12,9 +15,8 @@
+ #include <asm/cpufeature.h>
+
+ /* Standard copy_to_user with segment limit checking */
+- .globl copy_to_user
+- .p2align 4
+-copy_to_user:
++ENTRY(copy_to_user)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%rax)
+ movq %rdi,%rcx
+ addq %rdx,%rcx
+@@ -25,9 +27,11 @@ copy_to_user:
+ .byte 0xe9 /* 32bit jump */
+ .long .Lcug-1f
+ 1:
++ CFI_ENDPROC
++ENDPROC(copy_to_user)
+
+ .section .altinstr_replacement,"ax"
+-3: .byte 0xe9 /* replacement jmp with 8 bit immediate */
++3: .byte 0xe9 /* replacement jmp with 32 bit immediate */
+ .long copy_user_generic_c-1b /* offset */
+ .previous
+ .section .altinstructions,"a"
+@@ -40,9 +44,8 @@ copy_to_user:
+ .previous
+
+ /* Standard copy_from_user with segment limit checking */
+- .globl copy_from_user
+- .p2align 4
+-copy_from_user:
++ENTRY(copy_from_user)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%rax)
+ movq %rsi,%rcx
+ addq %rdx,%rcx
+@@ -50,10 +53,13 @@ copy_from_user:
+ cmpq threadinfo_addr_limit(%rax),%rcx
+ jae bad_from_user
+ /* FALL THROUGH to copy_user_generic */
++ CFI_ENDPROC
++ENDPROC(copy_from_user)
+
+ .section .fixup,"ax"
+ /* must zero dest */
+ bad_from_user:
++ CFI_STARTPROC
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+@@ -61,6 +67,8 @@ bad_from_user:
+ bad_to_user:
+ movl %edx,%eax
+ ret
++ CFI_ENDPROC
++END(bad_from_user)
+ .previous
+
+
+@@ -75,9 +83,8 @@ bad_to_user:
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+- .globl copy_user_generic
+- .p2align 4
+-copy_user_generic:
++ENTRY(copy_user_generic)
++ CFI_STARTPROC
+ .byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
+ .byte 0x66,0x90
+ 1:
+@@ -95,6 +102,8 @@ copy_user_generic:
+ .previous
+ .Lcug:
+ pushq %rbx
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rbx, 0
+ xorl %eax,%eax /*zero for the exception handler */
+
+ #ifdef FIX_ALIGNMENT
+@@ -168,9 +177,13 @@ copy_user_generic:
+ decl %ecx
+ jnz .Lloop_1
+
++ CFI_REMEMBER_STATE
+ .Lende:
+ popq %rbx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rbx
+ ret
++ CFI_RESTORE_STATE
+
+ #ifdef FIX_ALIGNMENT
+ /* align destination */
+@@ -261,6 +274,9 @@ copy_user_generic:
+ .Le_zero:
+ movq %rdx,%rax
+ jmp .Lende
++ CFI_ENDPROC
++ENDPROC(copy_user_generic)
++
+
+ /* Some CPUs run faster using the string copy instructions.
+ This is also a lot simpler. Use them when possible.
+@@ -282,6 +298,7 @@ copy_user_generic:
+ * this please consider this.
+ */
+ copy_user_generic_c:
++ CFI_STARTPROC
+ movl %edx,%ecx
+ shrl $3,%ecx
+ andl $7,%edx
+@@ -294,6 +311,8 @@ copy_user_generic_c:
+ ret
+ 3: lea (%rdx,%rcx,8),%rax
+ ret
++ CFI_ENDPROC
++END(copy_user_generic_c)
+
+ .section __ex_table,"a"
+ .quad 1b,3b
+diff --git a/arch/x86_64/lib/csum-copy.S b/arch/x86_64/lib/csum-copy.S
+index 72fd55e..f0dba36 100644
+--- a/arch/x86_64/lib/csum-copy.S
++++ b/arch/x86_64/lib/csum-copy.S
+@@ -5,8 +5,9 @@
+ * License. See the file COPYING in the main directory of this archive
+ * for more details. No warranty for anything given at all.
+ */
+- #include <linux/linkage.h>
+- #include <asm/errno.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++#include <asm/errno.h>
+
+ /*
+ * Checksum copy with exception handling.
+@@ -53,19 +54,24 @@
+ .endm
+
+
+- .globl csum_partial_copy_generic
+- .p2align 4
+-csum_partial_copy_generic:
++ENTRY(csum_partial_copy_generic)
++ CFI_STARTPROC
+ cmpl $3*64,%edx
+ jle .Lignore
+
+ .Lignore:
+ subq $7*8,%rsp
++ CFI_ADJUST_CFA_OFFSET 7*8
+ movq %rbx,2*8(%rsp)
++ CFI_REL_OFFSET rbx, 2*8
+ movq %r12,3*8(%rsp)
++ CFI_REL_OFFSET r12, 3*8
+ movq %r14,4*8(%rsp)
++ CFI_REL_OFFSET r14, 4*8
+ movq %r13,5*8(%rsp)
++ CFI_REL_OFFSET r13, 5*8
+ movq %rbp,6*8(%rsp)
++ CFI_REL_OFFSET rbp, 6*8
+
+ movq %r8,(%rsp)
+ movq %r9,1*8(%rsp)
+@@ -208,14 +214,22 @@ csum_partial_copy_generic:
+ addl %ebx,%eax
+ adcl %r9d,%eax /* carry */
+
++ CFI_REMEMBER_STATE
+ .Lende:
+ movq 2*8(%rsp),%rbx
++ CFI_RESTORE rbx
+ movq 3*8(%rsp),%r12
++ CFI_RESTORE r12
+ movq 4*8(%rsp),%r14
++ CFI_RESTORE r14
+ movq 5*8(%rsp),%r13
++ CFI_RESTORE r13
+ movq 6*8(%rsp),%rbp
++ CFI_RESTORE rbp
+ addq $7*8,%rsp
++ CFI_ADJUST_CFA_OFFSET -7*8
+ ret
++ CFI_RESTORE_STATE
+
+ /* Exception handlers. Very simple, zeroing is done in the wrappers */
+ .Lbad_source:
+@@ -231,3 +245,5 @@ csum_partial_copy_generic:
+ jz .Lende
+ movl $-EFAULT,(%rax)
+ jmp .Lende
++ CFI_ENDPROC
++ENDPROC(csum_partial_copy_generic)
+diff --git a/arch/x86_64/lib/getuser.S b/arch/x86_64/lib/getuser.S
+index 3844d5e..5448876 100644
+--- a/arch/x86_64/lib/getuser.S
++++ b/arch/x86_64/lib/getuser.S
+@@ -27,25 +27,26 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/dwarf2.h>
+ #include <asm/page.h>
+ #include <asm/errno.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+
+ .text
+- .p2align 4
+-.globl __get_user_1
+-__get_user_1:
++ENTRY(__get_user_1)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ cmpq threadinfo_addr_limit(%r8),%rcx
+ jae bad_get_user
+ 1: movzb (%rcx),%edx
+ xorl %eax,%eax
+ ret
++ CFI_ENDPROC
++ENDPROC(__get_user_1)
+
+- .p2align 4
+-.globl __get_user_2
+-__get_user_2:
++ENTRY(__get_user_2)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ addq $1,%rcx
+ jc 20f
+@@ -57,10 +58,11 @@ __get_user_2:
+ ret
+ 20: decq %rcx
+ jmp bad_get_user
++ CFI_ENDPROC
++ENDPROC(__get_user_2)
+
+- .p2align 4
+-.globl __get_user_4
+-__get_user_4:
++ENTRY(__get_user_4)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ addq $3,%rcx
+ jc 30f
+@@ -72,10 +74,11 @@ __get_user_4:
+ ret
+ 30: subq $3,%rcx
+ jmp bad_get_user
++ CFI_ENDPROC
++ENDPROC(__get_user_4)
+
+- .p2align 4
+-.globl __get_user_8
+-__get_user_8:
++ENTRY(__get_user_8)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ addq $7,%rcx
+ jc 40f
+@@ -87,11 +90,16 @@ __get_user_8:
+ ret
+ 40: subq $7,%rcx
+ jmp bad_get_user
++ CFI_ENDPROC
++ENDPROC(__get_user_8)
+
+ bad_get_user:
++ CFI_STARTPROC
+ xorl %edx,%edx
+ movq $(-EFAULT),%rax
+ ret
++ CFI_ENDPROC
++END(bad_get_user)
+
+ .section __ex_table,"a"
+ .quad 1b,bad_get_user
+diff --git a/arch/x86_64/lib/iomap_copy.S b/arch/x86_64/lib/iomap_copy.S
+index 8bbade5..05a95e7 100644
+--- a/arch/x86_64/lib/iomap_copy.S
++++ b/arch/x86_64/lib/iomap_copy.S
+@@ -15,12 +15,16 @@
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ /*
+ * override generic version in lib/iomap_copy.c
+ */
+- .globl __iowrite32_copy
+- .p2align 4
+-__iowrite32_copy:
++ENTRY(__iowrite32_copy)
++ CFI_STARTPROC
+ movl %edx,%ecx
+ rep movsd
+ ret
++ CFI_ENDPROC
++ENDPROC(__iowrite32_copy)
+diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S
+index 5554948..967b22f 100644
+--- a/arch/x86_64/lib/memcpy.S
++++ b/arch/x86_64/lib/memcpy.S
+@@ -1,6 +1,10 @@
+ /* Copyright 2002 Andi Kleen */
+
+- #include <asm/cpufeature.h>
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++#include <asm/cpufeature.h>
++
+ /*
+ * memcpy - Copy a memory block.
+ *
+@@ -13,12 +17,26 @@
+ * rax original destination
+ */
+
+- .globl __memcpy
+- .globl memcpy
+- .p2align 4
+-__memcpy:
+-memcpy:
++ ALIGN
++memcpy_c:
++ CFI_STARTPROC
++ movq %rdi,%rax
++ movl %edx,%ecx
++ shrl $3,%ecx
++ andl $7,%edx
++ rep movsq
++ movl %edx,%ecx
++ rep movsb
++ ret
++ CFI_ENDPROC
++ENDPROC(memcpy_c)
++
++ENTRY(__memcpy)
++ENTRY(memcpy)
++ CFI_STARTPROC
+ pushq %rbx
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rbx, 0
+ movq %rdi,%rax
+
+ movl %edx,%ecx
+@@ -86,36 +104,27 @@ memcpy:
+
+ .Lende:
+ popq %rbx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rbx
+ ret
+ .Lfinal:
++ CFI_ENDPROC
++ENDPROC(memcpy)
++ENDPROC(__memcpy)
+
+ /* Some CPUs run faster using the string copy instructions.
+ It is also a lot simpler. Use this when possible */
+
++ .section .altinstr_replacement,"ax"
++1: .byte 0xeb /* jmp <disp8> */
++ .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
++2:
++ .previous
+ .section .altinstructions,"a"
+ .align 8
+- .quad memcpy
+- .quad memcpy_c
+- .byte X86_FEATURE_REP_GOOD
+- .byte .Lfinal-memcpy
+- .byte memcpy_c_end-memcpy_c
+- .previous
+-
+- .section .altinstr_replacement,"ax"
+- /* rdi destination
+- * rsi source
+- * rdx count
+- */
+-memcpy_c:
+- movq %rdi,%rax
+- movl %edx,%ecx
+- shrl $3,%ecx
+- andl $7,%edx
+- rep
+- movsq
+- movl %edx,%ecx
+- rep
+- movsb
+- ret
+-memcpy_c_end:
++ .quad memcpy
++ .quad 1b
++ .byte X86_FEATURE_REP_GOOD
++ .byte .Lfinal - memcpy
++ .byte 2b - 1b
+ .previous
+diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S
+index ad397f2..09ed1f6 100644
+--- a/arch/x86_64/lib/memset.S
++++ b/arch/x86_64/lib/memset.S
+@@ -1,4 +1,9 @@
+ /* Copyright 2002 Andi Kleen, SuSE Labs */
++
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++
+ /*
+ * ISO C memset - set a memory block to a byte value.
+ *
+@@ -8,11 +13,29 @@
+ *
+ * rax original destination
+ */
+- .globl __memset
+- .globl memset
+- .p2align 4
+-memset:
+-__memset:
++ ALIGN
++memset_c:
++ CFI_STARTPROC
++ movq %rdi,%r9
++ movl %edx,%r8d
++ andl $7,%r8d
++ movl %edx,%ecx
++ shrl $3,%ecx
++ /* expand byte value */
++ movzbl %sil,%esi
++ movabs $0x0101010101010101,%rax
++ mulq %rsi /* with rax, clobbers rdx */
++ rep stosq
++ movl %r8d,%ecx
++ rep stosb
++ movq %r9,%rax
++ ret
++ CFI_ENDPROC
++ENDPROC(memset_c)
++
++ENTRY(memset)
++ENTRY(__memset)
++ CFI_STARTPROC
+ movq %rdi,%r10
+ movq %rdx,%r11
+
+@@ -25,6 +48,7 @@ __memset:
+ movl %edi,%r9d
+ andl $7,%r9d
+ jnz .Lbad_alignment
++ CFI_REMEMBER_STATE
+ .Lafter_bad_alignment:
+
+ movl %r11d,%ecx
+@@ -75,6 +99,7 @@ __memset:
+ movq %r10,%rax
+ ret
+
++ CFI_RESTORE_STATE
+ .Lbad_alignment:
+ cmpq $7,%r11
+ jbe .Lhandle_7
+@@ -84,42 +109,26 @@ __memset:
+ addq %r8,%rdi
+ subq %r8,%r11
+ jmp .Lafter_bad_alignment
++.Lfinal:
++ CFI_ENDPROC
++ENDPROC(memset)
++ENDPROC(__memset)
+
+ /* Some CPUs run faster using the string instructions.
+ It is also a lot simpler. Use this when possible */
+
+ #include <asm/cpufeature.h>
+
++ .section .altinstr_replacement,"ax"
++1: .byte 0xeb /* jmp <disp8> */
++ .byte (memset_c - memset) - (2f - 1b) /* offset */
++2:
++ .previous
+ .section .altinstructions,"a"
+ .align 8
+- .quad memset
+- .quad memset_c
+- .byte X86_FEATURE_REP_GOOD
+- .byte memset_c_end-memset_c
+- .byte memset_c_end-memset_c
+- .previous
+-
+- .section .altinstr_replacement,"ax"
+- /* rdi destination
+- * rsi value
+- * rdx count
+- */
+-memset_c:
+- movq %rdi,%r9
+- movl %edx,%r8d
+- andl $7,%r8d
+- movl %edx,%ecx
+- shrl $3,%ecx
+- /* expand byte value */
+- movzbl %sil,%esi
+- movabs $0x0101010101010101,%rax
+- mulq %rsi /* with rax, clobbers rdx */
+- rep
+- stosq
+- movl %r8d,%ecx
+- rep
+- stosb
+- movq %r9,%rax
+- ret
+-memset_c_end:
++ .quad memset
++ .quad 1b
++ .byte X86_FEATURE_REP_GOOD
++ .byte .Lfinal - memset
++ .byte 2b - 1b
+ .previous
+diff --git a/arch/x86_64/lib/putuser.S b/arch/x86_64/lib/putuser.S
+index 7f55939..4989f5a 100644
+--- a/arch/x86_64/lib/putuser.S
++++ b/arch/x86_64/lib/putuser.S
+@@ -25,25 +25,26 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/dwarf2.h>
+ #include <asm/page.h>
+ #include <asm/errno.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+
+ .text
+- .p2align 4
+-.globl __put_user_1
+-__put_user_1:
++ENTRY(__put_user_1)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ cmpq threadinfo_addr_limit(%r8),%rcx
+ jae bad_put_user
+ 1: movb %dl,(%rcx)
+ xorl %eax,%eax
+ ret
++ CFI_ENDPROC
++ENDPROC(__put_user_1)
+
+- .p2align 4
+-.globl __put_user_2
+-__put_user_2:
++ENTRY(__put_user_2)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ addq $1,%rcx
+ jc 20f
+@@ -55,10 +56,11 @@ __put_user_2:
+ ret
+ 20: decq %rcx
+ jmp bad_put_user
++ CFI_ENDPROC
++ENDPROC(__put_user_2)
+
+- .p2align 4
+-.globl __put_user_4
+-__put_user_4:
++ENTRY(__put_user_4)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ addq $3,%rcx
+ jc 30f
+@@ -70,10 +72,11 @@ __put_user_4:
+ ret
+ 30: subq $3,%rcx
+ jmp bad_put_user
++ CFI_ENDPROC
++ENDPROC(__put_user_4)
+
+- .p2align 4
+-.globl __put_user_8
+-__put_user_8:
++ENTRY(__put_user_8)
++ CFI_STARTPROC
+ GET_THREAD_INFO(%r8)
+ addq $7,%rcx
+ jc 40f
+@@ -85,10 +88,15 @@ __put_user_8:
+ ret
+ 40: subq $7,%rcx
+ jmp bad_put_user
++ CFI_ENDPROC
++ENDPROC(__put_user_8)
+
+ bad_put_user:
++ CFI_STARTPROC
+ movq $(-EFAULT),%rax
+ ret
++ CFI_ENDPROC
++END(bad_put_user)
+
+ .section __ex_table,"a"
+ .quad 1b,bad_put_user
diff --git a/tags/2.6.18-12/30079b_amd64-fix-zeroing-on-exception-in-copy_user.patch b/tags/2.6.18-12/30079b_amd64-fix-zeroing-on-exception-in-copy_user.patch
new file mode 100644
index 0000000..4530d87
--- /dev/null
+++ b/tags/2.6.18-12/30079b_amd64-fix-zeroing-on-exception-in-copy_user.patch
@@ -0,0 +1,272 @@
+commit 3022d734a54cbd2b65eea9a024564821101b4a9a
+Author: Andi Kleen <ak@suse.de>
+Date: Tue Sep 26 10:52:39 2006 +0200
+
+ [PATCH] Fix zeroing on exception in copy_*_user
+
+ - Don't zero for __copy_from_user_inatomic following i386.
+ This will prevent spurious zeros for parallel file system writers when
+ one takes an exception.
+ - The string instruction version didn't zero the output on
+ exception. Oops.
+
+ Also I cleaned up the code a bit while I was at it and added a minor
+ optimization to the string instruction path.
+
+ Signed-off-by: Andi Kleen <ak@suse.de>
+
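+A sketch of the contract this change enforces, kernel context assumed:
+copy_from_user() must zero whatever it failed to copy, so callers may
+treat the buffer as fully initialized, while the inatomic variant now
+skips that zeroing and its callers handle short copies themselves.
+
+#include <asm/uaccess.h>
+
+/* dst is safe to use in full even when the user access faulted. */
+static long read_user_blob(void *dst, const void __user *src,
+			   unsigned long len)
+{
+	if (copy_from_user(dst, src, len))
+		return -EFAULT;	/* uncopied tail was zero-filled */
+	return 0;
+}
+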
+diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
+index 370952c..c3454af 100644
+--- a/arch/x86_64/kernel/x8664_ksyms.c
++++ b/arch/x86_64/kernel/x8664_ksyms.c
+@@ -29,6 +29,7 @@ EXPORT_SYMBOL(__put_user_8);
+ EXPORT_SYMBOL(copy_user_generic);
+ EXPORT_SYMBOL(copy_from_user);
+ EXPORT_SYMBOL(copy_to_user);
++EXPORT_SYMBOL(__copy_from_user_inatomic);
+
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
+index 962f3a6..70bebd3 100644
+--- a/arch/x86_64/lib/copy_user.S
++++ b/arch/x86_64/lib/copy_user.S
+@@ -9,10 +9,29 @@
+
+ #define FIX_ALIGNMENT 1
+
+- #include <asm/current.h>
+- #include <asm/asm-offsets.h>
+- #include <asm/thread_info.h>
+- #include <asm/cpufeature.h>
++#include <asm/current.h>
++#include <asm/asm-offsets.h>
++#include <asm/thread_info.h>
++#include <asm/cpufeature.h>
++
++ .macro ALTERNATIVE_JUMP feature,orig,alt
++0:
++ .byte 0xe9 /* 32bit jump */
++ .long \orig-1f /* by default jump to orig */
++1:
++ .section .altinstr_replacement,"ax"
++2: .byte 0xe9 /* near jump with 32bit immediate */
++ .long \alt-1b /* offset */ /* or alternatively to alt */
++ .previous
++ .section .altinstructions,"a"
++ .align 8
++ .quad 0b
++ .quad 2b
++ .byte \feature /* when feature is set */
++ .byte 5
++ .byte 5
++ .previous
++ .endm
+
+ /* Standard copy_to_user with segment limit checking */
+ ENTRY(copy_to_user)
+@@ -23,25 +42,21 @@ ENTRY(copy_to_user)
+ jc bad_to_user
+ cmpq threadinfo_addr_limit(%rax),%rcx
+ jae bad_to_user
+-2:
+- .byte 0xe9 /* 32bit jump */
+- .long .Lcug-1f
+-1:
++ xorl %eax,%eax /* clear zero flag */
++ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
+-ENDPROC(copy_to_user)
+
+- .section .altinstr_replacement,"ax"
+-3: .byte 0xe9 /* replacement jmp with 32 bit immediate */
+- .long copy_user_generic_c-1b /* offset */
+- .previous
+- .section .altinstructions,"a"
+- .align 8
+- .quad 2b
+- .quad 3b
+- .byte X86_FEATURE_REP_GOOD
+- .byte 5
+- .byte 5
+- .previous
++ENTRY(copy_user_generic)
++ CFI_STARTPROC
++ movl $1,%ecx /* set zero flag */
++ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
++ CFI_ENDPROC
++
++ENTRY(__copy_from_user_inatomic)
++ CFI_STARTPROC
++ xorl %ecx,%ecx /* clear zero flag */
++ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
++ CFI_ENDPROC
+
+ /* Standard copy_from_user with segment limit checking */
+ ENTRY(copy_from_user)
+@@ -52,7 +67,8 @@ ENTRY(copy_from_user)
+ jc bad_from_user
+ cmpq threadinfo_addr_limit(%rax),%rcx
+ jae bad_from_user
+- /* FALL THROUGH to copy_user_generic */
++ movl $1,%ecx /* set zero flag */
++ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
+ ENDPROC(copy_from_user)
+
+@@ -73,37 +89,26 @@ END(bad_from_user)
+
+
+ /*
+- * copy_user_generic - memory copy with exception handling.
++ * copy_user_generic_unrolled - memory copy with exception handling.
++ * This version is for CPUs like P4 that don't have efficient micro code for rep movsq
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
++ * ecx zero flag -- if true zero destination on error
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+-ENTRY(copy_user_generic)
++ENTRY(copy_user_generic_unrolled)
+ CFI_STARTPROC
+- .byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
+- .byte 0x66,0x90
+-1:
+- .section .altinstr_replacement,"ax"
+-2: .byte 0xe9 /* near jump with 32bit immediate */
+- .long copy_user_generic_c-1b /* offset */
+- .previous
+- .section .altinstructions,"a"
+- .align 8
+- .quad copy_user_generic
+- .quad 2b
+- .byte X86_FEATURE_REP_GOOD
+- .byte 5
+- .byte 5
+- .previous
+-.Lcug:
+ pushq %rbx
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rbx, 0
++ pushq %rcx
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rcx, 0
+ xorl %eax,%eax /*zero for the exception handler */
+
+ #ifdef FIX_ALIGNMENT
+@@ -179,6 +184,9 @@ ENTRY(copy_user_generic)
+
+ CFI_REMEMBER_STATE
+ .Lende:
++ popq %rcx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
+ popq %rbx
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_RESTORE rbx
+@@ -265,6 +273,8 @@ ENTRY(copy_user_generic)
+ addl %ecx,%edx
+ /* edx: bytes to zero, rdi: dest, eax:zero */
+ .Lzero_rest:
++ cmpl $0,(%rsp)
++ jz .Le_zero
+ movq %rdx,%rcx
+ .Le_byte:
+ xorl %eax,%eax
+@@ -286,6 +296,7 @@ ENDPROC(copy_user_generic)
+ /* rdi destination
+ * rsi source
+ * rdx count
++ * ecx zero flag
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successfull.
+@@ -296,25 +307,48 @@ ENDPROC(copy_user_generic)
+ * And more would be dangerous because both Intel and AMD have
+ * errata with rep movsq > 4GB. If someone feels the need to fix
+ * this please consider this.
+- */
+-copy_user_generic_c:
++ */
++ENTRY(copy_user_generic_string)
+ CFI_STARTPROC
++ movl %ecx,%r8d /* save zero flag */
+ movl %edx,%ecx
+ shrl $3,%ecx
+ andl $7,%edx
++ jz 10f
+ 1: rep
+ movsq
+ movl %edx,%ecx
+ 2: rep
+ movsb
+-4: movl %ecx,%eax
++9: movl %ecx,%eax
+ ret
+-3: lea (%rdx,%rcx,8),%rax
++
++ /* multiple of 8 byte */
++10: rep
++ movsq
++ xor %eax,%eax
+ ret
++
++ /* exception handling */
++3: lea (%rdx,%rcx,8),%rax /* exception on quad loop */
++ jmp 6f
++5: movl %ecx,%eax /* exception on byte loop */
++ /* eax: left over bytes */
++6: testl %r8d,%r8d /* zero flag set? */
++ jz 7f
++ movl %eax,%ecx /* initialize x86 loop counter */
++ push %rax
++ xorl %eax,%eax
++8: rep
++ stosb /* zero the rest */
++11: pop %rax
++7: ret
+ CFI_ENDPROC
+ END(copy_user_generic_c)
+
+ .section __ex_table,"a"
+ .quad 1b,3b
+- .quad 2b,4b
++ .quad 2b,5b
++ .quad 8b,11b
++ .quad 10b,3b
+ .previous
+diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
+index 1e1fa00..bc68120 100644
+--- a/include/asm-x86_64/uaccess.h
++++ b/include/asm-x86_64/uaccess.h
+@@ -238,6 +238,7 @@ do { \
+
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);
++extern unsigned long copy_user_generic_dontzero(void *to, const void *from, unsigned len);
+
+ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
+ extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
+@@ -303,7 +304,6 @@ static __always_inline int __copy_to_user(void __user *dst, const void *src, uns
+ }
+ }
+
+-
+ static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ {
+ int ret = 0;
+@@ -352,7 +352,7 @@ long strlen_user(const char __user *str);
+ unsigned long clear_user(void __user *mem, unsigned long len);
+ unsigned long __clear_user(void __user *mem, unsigned long len);
+
+-#define __copy_to_user_inatomic __copy_to_user
+-#define __copy_from_user_inatomic __copy_from_user
++extern long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
++#define __copy_to_user_inatomic copy_user_generic
+
+ #endif /* __X86_64_UACCESS_H */
diff --git a/tags/2.6.18-12/30080_tty-fix-for-tty-operations-bugs.patch b/tags/2.6.18-12/30080_tty-fix-for-tty-operations-bugs.patch
new file mode 100644
index 0000000..5c42b38
--- /dev/null
+++ b/tags/2.6.18-12/30080_tty-fix-for-tty-operations-bugs.patch
@@ -0,0 +1,183 @@
+From alan@lxorguk.ukuu.org.uk Fri Jun 27 07:39:26 2008
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Date: Fri, 27 Jun 2008 15:21:55 +0100
+Subject: TTY: fix for tty operations bugs
+To: greg@kroah.com
+Message-ID: <20080627152155.50f0ebae@lxorguk.ukuu.org.uk>
+
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+This is fixed in a different way by the recent tty operations rewrite
+in mainline; this is a selective backport of the relevant portions to
+the -stable tree.
+
+Signed-off-by: Alan Cox <alan@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
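+A minimal sketch, assuming the 2.6.18 tty API and not part of the
+patch: every hunk below applies the same defensive pattern, since a tty
+driver's methods such as write, chars_in_buffer and set_termios are
+optional and may be NULL.
+
+#include <linux/tty.h>
+
+/* chars_in_buffer is optional; treat a missing method as "empty". */
+static int ldisc_chars_in_buffer(struct tty_struct *tty)
+{
+	if (tty->driver->chars_in_buffer)
+		return tty->driver->chars_in_buffer(tty);
+	return 0;
+}
+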
+diff -urpN linux-source-2.6.18.orig/drivers/net/hamradio/6pack.c linux-source-2.6.18/drivers/net/hamradio/6pack.c
+--- linux-source-2.6.18.orig/drivers/net/hamradio/6pack.c 2008-06-16 16:25:20.000000000 -0600
++++ linux-source-2.6.18/drivers/net/hamradio/6pack.c 2008-07-02 02:45:08.000000000 -0600
+@@ -601,6 +601,8 @@ static int sixpack_open(struct tty_struc
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
++ if (!tty->driver->write)
++ return -EOPNOTSUPP;
+
+ dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
+ if (!dev) {
+diff -urpN linux-source-2.6.18.orig/drivers/net/hamradio/mkiss.c linux-source-2.6.18/drivers/net/hamradio/mkiss.c
+--- linux-source-2.6.18.orig/drivers/net/hamradio/mkiss.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/hamradio/mkiss.c 2008-07-02 02:45:08.000000000 -0600
+@@ -530,6 +530,7 @@ static void ax_encaps(struct net_device
+ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct mkiss *ax = netdev_priv(dev);
++ int cib = 0;
+
+ if (!netif_running(dev)) {
+ printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
+@@ -545,10 +546,11 @@ static int ax_xmit(struct sk_buff *skb,
+ /* 20 sec timeout not reached */
+ return 1;
+ }
++ if (ax->tty->driver->chars_in_buffer)
++ cib = ax->tty->driver->chars_in_buffer(ax->tty);
+
+ printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
+- (ax->tty->driver->chars_in_buffer(ax->tty) || ax->xleft) ?
+- "bad line quality" : "driver error");
++ cib || ax->xleft ? "bad line quality" : "driver error");
+
+ ax->xleft = 0;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
+@@ -736,6 +738,8 @@ static int mkiss_open(struct tty_struct
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
++ if (!tty->driver->write)
++ return -EOPNOTSUPP;
+
+ dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
+ if (!dev) {
+diff -urpN linux-source-2.6.18.orig/drivers/net/irda/irtty-sir.c linux-source-2.6.18/drivers/net/irda/irtty-sir.c
+--- linux-source-2.6.18.orig/drivers/net/irda/irtty-sir.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/irda/irtty-sir.c 2008-07-02 02:45:08.000000000 -0600
+@@ -64,7 +64,9 @@ static int irtty_chars_in_buffer(struct
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+- return priv->tty->driver->chars_in_buffer(priv->tty);
++ if (priv->tty->driver->chars_in_buffer)
++ return priv->tty->driver->chars_in_buffer(priv->tty);
++ return 0;
+ }
+
+ /* Wait (sleep) until underlaying hardware finished transmission
+diff -urpN linux-source-2.6.18.orig/drivers/net/ppp_async.c linux-source-2.6.18/drivers/net/ppp_async.c
+--- linux-source-2.6.18.orig/drivers/net/ppp_async.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/ppp_async.c 2008-07-02 02:45:08.000000000 -0600
+@@ -158,6 +158,9 @@ ppp_asynctty_open(struct tty_struct *tty
+ struct asyncppp *ap;
+ int err;
+
++ if (!tty->driver->write)
++ return -EOPNOTSUPP;
++
+ err = -ENOMEM;
+ ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ if (ap == 0)
+diff -urpN linux-source-2.6.18.orig/drivers/net/ppp_synctty.c linux-source-2.6.18/drivers/net/ppp_synctty.c
+--- linux-source-2.6.18.orig/drivers/net/ppp_synctty.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/ppp_synctty.c 2008-07-02 02:49:36.000000000 -0600
+@@ -207,6 +207,9 @@ ppp_sync_open(struct tty_struct *tty)
+ struct syncppp *ap;
+ int err;
+
++ if (!tty->driver->write)
++ return -EOPNOTSUPP;
++
+ ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ err = -ENOMEM;
+ if (ap == 0)
+diff -urpN linux-source-2.6.18.orig/drivers/net/slip.c linux-source-2.6.18/drivers/net/slip.c
+--- linux-source-2.6.18.orig/drivers/net/slip.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/slip.c 2008-07-02 02:48:57.000000000 -0600
+@@ -463,9 +463,14 @@ static void sl_tx_timeout(struct net_dev
+ /* 20 sec timeout not reached */
+ goto out;
+ }
+- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+- (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+- "bad line quality" : "driver error");
++ {
++ int cib = 0;
++ if (sl->tty->driver->chars_in_buffer)
++ cib = sl->tty->driver->chars_in_buffer(sl->tty);
++ printk(KERN_WARNING "%s: transmit timed out, %s?\n",
++ dev->name, (cib || sl->xleft) ?
++ "bad line quality" : "driver error");
++ }
+ sl->xleft = 0;
+ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ sl_unlock(sl);
+@@ -836,6 +841,8 @@ static int slip_open(struct tty_struct *
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
++ if (!tty->driver->write)
++ return -EOPNOTSUPP;
+
+ /* RTnetlink lock is misused here to serialize concurrent
+ opens of slip channels. There are better ways, but it is
+diff -urpN linux-source-2.6.18.orig/drivers/net/wan/x25_asy.c linux-source-2.6.18/drivers/net/wan/x25_asy.c
+--- linux-source-2.6.18.orig/drivers/net/wan/x25_asy.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/wan/x25_asy.c 2008-07-02 02:45:08.000000000 -0600
+@@ -283,6 +283,10 @@ static void x25_asy_write_wakeup(struct
+ static void x25_asy_timeout(struct net_device *dev)
+ {
+ struct x25_asy *sl = (struct x25_asy*)(dev->priv);
++ int cib = 0;
++
++ if (sl->tty->driver->chars_in_buffer)
++ cib = sl->tty->driver->chars_in_buffer(sl->tty);
+
+ spin_lock(&sl->lock);
+ if (netif_queue_stopped(dev)) {
+@@ -290,8 +294,7 @@ static void x25_asy_timeout(struct net_d
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+- (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+- "bad line quality" : "driver error");
++ (cib || sl->xleft) ? "bad line quality" : "driver error");
+ sl->xleft = 0;
+ sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ x25_asy_unlock(sl);
+@@ -561,6 +564,9 @@ static int x25_asy_open_tty(struct tty_s
+ return -EEXIST;
+ }
+
++ if (!tty->driver->write)
++ return -EOPNOTSUPP;
++
+ /* OK. Find a free X.25 channel to use. */
+ if ((sl = x25_asy_alloc()) == NULL) {
+ return -ENFILE;
+diff -urpN linux-source-2.6.18.orig/drivers/net/wireless/strip.c linux-source-2.6.18/drivers/net/wireless/strip.c
+--- linux-source-2.6.18.orig/drivers/net/wireless/strip.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/drivers/net/wireless/strip.c 2008-07-02 02:45:08.000000000 -0600
+@@ -801,7 +801,8 @@ static void set_baud(struct tty_struct *
+ struct termios old_termios = *(tty->termios);
+ tty->termios->c_cflag &= ~CBAUD; /* Clear the old baud setting */
+ tty->termios->c_cflag |= baudcode; /* Set the new baud setting */
+- tty->driver->set_termios(tty, &old_termios);
++ if (tty->driver->set_termios)
++ tty->driver->set_termios(tty, &old_termios);
+ }
+
+ /*
diff --git a/tags/2.6.18-12/30081_check-privileges-before-setting-mount-propagation.patch b/tags/2.6.18-12/30081_check-privileges-before-setting-mount-propagation.patch
new file mode 100644
index 0000000..b08c50e
--- /dev/null
+++ b/tags/2.6.18-12/30081_check-privileges-before-setting-mount-propagation.patch
@@ -0,0 +1,28 @@
+commit ee6f958291e2a768fd727e7a67badfff0b67711a
+Author: Miklos Szeredi <mszeredi@suse.cz>
+Date: Tue May 8 00:30:40 2007 -0700
+
+ check privileges before setting mount propagation
+
+ There's a missing check for CAP_SYS_ADMIN in do_change_type().
+
+ Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+ Cc: Al Viro <viro@zeniv.linux.org.uk>
+ Cc: Christoph Hellwig <hch@lst.de>
+ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
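+A minimal sketch, kernel context assumed, of the privilege gate the fix
+adds: changing mount propagation affects the whole namespace, so
+unprivileged callers are rejected before anything is touched.
+
+#include <linux/sched.h>	/* capable() */
+#include <linux/capability.h>
+#include <linux/errno.h>
+
+/* Deny namespace-wide mount changes without CAP_SYS_ADMIN. */
+static int may_change_propagation(void)
+{
+	return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+}
+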
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 72bb106..b696e3a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -886,6 +886,9 @@ static int do_change_type(struct nameidata *nd, int flag)
+ int recurse = flag & MS_REC;
+ int type = flag & ~MS_REC;
+
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
+ if (nd->dentry != nd->mnt->mnt_root)
+ return -EINVAL;
+
diff --git a/tags/2.6.18-12/30082a_x86-add-copy_user_handle_tail.patch b/tags/2.6.18-12/30082a_x86-add-copy_user_handle_tail.patch
new file mode 100644
index 0000000..27cfb86
--- /dev/null
+++ b/tags/2.6.18-12/30082a_x86-add-copy_user_handle_tail.patch
@@ -0,0 +1,56 @@
+commit 1129585a08baf58582c0da91e572cb29e3179acf
+Author: Vitaly Mayatskikh <v.mayatskih@gmail.com>
+Date: Wed Jul 2 15:48:21 2008 +0200
+
+ x86: introduce copy_user_handle_tail() routine
+
+ Introduce a generic C routine for handling the necessary tail
+ operations after a protection fault in copy_*_user on x86.
+
+ Signed-off-by: Vitaly Mayatskikh <v.mayatskih@gmail.com>
+ Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/lib/usercopy.c linux-source-2.6.18/arch/x86_64/lib/usercopy.c
+--- linux-source-2.6.18.orig/arch/x86_64/lib/usercopy.c 2006-09-19 21:42:06.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/lib/usercopy.c 2008-07-16 02:39:08.000000000 -0600
+@@ -164,3 +164,26 @@ unsigned long copy_in_user(void __user *
+ }
+ EXPORT_SYMBOL(copy_in_user);
+
++/*
++ * Try to copy last bytes and clear the rest if needed.
++ * Since protection fault in copy_from/to_user is not a normal situation,
++ * it is not necessary to optimize tail handling.
++ */
++unsigned long
++copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
++{
++ char c;
++ unsigned zero_len;
++
++ for (; len; --len) {
++ if (__get_user_nocheck(c, from++, sizeof(char)))
++ break;
++ if (__put_user_nocheck(c, to++, sizeof(char)))
++ break;
++ }
++
++ for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
++ if (__put_user_nocheck(c, to++, sizeof(char)))
++ break;
++ return len;
++}
+diff -urpN linux-source-2.6.18.orig/include/asm-x86_64/uaccess.h linux-source-2.6.18/include/asm-x86_64/uaccess.h
+--- linux-source-2.6.18.orig/include/asm-x86_64/uaccess.h 2008-07-16 00:01:24.000000000 -0600
++++ linux-source-2.6.18/include/asm-x86_64/uaccess.h 2008-07-16 02:39:15.000000000 -0600
+@@ -355,4 +355,7 @@ unsigned long __clear_user(void __user *
+ extern long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
+ #define __copy_to_user_inatomic copy_user_generic
+
++unsigned long
++copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++
+ #endif /* __X86_64_UACCESS_H */
diff --git a/tags/2.6.18-12/30082b_x86-fix-copy_user.patch b/tags/2.6.18-12/30082b_x86-fix-copy_user.patch
new file mode 100644
index 0000000..f0b7a07
--- /dev/null
+++ b/tags/2.6.18-12/30082b_x86-fix-copy_user.patch
@@ -0,0 +1,537 @@
+commit ad2fc2cd925300b8127cf682f5a1c7511ae9dd27
+Author: Vitaly Mayatskikh <v.mayatskih@gmail.com>
+Date: Wed Jul 2 15:53:13 2008 +0200
+
+ x86: fix copy_user on x86
+
+ Switch copy_user_generic_string(), copy_user_generic_unrolled() and
+ __copy_user_nocache() from custom tail handlers to generic
+ copy_user_handle_tail().
+
+ Signed-off-by: Vitaly Mayatskikh <v.mayatskih@gmail.com>
+ Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/lib/copy_user.S linux-source-2.6.18/arch/x86_64/lib/copy_user.S
+--- linux-source-2.6.18.orig/arch/x86_64/lib/copy_user.S 2008-07-15 23:01:24.000000000 -0700
++++ linux-source-2.6.18/arch/x86_64/lib/copy_user.S 2008-07-15 23:33:23.000000000 -0700
+@@ -1,8 +1,10 @@
+-/* Copyright 2002 Andi Kleen, SuSE Labs.
++/*
++ * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
++ * Copyright 2002 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License v2.
+- *
+- * Functions to copy from and to user space.
+- */
++ *
++ * Functions to copy from and to user space.
++ */
+
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+@@ -20,60 +22,88 @@
+ .long \orig-1f /* by default jump to orig */
+ 1:
+ .section .altinstr_replacement,"ax"
+-2: .byte 0xe9 /* near jump with 32bit immediate */
++2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt-1b /* offset */ /* or alternatively to alt */
+ .previous
+ .section .altinstructions,"a"
+ .align 8
+ .quad 0b
+ .quad 2b
+- .byte \feature /* when feature is set */
++ .byte \feature /* when feature is set */
+ .byte 5
+ .byte 5
+ .previous
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
++ .macro ALIGN_DESTINATION
++#ifdef FIX_ALIGNMENT
++ /* check for bad alignment of destination */
++ movl %edi,%ecx
++ andl $7,%ecx
++ jz 102f /* already aligned */
++ subl $8,%ecx
++ negl %ecx
++ subl %ecx,%edx
++100: movb (%rsi),%al
++101: movb %al,(%rdi)
++ incq %rsi
++ incq %rdi
++ decl %ecx
++ jnz 100b
++102:
++ .section .fixup,"ax"
++103: addl %r8d,%edx /* ecx is zerorest also */
++ jmp copy_user_handle_tail
++ .previous
++
++ .section __ex_table,"a"
++ .align 8
++ .quad 100b,103b
++ .quad 101b,103b
++ .previous
++#endif
++ .endm
++
++/* Standard copy_to_user with segment limit checking */
+ ENTRY(copy_to_user)
+ CFI_STARTPROC
+ GET_THREAD_INFO(%rax)
+ movq %rdi,%rcx
+ addq %rdx,%rcx
+- jc bad_to_user
+- cmpq threadinfo_addr_limit(%rax),%rcx
++ jc bad_to_user
++ cmpq threadinfo_addr_limit(%rax),%rcx
+ jae bad_to_user
+- xorl %eax,%eax /* clear zero flag */
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
+
+-ENTRY(copy_user_generic)
++/* Standard copy_from_user with segment limit checking */
++ENTRY(copy_from_user)
+ CFI_STARTPROC
+- movl $1,%ecx /* set zero flag */
++ GET_THREAD_INFO(%rax)
++ movq %rsi,%rcx
++ addq %rdx,%rcx
++ jc bad_from_user
++ cmpq threadinfo_addr_limit(%rax),%rcx
++ jae bad_from_user
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
++ENDPROC(copy_from_user)
+
+-ENTRY(__copy_from_user_inatomic)
++ENTRY(copy_user_generic)
+ CFI_STARTPROC
+- xorl %ecx,%ecx /* clear zero flag */
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
++ENDPROC(copy_user_generic)
+
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(copy_from_user)
++ENTRY(__copy_from_user_inatomic)
+ CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq threadinfo_addr_limit(%rax),%rcx
+- jae bad_from_user
+- movl $1,%ecx /* set zero flag */
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
+-ENDPROC(copy_from_user)
+-
++ENDPROC(__copy_from_user_inatomic)
++
+ .section .fixup,"ax"
+ /* must zero dest */
++ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
+ movl %edx,%ecx
+@@ -81,274 +111,158 @@ bad_from_user:
+ rep
+ stosb
+ bad_to_user:
+- movl %edx,%eax
++ movl %edx,%eax
+ ret
+ CFI_ENDPROC
+-END(bad_from_user)
++ENDPROC(bad_from_user)
+ .previous
+-
+-
++
+ /*
+ * copy_user_generic_unrolled - memory copy with exception handling.
+- * This version is for CPUs like P4 that don't have efficient micro code for rep movsq
+- *
+- * Input:
++ * This version is for CPUs like P4 that don't have efficient micro
++ * code for rep movsq
++ *
++ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+- * ecx zero flag -- if true zero destination on error
+ *
+- * Output:
+- * eax uncopied bytes or 0 if successful.
++ * Output:
++ * eax uncopied bytes or 0 if successful.
+ */
+ ENTRY(copy_user_generic_unrolled)
+ CFI_STARTPROC
+- pushq %rbx
+- CFI_ADJUST_CFA_OFFSET 8
+- CFI_REL_OFFSET rbx, 0
+- pushq %rcx
+- CFI_ADJUST_CFA_OFFSET 8
+- CFI_REL_OFFSET rcx, 0
+- xorl %eax,%eax /*zero for the exception handler */
+-
+-#ifdef FIX_ALIGNMENT
+- /* check for bad alignment of destination */
+- movl %edi,%ecx
+- andl $7,%ecx
+- jnz .Lbad_alignment
+-.Lafter_bad_alignment:
+-#endif
+-
+- movq %rdx,%rcx
+-
+- movl $64,%ebx
+- shrq $6,%rdx
+- decq %rdx
+- js .Lhandle_tail
+-
+- .p2align 4
+-.Lloop:
+-.Ls1: movq (%rsi),%r11
+-.Ls2: movq 1*8(%rsi),%r8
+-.Ls3: movq 2*8(%rsi),%r9
+-.Ls4: movq 3*8(%rsi),%r10
+-.Ld1: movq %r11,(%rdi)
+-.Ld2: movq %r8,1*8(%rdi)
+-.Ld3: movq %r9,2*8(%rdi)
+-.Ld4: movq %r10,3*8(%rdi)
+-
+-.Ls5: movq 4*8(%rsi),%r11
+-.Ls6: movq 5*8(%rsi),%r8
+-.Ls7: movq 6*8(%rsi),%r9
+-.Ls8: movq 7*8(%rsi),%r10
+-.Ld5: movq %r11,4*8(%rdi)
+-.Ld6: movq %r8,5*8(%rdi)
+-.Ld7: movq %r9,6*8(%rdi)
+-.Ld8: movq %r10,7*8(%rdi)
+-
+- decq %rdx
+-
++ cmpl $8,%edx
++ jb 20f /* less then 8 bytes, go to byte copy loop */
++ ALIGN_DESTINATION
++ movl %edx,%ecx
++ andl $63,%edx
++ shrl $6,%ecx
++ jz 17f
++1: movq (%rsi),%r8
++2: movq 1*8(%rsi),%r9
++3: movq 2*8(%rsi),%r10
++4: movq 3*8(%rsi),%r11
++5: movq %r8,(%rdi)
++6: movq %r9,1*8(%rdi)
++7: movq %r10,2*8(%rdi)
++8: movq %r11,3*8(%rdi)
++9: movq 4*8(%rsi),%r8
++10: movq 5*8(%rsi),%r9
++11: movq 6*8(%rsi),%r10
++12: movq 7*8(%rsi),%r11
++13: movq %r8,4*8(%rdi)
++14: movq %r9,5*8(%rdi)
++15: movq %r10,6*8(%rdi)
++16: movq %r11,7*8(%rdi)
+ leaq 64(%rsi),%rsi
+ leaq 64(%rdi),%rdi
+-
+- jns .Lloop
+-
+- .p2align 4
+-.Lhandle_tail:
+- movl %ecx,%edx
+- andl $63,%ecx
+- shrl $3,%ecx
+- jz .Lhandle_7
+- movl $8,%ebx
+- .p2align 4
+-.Lloop_8:
+-.Ls9: movq (%rsi),%r8
+-.Ld9: movq %r8,(%rdi)
+ decl %ecx
+- leaq 8(%rdi),%rdi
++ jnz 1b
++17: movl %edx,%ecx
++ andl $7,%edx
++ shrl $3,%ecx
++ jz 20f
++18: movq (%rsi),%r8
++19: movq %r8,(%rdi)
+ leaq 8(%rsi),%rsi
+- jnz .Lloop_8
+-
+-.Lhandle_7:
++ leaq 8(%rdi),%rdi
++ decl %ecx
++ jnz 18b
++20: andl %edx,%edx
++ jz 23f
+ movl %edx,%ecx
+- andl $7,%ecx
+- jz .Lende
+- .p2align 4
+-.Lloop_1:
+-.Ls10: movb (%rsi),%bl
+-.Ld10: movb %bl,(%rdi)
+- incq %rdi
++21: movb (%rsi),%al
++22: movb %al,(%rdi)
+ incq %rsi
++ incq %rdi
+ decl %ecx
+- jnz .Lloop_1
+-
+- CFI_REMEMBER_STATE
+-.Lende:
+- popq %rcx
+- CFI_ADJUST_CFA_OFFSET -8
+- CFI_RESTORE rcx
+- popq %rbx
+- CFI_ADJUST_CFA_OFFSET -8
+- CFI_RESTORE rbx
++ jnz 21b
++23: xor %eax,%eax
+ ret
+- CFI_RESTORE_STATE
+
+-#ifdef FIX_ALIGNMENT
+- /* align destination */
+- .p2align 4
+-.Lbad_alignment:
+- movl $8,%r9d
+- subl %ecx,%r9d
+- movl %r9d,%ecx
+- cmpq %r9,%rdx
+- jz .Lhandle_7
+- js .Lhandle_7
+-.Lalign_1:
+-.Ls11: movb (%rsi),%bl
+-.Ld11: movb %bl,(%rdi)
+- incq %rsi
+- incq %rdi
+- decl %ecx
+- jnz .Lalign_1
+- subq %r9,%rdx
+- jmp .Lafter_bad_alignment
+-#endif
++ .section .fixup,"ax"
++30: shll $6,%ecx
++ addl %ecx,%edx
++ jmp 60f
++40: lea (%rdx,%rcx,8),%rdx
++ jmp 60f
++50: movl %ecx,%edx
++60: jmp copy_user_handle_tail /* ecx is zerorest also */
++ .previous
+
+- /* table sorted by exception address */
+ .section __ex_table,"a"
+ .align 8
+- .quad .Ls1,.Ls1e
+- .quad .Ls2,.Ls2e
+- .quad .Ls3,.Ls3e
+- .quad .Ls4,.Ls4e
+- .quad .Ld1,.Ls1e
+- .quad .Ld2,.Ls2e
+- .quad .Ld3,.Ls3e
+- .quad .Ld4,.Ls4e
+- .quad .Ls5,.Ls5e
+- .quad .Ls6,.Ls6e
+- .quad .Ls7,.Ls7e
+- .quad .Ls8,.Ls8e
+- .quad .Ld5,.Ls5e
+- .quad .Ld6,.Ls6e
+- .quad .Ld7,.Ls7e
+- .quad .Ld8,.Ls8e
+- .quad .Ls9,.Le_quad
+- .quad .Ld9,.Le_quad
+- .quad .Ls10,.Le_byte
+- .quad .Ld10,.Le_byte
+-#ifdef FIX_ALIGNMENT
+- .quad .Ls11,.Lzero_rest
+- .quad .Ld11,.Lzero_rest
+-#endif
+- .quad .Le5,.Le_zero
++ .quad 1b,30b
++ .quad 2b,30b
++ .quad 3b,30b
++ .quad 4b,30b
++ .quad 5b,30b
++ .quad 6b,30b
++ .quad 7b,30b
++ .quad 8b,30b
++ .quad 9b,30b
++ .quad 10b,30b
++ .quad 11b,30b
++ .quad 12b,30b
++ .quad 13b,30b
++ .quad 14b,30b
++ .quad 15b,30b
++ .quad 16b,30b
++ .quad 18b,40b
++ .quad 19b,40b
++ .quad 21b,50b
++ .quad 22b,50b
+ .previous
+-
+- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
+- pessimistic side. this is gross. it would be better to fix the
+- interface. */
+- /* eax: zero, ebx: 64 */
+-.Ls1e: addl $8,%eax
+-.Ls2e: addl $8,%eax
+-.Ls3e: addl $8,%eax
+-.Ls4e: addl $8,%eax
+-.Ls5e: addl $8,%eax
+-.Ls6e: addl $8,%eax
+-.Ls7e: addl $8,%eax
+-.Ls8e: addl $8,%eax
+- addq %rbx,%rdi /* +64 */
+- subq %rax,%rdi /* correct destination with computed offset */
+-
+- shlq $6,%rdx /* loop counter * 64 (stride length) */
+- addq %rax,%rdx /* add offset to loopcnt */
+- andl $63,%ecx /* remaining bytes */
+- addq %rcx,%rdx /* add them */
+- jmp .Lzero_rest
+-
+- /* exception on quad word loop in tail handling */
+- /* ecx: loopcnt/8, %edx: length, rdi: correct */
+-.Le_quad:
+- shll $3,%ecx
+- andl $7,%edx
+- addl %ecx,%edx
+- /* edx: bytes to zero, rdi: dest, eax:zero */
+-.Lzero_rest:
+- cmpl $0,(%rsp)
+- jz .Le_zero
+- movq %rdx,%rcx
+-.Le_byte:
+- xorl %eax,%eax
+-.Le5: rep
+- stosb
+- /* when there is another exception while zeroing the rest just return */
+-.Le_zero:
+- movq %rdx,%rax
+- jmp .Lende
+ CFI_ENDPROC
+-ENDPROC(copy_user_generic)
++ENDPROC(copy_user_generic_unrolled)
+
+-
+- /* Some CPUs run faster using the string copy instructions.
+- This is also a lot simpler. Use them when possible.
+- Patch in jmps to this code instead of copying it fully
+- to avoid unwanted aliasing in the exception tables. */
+-
+- /* rdi destination
+- * rsi source
+- * rdx count
+- * ecx zero flag
+- *
+- * Output:
+- * eax uncopied bytes or 0 if successfull.
+- *
+- * Only 4GB of copy is supported. This shouldn't be a problem
+- * because the kernel normally only writes from/to page sized chunks
+- * even if user space passed a longer buffer.
+- * And more would be dangerous because both Intel and AMD have
+- * errata with rep movsq > 4GB. If someone feels the need to fix
+- * this please consider this.
+- */
++/* Some CPUs run faster using the string copy instructions.
++ * This is also a lot simpler. Use them when possible.
++ *
++ * Only 4GB of copy is supported. This shouldn't be a problem
++ * because the kernel normally only writes from/to page sized chunks
++ * even if user space passed a longer buffer.
++ * And more would be dangerous because both Intel and AMD have
++ * errata with rep movsq > 4GB. If someone feels the need to fix
++ * this please consider this.
++ *
++ * Input:
++ * rdi destination
++ * rsi source
++ * rdx count
++ *
++ * Output:
++ * eax uncopied bytes or 0 if successful.
++ */
+ ENTRY(copy_user_generic_string)
+ CFI_STARTPROC
+- movl %ecx,%r8d /* save zero flag */
++ andl %edx,%edx
++ jz 4f
++ cmpl $8,%edx
++ jb 2f /* less than 8 bytes, go to byte copy loop */
++ ALIGN_DESTINATION
+ movl %edx,%ecx
+ shrl $3,%ecx
+- andl $7,%edx
+- jz 10f
+-1: rep
+- movsq
+- movl %edx,%ecx
+-2: rep
+- movsb
+-9: movl %ecx,%eax
+- ret
+-
+- /* multiple of 8 byte */
+-10: rep
++ andl $7,%edx
++1: rep
+ movsq
+- xor %eax,%eax
++2: movl %edx,%ecx
++3: rep
++ movsb
++4: xorl %eax,%eax
+ ret
+
+- /* exception handling */
+-3: lea (%rdx,%rcx,8),%rax /* exception on quad loop */
+- jmp 6f
+-5: movl %ecx,%eax /* exception on byte loop */
+- /* eax: left over bytes */
+-6: testl %r8d,%r8d /* zero flag set? */
+- jz 7f
+- movl %eax,%ecx /* initialize x86 loop counter */
+- push %rax
+- xorl %eax,%eax
+-8: rep
+- stosb /* zero the rest */
+-11: pop %rax
+-7: ret
+- CFI_ENDPROC
+-END(copy_user_generic_c)
++ .section .fixup,"ax"
++11: lea (%rdx,%rcx,8),%rcx
++12: movl %ecx,%edx /* ecx is zerorest also */
++ jmp copy_user_handle_tail
++ .previous
+
+ .section __ex_table,"a"
+- .quad 1b,3b
+- .quad 2b,5b
+- .quad 8b,11b
+- .quad 10b,3b
++ .align 8
++ .quad 1b,11b
++ .quad 3b,12b
+ .previous
++ CFI_ENDPROC
++ENDPROC(copy_user_generic_string)
diff --git a/tags/2.6.18-12/30083_x86-wrong-register-was-used-in-align-macro.patch b/tags/2.6.18-12/30083_x86-wrong-register-was-used-in-align-macro.patch
new file mode 100644
index 0000000..8ba33b0
--- /dev/null
+++ b/tags/2.6.18-12/30083_x86-wrong-register-was-used-in-align-macro.patch
@@ -0,0 +1,29 @@
+commit afd962a9e8708c571c5c0c4a6d098f931742c229
+Author: Vitaly Mayatskikh <v.mayatskih@gmail.com>
+Date: Wed Jul 30 13:30:14 2008 +0200
+
+ x86: wrong register was used in align macro
+
+ New ALIGN_DESTINATION macro has sad typo: r8d register was used instead
+ of ecx in fixup section. This can be considered as a regression.
+
+ Register ecx was also wrongly loaded with value in r8d in
+ copy_user_nocache routine.
+
+ Signed-off-by: Vitaly Mayatskikh <v.mayatskih@gmail.com>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Backported to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
+diff -urpN linux-source-2.6.18.orig/arch/x86_64/lib/copy_user.S linux-source-2.6.18/arch/x86_64/lib/copy_user.S
+--- linux-source-2.6.18.orig/arch/x86_64/lib/copy_user.S 2008-07-27 16:46:41.000000000 -0600
++++ linux-source-2.6.18/arch/x86_64/lib/copy_user.S 2008-08-12 00:13:09.000000000 -0600
+@@ -52,7 +52,7 @@
+ jnz 100b
+ 102:
+ .section .fixup,"ax"
+-103: addl %r8d,%edx /* ecx is zerorest also */
++103: addl %ecx,%edx /* ecx is zerorest also */
+ jmp copy_user_handle_tail
+ .previous
+
diff --git a/tags/2.6.18-12/30084_cifs-fix-compiler-warning.patch b/tags/2.6.18-12/30084_cifs-fix-compiler-warning.patch
new file mode 100644
index 0000000..098f634
--- /dev/null
+++ b/tags/2.6.18-12/30084_cifs-fix-compiler-warning.patch
@@ -0,0 +1,21 @@
+commit 04e1e0cccade330ab3715ce59234f7e3b087e246
+Author: Jan Beulich <jbeulich@novell.com>
+Date: Tue Jul 22 13:04:18 2008 +0000
+
+ [CIFS] Fix compiler warning on 64-bit
+
+ Signed-off-by: Steve French <sfrench@us.ibm.com>
+
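+A minimal sketch in plain C: size here is a 32-bit unsigned int, so a
+bound of ULONG_MAX/sizeof(unsigned long) can never be exceeded on a
+64-bit build (hence the compiler warning); the bound has to match the
+width of the variable itself.
+
+#include <limits.h>
+#include <stdlib.h>
+
+/* Bound a 32-bit count by its own type's range before multiplying. */
+static unsigned long *alloc_oid(unsigned int size)
+{
+	if (size < 2 || size > UINT_MAX / sizeof(unsigned long))
+		return NULL;
+	return malloc((size_t)size * sizeof(unsigned long));
+}
+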
+diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
+index f58e41d..4276546 100644
+--- a/fs/cifs/asn1.c
++++ b/fs/cifs/asn1.c
+@@ -400,7 +400,7 @@ asn1_oid_decode(struct asn1_ctx *ctx,
+ size = eoc - ctx->pointer + 1;
+
+ /* first subid actually encodes first two subids */
+- if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
++ if (size < 2 || size > UINT_MAX/sizeof(unsigned long))
+ return 0;
+
+ *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
diff --git a/tags/2.6.18-12/30085_netfilter-nf_nat_snmp_basic-fix-range-check.patch b/tags/2.6.18-12/30085_netfilter-nf_nat_snmp_basic-fix-range-check.patch
new file mode 100644
index 0000000..3bf2353
--- /dev/null
+++ b/tags/2.6.18-12/30085_netfilter-nf_nat_snmp_basic-fix-range-check.patch
@@ -0,0 +1,27 @@
+commit 252815b0cfe711001eff0327872209986b36d490
+Author: David Howells <dhowells@redhat.com>
+Date: Wed Jul 9 15:06:45 2008 -0700
+
+ netfilter: nf_nat_snmp_basic: fix a range check in NAT for SNMP
+
+ Fix a range check in netfilter IP NAT for SNMP to always use a size
+ variable big enough that the compiler won't moan about comparing it to
+ ULONG_MAX/8 on a 64-bit platform.
+
+ Signed-off-by: David Howells <dhowells@redhat.com>
+ Signed-off-by: Patrick McHardy <kaber@trash.net>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
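+This is the complementary approach to the CIFS fix above: instead of
+shrinking the bound to fit the variable, the variable is widened to fit
+the bound. With a size_t length the ULONG_MAX-based test is meaningful
+on 64-bit as well; a rough sketch:
+
+#include <limits.h>
+#include <stddef.h>
+
+/* With a size_t length the ULONG_MAX-derived bound can actually fire. */
+static int oid_size_ok(size_t size)
+{
+	return size >= 2 && size <= ULONG_MAX / sizeof(unsigned long);
+}
+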
+diff -urpN linux-source-2.6.18.orig/net/ipv4/netfilter/ip_nat_snmp_basic.c linux-source-2.6.18/net/ipv4/netfilter/ip_nat_snmp_basic.c
+--- linux-source-2.6.18.orig/net/ipv4/netfilter/ip_nat_snmp_basic.c 2008-06-16 16:25:21.000000000 -0600
++++ linux-source-2.6.18/net/ipv4/netfilter/ip_nat_snmp_basic.c 2008-08-17 19:03:41.000000000 -0600
+@@ -442,8 +442,8 @@ static unsigned char asn1_oid_decode(str
+ unsigned int *len)
+ {
+ unsigned long subid;
+- unsigned int size;
+ unsigned long *optr;
++ size_t size;
+
+ size = eoc - ctx->pointer + 1;
+
diff --git a/tags/2.6.18-12/30086_sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch b/tags/2.6.18-12/30086_sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch
new file mode 100644
index 0000000..928c1ef
--- /dev/null
+++ b/tags/2.6.18-12/30086_sound-ensure-device-number-is-valid-in-snd_seq_oss_synth_make_info.patch
@@ -0,0 +1,30 @@
+commit 82e68f7ffec3800425f2391c8c86277606860442
+Author: Willy Tarreau <w@1wt.eu>
+Date: Sat Aug 2 18:25:16 2008 +0200
+
+ sound: ensure device number is valid in snd_seq_oss_synth_make_info
+
+ snd_seq_oss_synth_make_info() incorrectly reports information
+ to userspace without first checking for the validity of the
+ device number, leading to a possible information leak (CVE-2008-3272).
+
+ Reported-By: Tobias Klein <tk@trapkit.de>
+ Acked-and-tested-by: Takashi Iwai <tiwai@suse.de>
+ Cc: stable@kernel.org
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
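+A minimal sketch, plain C with hypothetical names, of the
+validate-before-index pattern the fix applies: a device number is
+rejected at both ends of the range before it is ever used as an array
+subscript.
+
+#include <errno.h>
+
+/* Validate dev before indexing a table of max_dev entries. */
+static int check_dev(int dev, int max_dev)
+{
+	if (dev < 0 || dev >= max_dev)
+		return -ENXIO;	/* out of range: refuse to touch the slot */
+	return 0;
+}
+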
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index 558dadb..e024e45 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -604,6 +604,9 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
+ {
+ struct seq_oss_synth *rec;
+
++ if (dev < 0 || dev >= dp->max_synthdev)
++ return -ENXIO;
++
+ if (dp->synths[dev].is_midi) {
+ struct midi_info minf;
+ snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
diff --git a/tags/2.6.18-12/30087_vfs-fix-lookup-on-deleted-directory.patch b/tags/2.6.18-12/30087_vfs-fix-lookup-on-deleted-directory.patch
new file mode 100644
index 0000000..93605d0
--- /dev/null
+++ b/tags/2.6.18-12/30087_vfs-fix-lookup-on-deleted-directory.patch
@@ -0,0 +1,71 @@
+commit d70b67c8bc72ee23b55381bd6a884f4796692f77
+Author: Miklos Szeredi <mszeredi@suse.cz>
+Date: Wed Jul 2 21:30:15 2008 +0200
+
+ [patch] vfs: fix lookup on deleted directory
+
+ Lookup can install a child dentry for a deleted directory. This keeps
+ the directory dentry alive, and the inode pinned in the cache and on
+ disk, even after all external references have gone away.
+
+ This isn't a big problem normally, since memory pressure or umount
+ will clear out the directory dentry and its children, releasing the
+ inode. But for UBIFS this causes problems because its orphan area can
+ overflow.
+
+ Fix this by returning ENOENT for all lookups on a S_DEAD directory
+ before creating a child dentry.
+
+ Thanks to Zoltan Sogor for noticing this while testing UBIFS, and
+ Artem for the excellent analysis of the problem and testing.
+
+ Reported-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+ Tested-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+ Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+ Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+
+Adjusted to apply to Debian's 2.6.18 by dann frazier <dannf@debian.org>
+
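+A minimal sketch, kernel context assumed: both hunks below add the same
+guard. Once rmdir has marked a directory inode dead, no new child
+dentries may be hung off it, so nothing re-pins the inode.
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/err.h>
+
+/* Refuse to instantiate children under a dead directory. */
+static struct dentry *alloc_child_checked(struct inode *dir,
+					  struct dentry *parent,
+					  struct qstr *name)
+{
+	if (IS_DEADDIR(dir))
+		return ERR_PTR(-ENOENT);
+	return d_alloc(parent, name);
+}
+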
+diff -urpN linux-source-2.6.18.orig/fs/namei.c linux-source-2.6.18/fs/namei.c
+--- linux-source-2.6.18.orig/fs/namei.c 2008-06-16 16:25:21.000000000 -0600
++++ linux-source-2.6.18/fs/namei.c 2008-08-15 13:51:40.000000000 -0600
+@@ -465,7 +465,14 @@ static struct dentry * real_lookup(struc
+ */
+ result = d_lookup(parent, name);
+ if (!result) {
+- struct dentry * dentry = d_alloc(parent, name);
++ struct dentry *dentry;
++
++ /* Don't create child dentry for a dead directory. */
++ result = ERR_PTR(-ENOENT);
++ if (IS_DEADDIR(dir))
++ goto out_unlock;
++
++ dentry = d_alloc(parent, name);
+ result = ERR_PTR(-ENOMEM);
+ if (dentry) {
+ result = dir->i_op->lookup(dir, dentry, nd);
+@@ -474,6 +481,7 @@ static struct dentry * real_lookup(struc
+ else
+ result = dentry;
+ }
++out_unlock:
+ mutex_unlock(&dir->i_mutex);
+ return result;
+ }
+@@ -1248,7 +1256,14 @@ static struct dentry * __lookup_hash(str
+
+ dentry = cached_lookup(base, name, nd);
+ if (!dentry) {
+- struct dentry *new = d_alloc(base, name);
++ struct dentry *new;
++
++ /* Don't create child dentry for a dead directory. */
++ dentry = ERR_PTR(-ENOENT);
++ if (IS_DEADDIR(inode))
++ goto out;
++
++ new = d_alloc(base, name);
+ dentry = ERR_PTR(-ENOMEM);
+ if (!new)
+ goto out;
diff --git a/tags/2.6.18-12/50009_gentooify-tls-warning.patch b/tags/2.6.18-12/50009_gentooify-tls-warning.patch
new file mode 100644
index 0000000..3c3db12
--- /dev/null
+++ b/tags/2.6.18-12/50009_gentooify-tls-warning.patch
@@ -0,0 +1,16 @@
+--- linux-2.6.20-xen-r6.orig/arch/i386/kernel/fixup.c 2007-10-15 16:07:58.000000000 -0700
++++ linux-2.6.20-xen-r6/arch/i386/kernel/fixup.c 2007-10-15 16:15:01.000000000 -0700
+@@ -60,10 +60,9 @@
+ DP("** WARNING: Currently emulating unsupported memory accesses **");
+ DP("** in /lib/tls glibc libraries. The emulation is **");
+ DP("** slow. To ensure full performance you should **");
+- DP("** install a 'xen-friendly' (nosegneg) version of **");
+- DP("** the library, or disable tls support by executing **");
+- DP("** the following as root: **");
+- DP("** mv /lib/tls /lib/tls.disabled **");
++ DP("** add -mno-tls-direct-seg-refs to your CFLAGS and **");
++	DP("** re-emerge glibc and any statically linked programs        **");
++ DP("** or emerge -e world to catch everything. **");
+ DP("** Offending process: %-38.38s **", info);
+ DP("***************************************************************");
+ DP("***************************************************************");