Diffstat (limited to 'x11-drivers/ati-drivers/files/8.42.3')
-rw-r--r-- | x11-drivers/ati-drivers/files/8.42.3/ati-drivers-2.6.23-2.patch | 327
1 file changed, 263 insertions, 64 deletions
diff --git a/x11-drivers/ati-drivers/files/8.42.3/ati-drivers-2.6.23-2.patch b/x11-drivers/ati-drivers/files/8.42.3/ati-drivers-2.6.23-2.patch
index 4ab3ac6..5f1a34f 100644
--- a/x11-drivers/ati-drivers/files/8.42.3/ati-drivers-2.6.23-2.patch
+++ b/x11-drivers/ati-drivers/files/8.42.3/ati-drivers-2.6.23-2.patch
@@ -1,78 +1,277 @@
-diff -Naur common/lib/modules/fglrx/build_mod/firegl_public.c.old common/lib/modules/fglrx/build_mod/firegl_public.c
---- common/lib/modules/fglrx/build_mod/firegl_public.c	2007-10-24 14:10:52.000000000 +0000
-+++ common/lib/modules/fglrx/build_mod/firegl_public.c	2007-10-24 14:14:39.000000000 +0000
-@@ -217,6 +217,56 @@
- #define preempt_enable()
- #endif
+diff -urN common/lib/modules/fglrx/build_mod/firegl_public.c~ common/lib/modules/fglrx/build_mod/firegl_public.c
+--- common/lib/modules/fglrx/build_mod/firegl_public.c~	2007-10-22 11:30:01.000000000 -0500
++++ common/lib/modules/fglrx/build_mod/firegl_public.c	2007-10-24 13:31:08.000000000 -0500
+@@ -796,7 +796,7 @@
+ 
+         // since privdev->pcidev is acquired in X server, use pdev
+         // directly here to allow suspend/resume without X server start.
+-        firegl_pci_save_state(pdev, privdev);
++        firegl_pci_save_state((__ke_pci_dev_t*)pdev, privdev);
+         pci_disable_device(pdev);
+         PMSG_EVENT(pdev->dev.power.power_state) = state;
+     }
+@@ -838,7 +838,7 @@
 
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)
-+#if defined(__i386__)
-+#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
-+    int __ret = 0; \
-+    if (pte_dirty(*(ptep))) \
-+        __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
-+                                   &(ptep)->pte_low); \
-+    if (__ret) \
-+        pte_update((vma)->vm_mm, addr, ptep); \
-+    __ret; \
-+})
-+
-+static inline int pte_read(pte_t pte)  { return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_user(pte_t pte)  { return (pte).pte_low & _PAGE_USER; }
-+#ifdef CONFIG_X86_PAE
-+/*
-+ * Is the pte executable?
-+ */
-+static inline int pte_x(pte_t pte)
-+{
-+    return !(pte_val(pte) & _PAGE_NX);
-+}
-+
-+/*
-+ * All present user-pages with !NX bit are user-executable:
+     // PCI config space needs to be restored very early, in particular
+     // before pci_set_master!
+-    firegl_pci_restore_state(pdev, privdev);
++    firegl_pci_restore_state((__ke_pci_dev_t*)pdev, privdev);
+ 
+     if (pci_enable_device(pdev))
+     {
+@@ -2016,7 +2016,11 @@
+ 
+ __ke_pci_dev_t* ATI_API_CALL __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from)
+ {
+-    return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++    return (__ke_pci_dev_t*)pci_get_device( vendor, dev, (struct pci_dev *)(void *)from );
++#else
++    return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
++#endif
+ }
+ 
+ void* ATI_API_CALL __ke_malloc(__ke_size_t size)
+@@ -2487,16 +2491,80 @@
+ }
+ 
+ #ifndef ptep_clear_flush_dirty
+-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
+-({ \
+-    int __dirty = ptep_test_and_clear_dirty(__ptep); \
+-    if (__dirty) \
+-        flush_tlb_page(__vma, __address); \
+-    __dirty; \
+-})
++/** \brief Test and clear the "dirty" bit in the page table entry
++ *
++ * \param vma Pointer to the memory region structure
++ * \param addr Virtual address covered by vma
++ * \param ptep Pointer to the table entry structure
++ *
++ * \return Old value of the "dirty" flag
++ *
 + */
-+static inline int pte_exec(pte_t pte)
++static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 +{
-+    return pte_user(pte) && pte_x(pte);
-+}
++    int ret = 0;
++
++    DBG_ENTER("0x%08X, 0x%08X, 0x%08X->0x%08X", vma, addr, ptep, *ptep);
++
++    if (pte_dirty(*ptep))
++    {
++#ifdef __x86_64__
++        DBG_TRACE("Test and clear bit %d in 0x%08X", _PAGE_BIT_DIRTY, ptep->pte);
++        ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
 +#else
-+static inline int pte_exec(pte_t pte)
-+{
-+    return pte_user(pte);
-+}
-+#endif /* PAE */
++        DBG_TRACE("Test and clear bit %d in 0x%08X", _PAGE_BIT_DIRTY, ptep->pte_low);
++        ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
 +
-+#elif defined(__x86_64__)
-+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
-+                                            unsigned long addr, pte_t *ptep)
-+{
-+    if (!pte_dirty(*ptep))
-+        return 0;
-+    return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
++        // Since we modify PTE directly, it needs to inform the hypervisor
++        if (ret)
++        {
++            pte_update(vma->vm_mm, addr, ptep);
++        }
++#endif
++    }
++
++    DBG_TRACE("0x%08X->0x%08X", ptep, *ptep);
++
++    // Flush Translation Lookaside Buffers
++    if (ret)
++    {
++        flush_tlb_page(vma, addr);
++    }
++
++    DBG_LEAVE("%d", ret);
++
++    return ret;
 +}
-+static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
-+static inline int pte_exec(pte_t pte)  { return !(pte_val(pte) & _PAGE_NX); }
-+#endif
 +#endif
 +
- // ============================================================
- /* globals */
++#ifdef pte_offset_atomic
++#define PTE_OFFSET_FUNC pte_offset_atomic
++#define PTE_UNMAP_FUNC(p) pte_kunmap(p)
++#else
++#ifdef pte_offset_map
++#define PTE_OFFSET_FUNC pte_offset_map
++#define PTE_UNMAP_FUNC(p) pte_unmap(p)
++#else
++#ifdef pte_offset_kernel
++#define PTE_OFFSET_FUNC pte_offset_kernel
++#define PTE_UNMAP_FUNC(p) do {} while (0)
++#else
++#define PTE_OFFSET_FUNC pte_offset
++#define PTE_UNMAP_FUNC(p) do {} while (0)
++#endif
++#endif
+ #endif
 
-@@ -2490,7 +2540,7 @@
- #ifndef ptep_clear_flush_dirty
- #define ptep_clear_flush_dirty(__vma, __address, __ptep) \
- ({ \
--    int __dirty = ptep_test_and_clear_dirty(__ptep); \
-+    int __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
-     if (__dirty) \
-         flush_tlb_page(__vma, __address); \
-     __dirty; \
-@@ -2937,7 +2987,7 @@
+ 
+-int ATI_API_CALL __ke_vm_test_and_clear_dirty(struct mm_struct* mm, unsigned long virtual_addr)
++/** \brief Test and clear the "dirty" bit in the page table entry referred by
++ *         the virtual address
++ *
++ * \param mm Pointer to the memory descriptor structure
++ * \param virtual_addr Virtual address
++ *
++ * \return Old value of the "dirty" flag on success or negative on error
++ *
++ */
++int ATI_API_CALL KCL_TestAndClearPageDirtyFlag(struct mm_struct* mm, unsigned long virtual_addr)
+ {
+     int ret = -1;  // init with page not present
+     pgd_t* pgd_p;
+@@ -2530,37 +2598,16 @@
+     }
+     __KE_DEBUG("pmd_p=0x%08lx\n", (unsigned long)pmd_p);
+ 
+-#ifdef pte_offset_atomic
+-    pte_p = pte_offset_atomic(pmd_p, virtual_addr);
+-    if (pte_present(*pte_p))
+-        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
+-    else
+-        __KE_DEBUG("page not exists!\n");
+-    pte_kunmap(pte_p);
+-#else
+-#ifdef pte_offset_map
+-    pte_p = pte_offset_map(pmd_p, virtual_addr);
+-    if (pte_present(*pte_p))
+-        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
+-    else
+-        __KE_DEBUG("page not exists!\n");
+-    pte_unmap(pte_p);
+-#else
+-#ifdef pte_offset_kernel
+-    pte_p = pte_offset_kernel(pmd_p, virtual_addr);
+-    if (pte_present(*pte_p))
+-        ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
+-    else
+-        __KE_DEBUG("page not exists!\n");
+-#else
+-    pte_p = pte_offset(pmd_p, virtual_addr);
++    pte_p = PTE_OFFSET_FUNC(pmd_p, virtual_addr);
+     if (pte_present(*pte_p))
++    {
+         ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
++    }
+     else
++    {
+         __KE_DEBUG("page not exists!\n");
+-#endif
+-#endif
+-#endif
++    }
++    PTE_UNMAP_FUNC(pte_p);
+ 
+     if (debuglevel > 2)
+     {
+@@ -2946,20 +2993,35 @@
+ #else
+ static void ATI_API_CALL (*irq_handler_func)(int, void*, void*);   /* function pointer variable */
+ 
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1, struct pt_regs *regs)
+ {
+     irq_handler_func(irq, arg1, regs);
+     return IRQ_HANDLED;
+ }
+-
+-int ATI_API_CALL __ke_request_irq(unsigned int irq, 
++#else
++static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1)
++{
++    irq_handler_func(irq, arg1, (void *)0);
++    return IRQ_HANDLED;
++}
++#endif
++
++int ATI_API_CALL __ke_request_irq(unsigned int irq, 
+     void (*ATI_API_CALL handler)(int, void *, void *),
+     const char *dev_name, void *dev_id)
  {
-     return request_irq(irq,
-                        (void(*)(int, void *, struct pt_regs *))handler,
+     irq_handler_func = handler;
+-    return request_irq(irq,
++    return request_irq(
++        irq,
+         ke_irq_handler_wrap,
+-        SA_SHIRQ, dev_name, dev_id);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++        SA_SHIRQ,
++#else
++        IRQF_SHARED,
++#endif
++        dev_name,
++        dev_id);
  }
  
  void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id)
+@@ -3530,12 +3592,10 @@
+ #else
+     *phys_address = pte_val(pte) & (u64)((u64)PAGE_MASK | (u64)0xf<<32);
+ #endif
+-    sprintf(buf, "0x%Lx %c%c%c%c%c%c\n",
++    sprintf(buf, "0x%Lx %c%c%c%c\n",
+             *phys_address,
+             pte_present (pte) ? 'p' : '-',
+-            pte_read    (pte) ? 'r' : '-',
+             pte_write   (pte) ? 'w' : '-',
+-            pte_exec    (pte) ? 'x' : '-',
+             pte_dirty   (pte) ? 'd' : '-',
+             pte_young   (pte) ? 'a' : '-');
+ }
+@@ -5436,7 +5496,11 @@
+ /** \brief Type definition of the structure describing Slab Cache object */
+ typedef struct tag_kasSlabCache_t
+ {
+-    kmem_cache_t* cache;          /* OS slab cache object */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++    struct kmem_cache *cache;     /* OS slab cache object */
++#else
++    kmem_cache_t *cache;          /* OS slab cache object */
++#endif
+     spinlock_t lock;              /* OS spinlock object protecting the cache */
+     unsigned int routine_type;    /* Type of routine the cache might be accessed from */
+     char name[14];                /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
+@@ -5482,8 +5546,12 @@
+     DBG_TRACE("creating slab object '%s'", slabcache_obj->name);
+ 
+     if ((slabcache_obj->cache =
+-         kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL, NULL)))
+-    {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
++         kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL, NULL)))
++#else
++         kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL)))
++#endif
++{
+         ret = 1;
+     }
+ 
+diff -urN common/lib/modules/fglrx/build_mod/firegl_public.h~ common/lib/modules/fglrx/build_mod/firegl_public.h
+--- common/lib/modules/fglrx/build_mod/firegl_public.h~	2007-10-22 11:30:01.000000000 -0500
++++ common/lib/modules/fglrx/build_mod/firegl_public.h	2007-10-24 13:31:08.000000000 -0500
+@@ -241,9 +241,14 @@
+ /*****************************************************************************/
+ 
+ typedef unsigned long __ke_dev_t;
+-typedef unsigned long __ke_size_t;
+ typedef unsigned long __ke_off_t;
++#ifdef __x86_64__
+ typedef long __ke_ssize_t;
++typedef unsigned long __ke_size_t;
++#else
++typedef int __ke_ssize_t;
++typedef unsigned int __ke_size_t;
++#endif
+ typedef unsigned char __ke_u8;
+ typedef unsigned short __ke_u16;
+ typedef unsigned int __ke_u32;
+@@ -594,7 +599,7 @@
+ extern char* ATI_API_CALL __ke_strchr(const char *s, int c);
+ extern int ATI_API_CALL __ke_sprintf(char* buf, const char* fmt, ...);
+ extern int ATI_API_CALL __ke_snprintf(char* buf, size_t size, const char* fmt, ...);
+-extern int ATI_API_CALL __ke_vm_test_and_clear_dirty(struct mm_struct* mm, unsigned long virtual_addr);
++extern int ATI_API_CALL KCL_TestAndClearPageDirtyFlag(struct mm_struct* mm, unsigned long virtual_addr);
+ extern unsigned long ATI_API_CALL __ke_do_mmap(struct file * file, unsigned long addr, unsigned long len, unsigned long pgoff);
+ extern int ATI_API_CALL __ke_do_munmap(unsigned long addr, unsigned long len);
+ extern void* ATI_API_CALL __ke_vmap(unsigned long *pagelist, unsigned int count);