1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
|
diff -urN common/lib/modules/fglrx/build_mod/firegl_public.c~ common/lib/modules/fglrx/build_mod/firegl_public.c
--- common/lib/modules/fglrx/build_mod/firegl_public.c~ 2007-10-22 11:30:01.000000000 -0500
+++ common/lib/modules/fglrx/build_mod/firegl_public.c 2007-10-24 13:31:08.000000000 -0500
@@ -796,7 +796,7 @@
// since privdev->pcidev is acquired in X server, use pdev
// directly here to allow suspend/resume without X server start.
- firegl_pci_save_state(pdev, privdev);
+ firegl_pci_save_state((__ke_pci_dev_t*)pdev, privdev);
pci_disable_device(pdev);
PMSG_EVENT(pdev->dev.power.power_state) = state;
}
@@ -838,7 +838,7 @@
// PCI config space needs to be restored very early, in particular
// before pci_set_master!
- firegl_pci_restore_state(pdev, privdev);
+ firegl_pci_restore_state((__ke_pci_dev_t*)pdev, privdev);
if (pci_enable_device(pdev))
{
@@ -2016,7 +2016,11 @@
__ke_pci_dev_t* ATI_API_CALL __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from)
{
- return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
+ return (__ke_pci_dev_t*)pci_get_device( vendor, dev, (struct pci_dev *)(void *)from );
+#else
+ return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
+#endif
}
void* ATI_API_CALL __ke_malloc(__ke_size_t size)
@@ -2487,16 +2491,80 @@
}
#ifndef ptep_clear_flush_dirty
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty = ptep_test_and_clear_dirty(__ptep); \
- if (__dirty) \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
+/** \brief Test and clear the "dirty" bit in the page table entry
+ *
+ * \param vma Pointer to the memory region structure
+ * \param addr Virtual address covered by vma
+ * \param ptep Pointer to the table entry structure
+ *
+ * \return Old value of the "dirty" flag
+ *
+ */
+static inline int ptep_clear_flush_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ int ret = 0;
+
+ DBG_ENTER("0x%08X, 0x%08X, 0x%08X->0x%08X", vma, addr, ptep, *ptep);
+
+ if (pte_dirty(*ptep))
+ {
+#ifdef __x86_64__
+ DBG_TRACE("Test and clear bit %d in 0x%08X", _PAGE_BIT_DIRTY, ptep->pte);
+ ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
+#else
+ DBG_TRACE("Test and clear bit %d in 0x%08X", _PAGE_BIT_DIRTY, ptep->pte_low);
+ ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
+
+ // Since we modify PTE directly, it needs to inform the hypervisor
+ if (ret)
+ {
+ pte_update(vma->vm_mm, addr, ptep);
+ }
+#endif
+ }
+
+ DBG_TRACE("0x%08X->0x%08X", ptep, *ptep);
+
+ // Flush Translation Lookaside Buffers
+ if (ret)
+ {
+ flush_tlb_page(vma, addr);
+ }
+
+ DBG_LEAVE("%d", ret);
+
+ return ret;
+}
+#endif
+
+#ifdef pte_offset_atomic
+#define PTE_OFFSET_FUNC pte_offset_atomic
+#define PTE_UNMAP_FUNC(p) pte_kunmap(p)
+#else
+#ifdef pte_offset_map
+#define PTE_OFFSET_FUNC pte_offset_map
+#define PTE_UNMAP_FUNC(p) pte_unmap(p)
+#else
+#ifdef pte_offset_kernel
+#define PTE_OFFSET_FUNC pte_offset_kernel
+#define PTE_UNMAP_FUNC(p) do {} while (0)
+#else
+#define PTE_OFFSET_FUNC pte_offset
+#define PTE_UNMAP_FUNC(p) do {} while (0)
+#endif
+#endif
#endif
-int ATI_API_CALL __ke_vm_test_and_clear_dirty(struct mm_struct* mm, unsigned long virtual_addr)
+/** \brief Test and clear the "dirty" bit in the page table entry referred by
+ * the virtual address
+ *
+ * \param mm Pointer to the memory descriptor structure
+ * \param virtual_addr Virtual address
+ *
+ * \return Old value of the "dirty" flag on success or negative on error
+ *
+ */
+int ATI_API_CALL KCL_TestAndClearPageDirtyFlag(struct mm_struct* mm, unsigned long virtual_addr)
{
int ret = -1; // init with page not present
pgd_t* pgd_p;
@@ -2530,37 +2598,16 @@
}
__KE_DEBUG("pmd_p=0x%08lx\n", (unsigned long)pmd_p);
-#ifdef pte_offset_atomic
- pte_p = pte_offset_atomic(pmd_p, virtual_addr);
- if (pte_present(*pte_p))
- ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
- else
- __KE_DEBUG("page not exists!\n");
- pte_kunmap(pte_p);
-#else
-#ifdef pte_offset_map
- pte_p = pte_offset_map(pmd_p, virtual_addr);
- if (pte_present(*pte_p))
- ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
- else
- __KE_DEBUG("page not exists!\n");
- pte_unmap(pte_p);
-#else
-#ifdef pte_offset_kernel
- pte_p = pte_offset_kernel(pmd_p, virtual_addr);
- if (pte_present(*pte_p))
- ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
- else
- __KE_DEBUG("page not exists!\n");
-#else
- pte_p = pte_offset(pmd_p, virtual_addr);
+ pte_p = PTE_OFFSET_FUNC(pmd_p, virtual_addr);
if (pte_present(*pte_p))
+ {
ret = (ptep_clear_flush_dirty(vma, virtual_addr, pte_p) ? 1 : 0);
+ }
else
+ {
__KE_DEBUG("page not exists!\n");
-#endif
-#endif
-#endif
+ }
+ PTE_UNMAP_FUNC(pte_p);
if (debuglevel > 2)
{
@@ -2946,20 +2993,35 @@
#else
static void ATI_API_CALL (*irq_handler_func)(int, void*, void*); /* function pointer variable */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1, struct pt_regs *regs)
{
irq_handler_func(irq, arg1, regs);
return IRQ_HANDLED;
}
-
-int ATI_API_CALL __ke_request_irq(unsigned int irq,
+#else
+static irqreturn_t ke_irq_handler_wrap(int irq, void *arg1)
+{
+ irq_handler_func(irq, arg1, (void *)0);
+ return IRQ_HANDLED;
+}
+#endif
+
+int ATI_API_CALL __ke_request_irq(unsigned int irq,
void (*ATI_API_CALL handler)(int, void *, void *),
const char *dev_name, void *dev_id)
{
irq_handler_func = handler;
- return request_irq(irq,
+ return request_irq(
+ irq,
ke_irq_handler_wrap,
- SA_SHIRQ, dev_name, dev_id);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ SA_SHIRQ,
+#else
+ IRQF_SHARED,
+#endif
+ dev_name,
+ dev_id);
}
void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id)
@@ -3530,12 +3592,10 @@
#else
*phys_address = pte_val(pte) & (u64)((u64)PAGE_MASK | (u64)0xf<<32);
#endif
- sprintf(buf, "0x%Lx %c%c%c%c%c%c\n",
+ sprintf(buf, "0x%Lx %c%c%c%c\n",
*phys_address,
pte_present (pte) ? 'p' : '-',
- pte_read (pte) ? 'r' : '-',
pte_write (pte) ? 'w' : '-',
- pte_exec (pte) ? 'x' : '-',
pte_dirty (pte) ? 'd' : '-',
pte_young (pte) ? 'a' : '-');
}
@@ -5436,7 +5496,11 @@
/** \brief Type definition of the structure describing Slab Cache object */
typedef struct tag_kasSlabCache_t
{
- kmem_cache_t* cache; /* OS slab cache object */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ struct kmem_cache *cache; /* OS slab cache object */
+#else
+ kmem_cache_t *cache; /* OS slab cache object */
+#endif
spinlock_t lock; /* OS spinlock object protecting the cache */
unsigned int routine_type; /* Type of routine the cache might be accessed from */
char name[14]; /* Cache object name (kernel 2.4 restricts its length to 19 chars) */
@@ -5482,8 +5546,12 @@
DBG_TRACE("creating slab object '%s'", slabcache_obj->name);
if ((slabcache_obj->cache =
- kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL, NULL)))
- {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL, NULL)))
+#else
+ kmem_cache_create(slabcache_obj->name, iEntrySize, 0, 0, NULL)))
+#endif
+    {
ret = 1;
}
diff -urN common/lib/modules/fglrx/build_mod/firegl_public.h~ common/lib/modules/fglrx/build_mod/firegl_public.h
--- common/lib/modules/fglrx/build_mod/firegl_public.h~ 2007-10-22 11:30:01.000000000 -0500
+++ common/lib/modules/fglrx/build_mod/firegl_public.h 2007-10-24 13:31:08.000000000 -0500
@@ -241,9 +241,14 @@
/*****************************************************************************/
typedef unsigned long __ke_dev_t;
-typedef unsigned long __ke_size_t;
typedef unsigned long __ke_off_t;
+#ifdef __x86_64__
typedef long __ke_ssize_t;
+typedef unsigned long __ke_size_t;
+#else
+typedef int __ke_ssize_t;
+typedef unsigned int __ke_size_t;
+#endif
typedef unsigned char __ke_u8;
typedef unsigned short __ke_u16;
typedef unsigned int __ke_u32;
@@ -594,7 +599,7 @@
extern char* ATI_API_CALL __ke_strchr(const char *s, int c);
extern int ATI_API_CALL __ke_sprintf(char* buf, const char* fmt, ...);
extern int ATI_API_CALL __ke_snprintf(char* buf, size_t size, const char* fmt, ...);
-extern int ATI_API_CALL __ke_vm_test_and_clear_dirty(struct mm_struct* mm, unsigned long virtual_addr);
+extern int ATI_API_CALL KCL_TestAndClearPageDirtyFlag(struct mm_struct* mm, unsigned long virtual_addr);
extern unsigned long ATI_API_CALL __ke_do_mmap(struct file * file, unsigned long addr, unsigned long len, unsigned long pgoff);
extern int ATI_API_CALL __ke_do_munmap(unsigned long addr, unsigned long len);
extern void* ATI_API_CALL __ke_vmap(unsigned long *pagelist, unsigned int count);
|