author     Anthony G. Basile <blueness@gentoo.org>    2012-08-12 22:49:30 -0400
committer  Anthony G. Basile <blueness@gentoo.org>    2012-08-12 22:49:30 -0400
commit     5aee5450202ce0b4dc63ea988f6d945d20f2fb54 (patch)
tree       15e7374d08da29c53b8a555bc562035004f87773
parent     Grsec/PaX: 2.9.1-3.5.1-201208091728 (diff)
download   hardened-patchset-5aee5450202ce0b4dc63ea988f6d945d20f2fb54.tar.gz
           hardened-patchset-5aee5450202ce0b4dc63ea988f6d945d20f2fb54.tar.bz2
           hardened-patchset-5aee5450202ce0b4dc63ea988f6d945d20f2fb54.zip
Grsec/PaX: 2.9.1-{2.6.32.59,3.2.27,3.5.1}-201208120907
-rw-r--r--  2.6.32/0000_README | 2
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208120916.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208062015.patch) | 1308
-rw-r--r--  3.2.27/0000_README (renamed from 3.2.26/0000_README) | 6
-rw-r--r--  3.2.27/1021_linux-3.2.22.patch (renamed from 3.2.26/1021_linux-3.2.22.patch) | 0
-rw-r--r--  3.2.27/1022_linux-3.2.23.patch (renamed from 3.2.26/1022_linux-3.2.23.patch) | 0
-rw-r--r--  3.2.27/1023_linux-3.2.24.patch (renamed from 3.2.26/1023_linux-3.2.24.patch) | 0
-rw-r--r--  3.2.27/1024_linux-3.2.25.patch (renamed from 3.2.26/1024_linux-3.2.25.patch) | 0
-rw-r--r--  3.2.27/1025_linux-3.2.26.patch (renamed from 3.2.26/1025_linux-3.2.26.patch) | 0
-rw-r--r--  3.2.27/1026_linux-3.2.27.patch | 3188
-rw-r--r--  3.2.27/4420_grsecurity-2.9.1-3.2.27-201208120907.patch (renamed from 3.2.26/4420_grsecurity-2.9.1-3.2.26-201208062017.patch) | 1886
-rw-r--r--  3.2.27/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.26/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r--  3.2.27/4435_grsec-mute-warnings.patch (renamed from 3.2.26/4435_grsec-mute-warnings.patch) | 0
-rw-r--r--  3.2.27/4440_grsec-remove-protected-paths.patch (renamed from 3.2.26/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r--  3.2.27/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.26/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r--  3.2.27/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.26/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r--  3.2.27/4470_disable-compat_vdso.patch (renamed from 3.2.26/4470_disable-compat_vdso.patch) | 0
-rw-r--r--  3.5.1/0000_README | 2
-rw-r--r--  3.5.1/4420_grsecurity-2.9.1-3.5.1-201208112021.patch (renamed from 3.5.1/4420_grsecurity-2.9.1-3.5.1-201208091728.patch) | 1632
18 files changed, 6960 insertions, 1064 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 80a0bc5..81c756c 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.59-201208062015.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.59-201208120916.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208062015.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208120916.patch
index d00e268..2f59e3d 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208062015.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208120916.patch
@@ -817,7 +817,7 @@ index b68faef..6dd1496 100644
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index d0daeab..1b01223 100644
+index d0daeab..638f5e8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,6 +15,10 @@
@@ -971,7 +971,7 @@ index d0daeab..1b01223 100644
-"1: ldrex %0, [%2]\n"
-" sub %0, %0, %3\n"
+"1: ldrex %1, [%2]\n"
-+" sub %0, %1, %3\n"
++" subs %0, %1, %3\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -20150,15 +20150,15 @@ index 7d35d0f..03f1d52 100644
* Shouldnt happen, we returned above if in_interrupt():
*/
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
-index e444357..437b4c5 100644
+index e444357..e21e51e 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
-@@ -27,6 +27,8 @@ struct setup_data_node {
+@@ -26,7 +26,7 @@ struct setup_data_node {
+ u32 len;
};
- static ssize_t setup_data_read(struct file *file, char __user *user_buf,
-+ size_t count, loff_t *ppos) __size_overflow(3);
-+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
+-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
++static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct setup_data_node *node = file->private_data;
@@ -25434,7 +25434,7 @@ index bf9a7d5..fb06ab5 100644
ret
CFI_ENDPROC
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
-index 1f118d4..a99a1eb 100644
+index 1f118d4..7d522b8 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -43,7 +43,7 @@ do { \
@@ -25550,7 +25550,7 @@ index 1f118d4..a99a1eb 100644
".section .fixup,\"ax\"\n"
"101: lea 0(%%eax,%0,4),%0\n"
" jmp 100b\n"
-@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+@@ -334,46 +340,153 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
}
static unsigned long
@@ -25660,9 +25660,7 @@ index 1f118d4..a99a1eb 100644
+ return size;
+}
+
-+static unsigned long
-+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long
++static unsigned long __size_overflow(3)
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
int d0, d1;
@@ -25724,7 +25722,7 @@ index 1f118d4..a99a1eb 100644
" movl %%eax, 56(%3)\n"
" movl %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -25736,12 +25734,12 @@ index 1f118d4..a99a1eb 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -433,48 +546,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ * hyoshiok@miraclelinux.com
*/
- static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-+ const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
@@ -25804,7 +25802,7 @@ index 1f118d4..a99a1eb 100644
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -25816,12 +25814,12 @@ index 1f118d4..a99a1eb 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -530,48 +643,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ return size;
}
- static unsigned long __copy_user_intel_nocache(void *to,
-+ const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long __copy_user_intel_nocache(void *to,
+-static unsigned long __copy_user_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
@@ -25884,7 +25882,7 @@ index 1f118d4..a99a1eb 100644
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -25896,7 +25894,7 @@ index 1f118d4..a99a1eb 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
*/
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
unsigned long size);
@@ -25938,7 +25936,7 @@ index 1f118d4..a99a1eb 100644
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 2b\n" \
-@@ -682,14 +805,14 @@ do { \
+@@ -682,14 +799,14 @@ do { \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
@@ -25956,7 +25954,7 @@ index 1f118d4..a99a1eb 100644
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
-@@ -775,9 +898,9 @@ survive:
+@@ -775,9 +892,9 @@ survive:
}
#endif
if (movsl_is_ok(to, from, n))
@@ -25968,7 +25966,7 @@ index 1f118d4..a99a1eb 100644
return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
-@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
unsigned long n)
{
if (movsl_is_ok(to, from, n))
@@ -25981,7 +25979,7 @@ index 1f118d4..a99a1eb 100644
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
-@@ -827,59 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+@@ -827,59 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
else
@@ -72306,7 +72304,7 @@ index ec88ff3..b843a82 100644
cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
diff --git a/fs/namei.c b/fs/namei.c
-index b0afbd4..6579ccc 100644
+index b0afbd4..e3fc8f4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
@@ -72362,7 +72360,7 @@ index b0afbd4..6579ccc 100644
dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
+
-+ if (!error && gr_handle_symlink_owner(path, nd->path.dentry->d_inode))
++ if (!error && !(nd->flags & LOOKUP_PARENT) && gr_handle_symlink_owner(path, nd->path.dentry->d_inode))
+ error = -EACCES;
+
path_put(path);
@@ -84526,7 +84524,7 @@ index 0000000..13e8574
+}
diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
new file mode 100644
-index 0000000..35a96d1
+index 0000000..a023dcf
--- /dev/null
+++ b/grsecurity/grsec_link.c
@@ -0,0 +1,59 @@
@@ -84543,7 +84541,7 @@ index 0000000..35a96d1
+
+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
+ /* ignore root-owned links, e.g. /proc/self */
-+ link_inode->i_uid &&
++ link_inode->i_uid && target &&
+ link_inode->i_uid != target->i_uid) {
+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
+ return 1;
@@ -91013,7 +91011,7 @@ index 850d057..aa58075 100644
ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
-index 0ec00b3..39cb7fc 100644
+index 0ec00b3..22b4715 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
@@ -91025,16 +91023,17 @@ index 0ec00b3..39cb7fc 100644
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
-@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+@@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc_node(size, flags, -1);
}
-+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *__kmalloc(size_t size, gfp_t flags)
+-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags);
+ }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
-index 5ad70a6..08563d8 100644
+index 5ad70a6..108e1dc 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -86,7 +86,7 @@ struct kmem_cache {
@@ -91046,15 +91045,16 @@ index 5ad70a6..08563d8 100644
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
-@@ -145,6 +145,7 @@ extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+@@ -145,7 +145,7 @@ extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
* Sorry that the following has to be that ugly but some versions of GCC
* have trouble with constant propagation and loops.
*/
-+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
- static __always_inline int kmalloc_index(size_t size)
+-static __always_inline int kmalloc_index(size_t size)
++static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
{
if (!size)
-@@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
+ return 0;
+@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
#endif
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -91063,15 +91063,16 @@ index 5ad70a6..08563d8 100644
#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
-@@ -227,6 +228,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+@@ -227,7 +227,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
}
#endif
-+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
-@@ -263,7 +265,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+ void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+@@ -263,7 +263,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
}
#ifdef CONFIG_NUMA
@@ -102671,7 +102672,7 @@ index e48b493..24a601d 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index f34ffd0..6a3753d 100644
+index f34ffd0..95dd843 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -102815,18 +102816,16 @@ index f34ffd0..6a3753d 100644
p = &(*p)->rb_right;
else
BUG();
-@@ -326,6 +368,10 @@ static void purge_vmap_area_lazy(void);
- static struct vmap_area *alloc_vmap_area(unsigned long size,
+@@ -323,7 +365,7 @@ static void purge_vmap_area_lazy(void);
+ * Allocate a region of KVA of the specified size and alignment, within the
+ * vstart and vend.
+ */
+-static struct vmap_area *alloc_vmap_area(unsigned long size,
++static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long align,
unsigned long vstart, unsigned long vend,
-+ int node, gfp_t gfp_mask) __size_overflow(1);
-+static struct vmap_area *alloc_vmap_area(unsigned long size,
-+ unsigned long align,
-+ unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask)
- {
- struct vmap_area *va;
-@@ -1245,6 +1291,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -102843,7 +102842,7 @@ index f34ffd0..6a3753d 100644
if (flags & VM_IOREMAP) {
int bit = fls(size);
-@@ -1484,6 +1540,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;
@@ -102855,17 +102854,7 @@ index f34ffd0..6a3753d 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1584,6 +1645,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
- */
- static void *__vmalloc_node(unsigned long size, unsigned long align,
- gfp_t gfp_mask, pgprot_t prot,
-+ int node, void *caller) __size_overflow(1);
-+static void *__vmalloc_node(unsigned long size, unsigned long align,
-+ gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller)
- {
- struct vm_struct *area;
-@@ -1594,6 +1658,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
+@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
return NULL;
@@ -102880,7 +102869,7 @@ index f34ffd0..6a3753d 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
VMALLOC_START, VMALLOC_END, node,
gfp_mask, caller);
-@@ -1698,10 +1770,9 @@ EXPORT_SYMBOL(vmalloc_node);
+@@ -1698,10 +1763,9 @@ EXPORT_SYMBOL(vmalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -102892,7 +102881,7 @@ index f34ffd0..6a3753d 100644
-1, __builtin_return_address(0));
}
-@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -1998,6 +2062,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
@@ -102901,7 +102890,7 @@ index f34ffd0..6a3753d 100644
if ((PAGE_SIZE-1) & (unsigned long)addr)
return -EINVAL;
-@@ -2250,8 +2323,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+@@ -2250,8 +2316,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}
@@ -111240,10 +111229,10 @@ index 0000000..b8008f7
+}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
-index 0000000..eb35e4a
+index 0000000..42bbc4f
--- /dev/null
+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,1851 @@
+@@ -0,0 +1,2270 @@
+_000001_hash alloc_dr 2 65495 _000001_hash NULL
+_000002_hash __copy_from_user 3 10918 _000002_hash NULL
+_000003_hash __copy_from_user_inatomic 3 4365 _000003_hash NULL
@@ -111253,7 +111242,7 @@ index 0000000..eb35e4a
+_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL
+_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL
+_000009_hash __kmalloc 1 23231 _000009_hash NULL
-+_000010_hash kmalloc 1 60432 _000010_hash NULL
++_000010_hash kmalloc 1 60432 _002402_hash NULL nohasharray
+_000011_hash kmalloc_slab 1 11917 _000011_hash NULL
+_000012_hash kmemdup 2 64015 _000012_hash NULL
+_000013_hash __krealloc 2 14857 _000657_hash NULL nohasharray
@@ -111428,7 +111417,7 @@ index 0000000..eb35e4a
+_000187_hash ipc_rcu_alloc 1 21208 _000566_hash NULL nohasharray
+_000188_hash ip_vs_create_timeout_table 2 64478 _000188_hash NULL
+_000189_hash ipw_queue_tx_init 3 49161 _000189_hash NULL
-+_000190_hash irias_new_octseq_value 2 13596 _000190_hash NULL
++_000190_hash irias_new_octseq_value 2 13596 _002230_hash NULL nohasharray
+_000191_hash isdn_add_channels 3 40905 _000191_hash NULL
+_000192_hash isdn_ppp_fill_rq 2 41428 _000192_hash NULL
+_000193_hash isdn_read 3 50021 _000193_hash NULL
@@ -111443,7 +111432,7 @@ index 0000000..eb35e4a
+_000203_hash keyctl_instantiate_key 3 41855 _000203_hash NULL
+_000204_hash keyctl_update_key 3 26061 _000204_hash NULL
+_000205_hash kfifo_alloc 1 65242 _000205_hash NULL
-+_000206_hash kmalloc_node 1 50163 _000206_hash NULL
++_000206_hash kmalloc_node 1 50163 _002227_hash NULL nohasharray
+_000207_hash kmem_alloc 1 31920 _000207_hash NULL
+_000208_hash kmsg_write 3 26398 _000208_hash NULL
+_000209_hash kobj_map 2-3 9566 _000209_hash NULL
@@ -111453,7 +111442,7 @@ index 0000000..eb35e4a
+_000214_hash lane2_associate_req 4 45398 _000214_hash NULL
+_000215_hash lbs_debugfs_write 3 48413 _000215_hash NULL
+_000216_hash ldm_frag_add 2 5611 _000216_hash NULL
-+_000217_hash libipw_alloc_txb 1 27579 _000217_hash NULL
++_000217_hash libipw_alloc_txb 1-3-2 27579 _000217_hash NULL
+_000218_hash listxattr 3 12769 _000218_hash NULL
+_000219_hash load_module 2 60056 _000219_hash NULL
+_000220_hash load_msg 2 95 _000220_hash NULL
@@ -111525,7 +111514,7 @@ index 0000000..eb35e4a
+_000289_hash restore_i387_fxsave 2 17528 _000289_hash NULL
+_000290_hash rndis_add_response 2 58544 _000290_hash NULL
+_000291_hash rndis_set_oid 4 6547 _000291_hash NULL
-+_000292_hash rngapi_reset 3 34366 _000292_hash NULL
++_000292_hash rngapi_reset 3 34366 _002129_hash NULL nohasharray
+_000293_hash rpc_malloc 2 43573 _000293_hash NULL
+_000294_hash rvmalloc 1 46873 _000294_hash NULL
+_000295_hash rw_copy_check_uvector 3 45748 _000295_hash NULL
@@ -111560,7 +111549,7 @@ index 0000000..eb35e4a
+_000324_hash sl_realloc_bufs 2 64086 _000324_hash NULL
+_000325_hash smb_do_alloc_request 2 43708 _000325_hash NULL
+_000326_hash snd_ctl_elem_user_tlv 3 11695 _000326_hash NULL
-+_000327_hash snd_emu10k1_fx8010_read 5-6 9605 _000327_hash NULL
++_000327_hash snd_emu10k1_fx8010_read 5-6 9605 _002373_hash NULL nohasharray
+_000329_hash snd_midi_channel_init_set 1 30092 _000329_hash NULL
+_000330_hash snd_midi_event_new 1 9893 _000465_hash NULL nohasharray
+_000331_hash snd_opl4_mem_proc_read 5-6 63774 _000331_hash NULL
@@ -111630,7 +111619,7 @@ index 0000000..eb35e4a
+_000400_hash zd_usb_rfwrite 3 42300 _000400_hash NULL
+_000401_hash zoran_write 3 22404 _000401_hash NULL
+_000402_hash acpi_battery_write_alarm 3 1240 _000402_hash NULL
-+_000403_hash acpi_ex_allocate_name_string 2 7685 _000403_hash NULL
++_000403_hash acpi_ex_allocate_name_string 2 7685 _002079_hash NULL nohasharray
+_000404_hash acpi_fan_write_state 3 39818 _000404_hash NULL
+_000405_hash acpi_os_allocate_zeroed 1 37422 _000405_hash NULL
+_000406_hash acpi_processor_write_limit 3 23201 _000406_hash NULL
@@ -111674,7 +111663,7 @@ index 0000000..eb35e4a
+_000448_hash asd_store_update_bios 4 10165 _000448_hash NULL
+_000449_hash ata_host_alloc 2 46094 _000449_hash NULL
+_000450_hash ath_descdma_setup 5 22128 _000450_hash NULL
-+_000451_hash ati_create_gatt_pages 1 4722 _000451_hash NULL
++_000451_hash ati_create_gatt_pages 1 4722 _002368_hash NULL nohasharray
+_000452_hash au0828_init_isoc 2-3 61917 _000452_hash NULL
+_000454_hash audio_write 4 54261 _001280_hash NULL nohasharray
+_000455_hash audit_init_entry 1 38644 _000455_hash NULL
@@ -111786,7 +111775,7 @@ index 0000000..eb35e4a
+_000572_hash garmin_read_process 3 27509 _000572_hash NULL
+_000573_hash garp_request_join 4 7471 _000573_hash NULL
+_000574_hash get_arg 3 5694 _000574_hash NULL
-+_000575_hash get_entry 4 16003 _000575_hash NULL
++_000575_hash get_entry 4 16003 _002030_hash NULL nohasharray
+_000576_hash get_free_de 2 33714 _000576_hash NULL
+_000577_hash get_new_cssid 2 51665 _000577_hash NULL
+_000578_hash get_ucode_user 3 38202 _000578_hash NULL
@@ -111865,7 +111854,7 @@ index 0000000..eb35e4a
+_000657_hash lcd_write 3 14857 _000657_hash &_000013_hash
+_000658_hash leaf_dealloc 3 24706 _000658_hash NULL
+_000659_hash __lgread 4 31668 _000659_hash NULL
-+_000660_hash linear_conf 2 23485 _000660_hash NULL
++_000660_hash linear_conf 2 23485 _002341_hash NULL nohasharray
+_000661_hash LoadBitmap 2 19658 _000661_hash NULL
+_000662_hash lpfc_sli4_queue_alloc 3 62646 _000662_hash NULL
+_000663_hash lp_write 3 9511 _000663_hash NULL
@@ -111982,7 +111971,7 @@ index 0000000..eb35e4a
+_000782_hash sctp_setsockopt_rtoinfo 3 30941 _000782_hash NULL
+_000783_hash sctp_tsnmap_init 2 36446 _000783_hash NULL
+_000784_hash security_context_to_sid 2 19839 _000784_hash NULL
-+_000785_hash security_context_to_sid_default 2 3492 _000785_hash NULL
++_000785_hash security_context_to_sid_default 2 3492 _002278_hash NULL nohasharray
+_000786_hash security_context_to_sid_force 2 20724 _000786_hash NULL
+_000787_hash sel_commit_bools_write 3 46077 _000787_hash NULL
+_000788_hash sel_write_access 3 51704 _000788_hash NULL
@@ -112045,7 +112034,7 @@ index 0000000..eb35e4a
+_000848_hash spidev_message 3 5518 _000848_hash NULL
+_000849_hash spidev_write 3 44510 _000849_hash NULL
+_000850_hash squashfs_cache_init 2 41656 _000850_hash NULL
-+_000851_hash squashfs_read_table 4 29235 _000851_hash NULL
++_000851_hash squashfs_read_table 4 29235 _002237_hash NULL nohasharray
+_000852_hash srp_alloc_iu 2 44227 _000852_hash NULL
+_000853_hash srp_iu_pool_alloc 2 17920 _000853_hash NULL
+_000854_hash srp_ring_alloc 2 26760 _000854_hash NULL
@@ -112124,7 +112113,7 @@ index 0000000..eb35e4a
+_000932_hash wusb_ccm_mac 7 32199 _000932_hash NULL
+_000933_hash _xfs_buf_get_pages 2 59472 _000933_hash NULL
+_000934_hash xfs_da_buf_make 1 25303 _000934_hash NULL
-+_000935_hash xfs_dir_cilookup_result 3 64288 _000935_hash NULL
++_000935_hash xfs_dir_cilookup_result 3 64288 _002349_hash NULL nohasharray
+_000936_hash xfs_handle_to_dentry 3 12135 _000936_hash NULL
+_000937_hash xfs_iext_add_indirect_multi 3 32400 _000937_hash NULL
+_000938_hash xfs_iext_inline_to_direct 2 12384 _000938_hash NULL
@@ -112178,11 +112167,11 @@ index 0000000..eb35e4a
+_000988_hash construct_key_and_link 4 8321 _000988_hash NULL
+_000989_hash copy_entries_to_user 1 52367 _000989_hash NULL
+_000990_hash copy_from_buf 4 27308 _000990_hash NULL
-+_000991_hash copy_oldmem_page 3 26164 _000991_hash NULL
++_000991_hash copy_oldmem_page 3-1 26164 _000991_hash NULL
+_000992_hash copy_to_user_fromio 3 57432 _000992_hash NULL
+_000993_hash create_rsb 3 42744 _000993_hash NULL
+_000994_hash cryptd_hash_setkey 3 42781 _000994_hash NULL
-+_000995_hash crypto_authenc_setkey 3 80 _000995_hash NULL
++_000995_hash crypto_authenc_setkey 3 80 _002298_hash NULL nohasharray
+_000996_hash cx18_copy_buf_to_user 4 50990 _000996_hash NULL
+_000997_hash cxio_init_resource_fifo 3 25429 _000997_hash NULL
+_000998_hash cxio_init_resource_fifo_random 3 43814 _000998_hash NULL
@@ -112207,7 +112196,7 @@ index 0000000..eb35e4a
+_001017_hash drm_mode_create_tv_properties 2 23122 _001017_hash NULL
+_001018_hash dv1394_read 3 21920 _001018_hash NULL
+_001019_hash dvb_audio_write 3 51275 _001019_hash NULL
-+_001020_hash dvb_ringbuffer_pkt_read_user 3-5 4303 _001020_hash NULL
++_001020_hash dvb_ringbuffer_pkt_read_user 3-5-2 4303 _001020_hash NULL
+_001022_hash dvb_ringbuffer_read_user 3 56702 _001022_hash NULL
+_001023_hash dvb_video_write 3 754 _001023_hash NULL
+_001024_hash ecryptfs_filldir 3 6622 _001024_hash NULL
@@ -112405,7 +112394,7 @@ index 0000000..eb35e4a
+_001231_hash st_write 3 16874 _001231_hash NULL
+_001232_hash subbuf_read_actor 3 2071 _001232_hash NULL
+_001233_hash sys_bind 3 10799 _001233_hash NULL
-+_001234_hash sys_connect 3 15291 _001234_hash NULL
++_001234_hash sys_connect 3 15291 _002287_hash NULL nohasharray
+_001235_hash sysctl_ipc_registered_data 5 36266 _001235_hash NULL
+_001236_hash sysctl_tcp_congestion_control 5 27564 _001236_hash NULL
+_001237_hash sysctl_uts_string 5 17797 _001237_hash NULL
@@ -112429,7 +112418,7 @@ index 0000000..eb35e4a
+_001256_hash unix_stream_sendmsg 4 61455 _001256_hash NULL
+_001257_hash usb_allocate_stream_buffers 3 8964 _001257_hash NULL
+_001258_hash usbdev_read 3 45114 _001258_hash NULL
-+_001259_hash usblp_read 3 57342 _001259_hash NULL
++_001259_hash usblp_read 3 57342 _002236_hash NULL nohasharray
+_001260_hash usbtmc_read 3 32377 _001260_hash NULL
+_001261_hash usbvideo_v4l_read 3 48274 _001261_hash NULL
+_001262_hash usbvision_v4l2_read 3 34386 _001262_hash NULL
@@ -112536,8 +112525,8 @@ index 0000000..eb35e4a
+_001368_hash fuse_conn_limit_read 3 20084 _001368_hash NULL
+_001369_hash fuse_conn_waiting_read 3 49762 _001369_hash NULL
+_001370_hash generic_readlink 3 32654 _001370_hash NULL
-+_001371_hash ht40allow_map_read 3 55209 _001371_hash NULL
-+_001372_hash hysdn_conf_read 3 42324 _001372_hash NULL
++_001371_hash ht40allow_map_read 3 55209 _002056_hash NULL nohasharray
++_001372_hash hysdn_conf_read 3 42324 _002388_hash NULL nohasharray
+_001373_hash i2400m_rx_stats_read 3 57706 _001373_hash NULL
+_001374_hash i2400m_tx_stats_read 3 28527 _001374_hash NULL
+_001375_hash idmouse_read 3 63374 _001375_hash NULL
@@ -112627,7 +112616,7 @@ index 0000000..eb35e4a
+_001459_hash mon_bin_read 3 6841 _001459_hash NULL
+_001460_hash mon_stat_read 3 25238 _001460_hash NULL
+_001461_hash mqueue_read_file 3 6228 _001461_hash NULL
-+_001462_hash nfsd_vfs_read 6 62605 _001462_hash NULL
++_001462_hash nfsd_vfs_read 6 62605 _002206_hash NULL nohasharray
+_001463_hash nfsd_vfs_write 6 54577 _001463_hash NULL
+_001464_hash noack_read 3 63419 _001464_hash NULL
+_001465_hash o2hb_debug_read 3 37851 _001465_hash NULL
@@ -112781,7 +112770,7 @@ index 0000000..eb35e4a
+_001621_hash stats_rx_handlers_drop_read 3 3284 _001621_hash NULL
+_001622_hash stats_rx_handlers_drop_short_read 3 45391 _001622_hash NULL
+_001623_hash stats_rx_handlers_fragments_read 3 10356 _001623_hash NULL
-+_001624_hash stats_rx_handlers_queued_read 3 5922 _001624_hash NULL
++_001624_hash stats_rx_handlers_queued_read 3 5922 _002145_hash NULL nohasharray
+_001625_hash stats_transmitted_fragment_count_read 3 28770 _001625_hash NULL
+_001626_hash stats_transmitted_frame_count_read 3 33861 _001626_hash NULL
+_001627_hash stats_tx_expand_skb_head_cloned_read 3 11107 _001627_hash NULL
@@ -112849,7 +112838,7 @@ index 0000000..eb35e4a
+_001689_hash xlog_get_bp 2 23229 _001689_hash NULL
+_001690_hash aac_change_queue_depth 2 51753 _001690_hash NULL
+_001691_hash add_sctp_bind_addr 3 12269 _001691_hash NULL
-+_001692_hash agp_allocate_memory_wrap 1 16576 _001692_hash NULL
++_001692_hash agp_allocate_memory_wrap 1 16576 _002200_hash NULL nohasharray
+_001693_hash arcmsr_adjust_disk_queue_depth 2 34916 _001693_hash NULL
+_001694_hash atalk_recvmsg 4 22053 _001694_hash NULL
+_001695_hash atomic_read_file 3 16227 _001695_hash NULL
@@ -112894,7 +112883,7 @@ index 0000000..eb35e4a
+_001736_hash ieee80211_if_read_force_unicast_rateidx 3 32147 _001736_hash NULL
+_001737_hash ieee80211_if_read_fwded_frames 3 36520 _001737_hash NULL
+_001738_hash ieee80211_if_read_fwded_mcast 3 39571 _001738_hash NULL
-+_001739_hash ieee80211_if_read_fwded_unicast 3 59740 _001739_hash NULL
++_001739_hash ieee80211_if_read_fwded_unicast 3 59740 _002083_hash NULL nohasharray
+_001740_hash ieee80211_if_read_max_ratectrl_rateidx 3 64369 _001740_hash NULL
+_001741_hash ieee80211_if_read_min_discovery_timeout 3 13946 _001741_hash NULL
+_001742_hash ieee80211_if_read_num_buffered_multicast 3 12716 _001742_hash NULL
@@ -113091,16 +113080,435 @@ index 0000000..eb35e4a
+_001944_hash vmemmap_alloc_block 1 43245 _001944_hash NULL
+_001945_hash xpc_kmalloc_cacheline_aligned 1 42895 _001945_hash NULL
+_001946_hash xpc_kzalloc_cacheline_aligned 1 65433 _001946_hash NULL
-+_001947_hash create_table 2 16213 _001947_hash NULL
-+_001948_hash acl_alloc 1 35979 _001948_hash NULL
-+_001949_hash acl_alloc_stack_init 1 60630 _001949_hash NULL
-+_001950_hash acl_alloc_num 1-2 60778 _001950_hash NULL
++_001947_hash alloc_fdtable 1 17389 _001947_hash NULL
++_001948_hash alloc_ldt 2 21972 _001948_hash NULL
++_001949_hash __alloc_skb 1 23940 _001949_hash NULL
++_001950_hash ata_scsi_change_queue_depth 2 37702 _001950_hash NULL
++_001951_hash ccid3_hc_rx_getsockopt 3 62331 _001951_hash NULL
++_001952_hash ccid3_hc_tx_getsockopt 3 16314 _001952_hash NULL
++_001953_hash cistpl_vers_1 4 15023 _001953_hash NULL
++_001954_hash cmm_read 3 57520 _001954_hash NULL
++_001955_hash cosa_read 3 25966 _001955_hash NULL
++_001956_hash csr1212_append_new_cache 2 32202 _001956_hash NULL
++_001957_hash dma_region_alloc 2 56079 _001957_hash NULL
++_001958_hash dm_table_create 3 35687 _001958_hash NULL
++_001959_hash do_write_orph_node 2 64343 _001959_hash NULL
++_001961_hash ep0_read 3 38095 _001961_hash NULL
++_001962_hash event_buffer_read 3 48772 _001962_hash NULL
++_001963_hash extract_entropy_user 3 26952 _001963_hash NULL
++_001964_hash get_fd_set 1 3866 _001964_hash NULL
++_001965_hash __get_vm_area_node 1 55305 _001965_hash NULL
++_001966_hash hpsb_alloc_packet 1 49798 _001966_hash NULL
++_001967_hash joydev_handle_JSIOCSAXMAP 3 48898 _002061_hash NULL nohasharray
++_001968_hash joydev_handle_JSIOCSBTNMAP 3 15643 _001968_hash NULL
++_001969_hash minstrel_stats_read 3 17290 _001969_hash NULL
++_001970_hash ntfs_rl_realloc 3 56831 _001970_hash NULL
++_001971_hash ntfs_rl_realloc_nofail 3 32173 _001971_hash NULL
++_001972_hash reqsk_queue_alloc 2 40272 _001972_hash NULL
++_001973_hash resize_info_buffer 2 62889 _001973_hash NULL
++_001974_hash rfkill_fop_write 3 64808 _001974_hash NULL
++_001975_hash sctp_getsockopt_active_key 2 45483 _001975_hash NULL
++_001976_hash sctp_getsockopt_adaptation_layer 2 45375 _001976_hash NULL
++_001977_hash sctp_getsockopt_associnfo 2 58169 _001977_hash NULL
++_001978_hash sctp_getsockopt_assoc_number 2 6384 _001978_hash NULL
++_001979_hash sctp_getsockopt_context 2 52490 _001979_hash NULL
++_001980_hash sctp_getsockopt_default_send_param 2 63056 _001980_hash NULL
++_001981_hash sctp_getsockopt_disable_fragments 2 12330 _001981_hash NULL
++_001982_hash sctp_getsockopt_events 2 3607 _001982_hash NULL
++_001983_hash sctp_getsockopt_fragment_interleave 2 51215 _001983_hash NULL
++_001984_hash sctp_getsockopt_initmsg 2 26042 _001984_hash NULL
++_001985_hash sctp_getsockopt_local_addrs_old 2 4220 _001985_hash NULL
++_001986_hash sctp_getsockopt_mappedv4 2 20044 _001986_hash NULL
++_001987_hash sctp_getsockopt_nodelay 2 9560 _001987_hash NULL
++_001988_hash sctp_getsockopt_partial_delivery_point 2 60952 _001988_hash NULL
++_001989_hash sctp_getsockopt_peeloff 2 59190 _001989_hash NULL
++_001990_hash sctp_getsockopt_peer_addr_info 2 6024 _001990_hash NULL
++_001991_hash sctp_getsockopt_peer_addr_params 2 53645 _001991_hash NULL
++_001992_hash sctp_getsockopt_peer_addrs_old 2 11565 _002138_hash NULL nohasharray
++_001993_hash sctp_getsockopt_primary_addr 2 24639 _001993_hash NULL
++_001994_hash sctp_getsockopt_rtoinfo 2 62027 _001994_hash NULL
++_001995_hash sctp_getsockopt_sctp_status 2 56540 _001995_hash NULL
++_001996_hash snd_mixart_BA0_read 5 45069 _001996_hash NULL
++_001997_hash snd_mixart_BA1_read 5 5082 _001997_hash NULL
++_001998_hash snd_pcm_oss_read2 3 54387 _001998_hash NULL
++_001999_hash tty_buffer_find 2 2443 _001999_hash NULL
++_002000_hash unix_bind 3 15668 _002000_hash NULL
++_002001_hash usbvideo_rvmalloc 1 17758 _002001_hash NULL
++_002002_hash usbvision_rvmalloc 1 19655 _002002_hash NULL
++_002003_hash alloc_skb 1 55439 _002003_hash NULL
++_002004_hash alloc_skb_fclone 1 3467 _002004_hash NULL
++_002005_hash core_sys_select 1 47494 _002005_hash NULL
++_002006_hash create_reply_packet 3 6789 _002006_hash NULL
++_002007_hash expand_fdtable 2 39273 _002007_hash NULL
++_002008_hash get_vm_area 1 18080 _002008_hash NULL
++_002009_hash __get_vm_area 1 61599 _002009_hash NULL
++_002010_hash get_vm_area_caller 1 10527 _002010_hash NULL
++_002011_hash __get_vm_area_caller 1 56416 _002233_hash NULL nohasharray
++_002012_hash get_vm_area_node 1 44507 _002012_hash NULL
++_002013_hash hpsb_make_readpacket 4 34181 _002013_hash NULL
++_002014_hash hpsb_make_streampacket 3 21071 _002014_hash NULL
++_002015_hash hpsb_make_writepacket 5 61656 _002015_hash NULL
++_002016_hash inet_csk_listen_start 2 38233 _002016_hash NULL
++_002017_hash __netdev_alloc_skb 2 18595 _002017_hash NULL
++_002018_hash ntfs_rl_append 2-4 6037 _002018_hash NULL
++_002020_hash ntfs_rl_insert 2-4 4931 _002020_hash NULL
++_002022_hash ntfs_rl_replace 2-4 14136 _002022_hash NULL
++_002024_hash ntfs_rl_split 2-4 52328 _002024_hash NULL
++_002026_hash random_read 3 13815 _002026_hash NULL
++_002027_hash tty_buffer_request_room 2 23228 _002027_hash NULL
++_002028_hash urandom_read 3 30462 _002028_hash NULL
++_002029_hash alloc_tx 2 32143 _002029_hash NULL
++_002030_hash alloc_vm_area 1 16003 _002030_hash &_000575_hash
++_002031_hash atm_alloc_charge 2 19517 _002100_hash NULL nohasharray
++_002032_hash ax25_output 2 22736 _002032_hash NULL
++_002033_hash bcsp_prepare_pkt 3 12961 _002033_hash NULL
++_002034_hash bt_skb_alloc 1 6404 _002034_hash NULL
++_002035_hash cxgb3_get_cpl_reply_skb 2 10620 _002035_hash NULL
++_002036_hash dccp_listen_start 2 35918 _002036_hash NULL
++_002037_hash __dev_alloc_skb 1 28681 _002037_hash NULL
++_002038_hash dn_alloc_skb 2 6631 _002038_hash NULL
++_002039_hash do_pselect 1 62061 _002039_hash NULL
++_002040_hash edge_tty_recv 4 18667 _002040_hash NULL
++_002041_hash expand_files 2 17080 _002041_hash NULL
++_002042_hash find_skb 2 20431 _002042_hash NULL
++_002043_hash gem_alloc_skb 1 59411 _002043_hash NULL
++_002044_hash get_packet 3 41914 _002044_hash NULL
++_002045_hash get_packet 3 5747 _002045_hash NULL
++_002046_hash get_packet_pg 4 28023 _002046_hash NULL
++_002047_hash get_skb 2 63008 _002047_hash NULL
++_002048_hash hidp_queue_report 3 1881 _002048_hash NULL
++_002049_hash __hidp_send_ctrl_message 4 28303 _002049_hash NULL
++_002050_hash hpsb_read 6 29049 _002050_hash NULL
++_002051_hash hpsb_write 6 49217 _002051_hash NULL
++_002052_hash i2400m_net_rx 5 27170 _002052_hash NULL
++_002053_hash igmpv3_newpack 2 35912 _002053_hash NULL
++_002054_hash inet_listen 2 14723 _002054_hash NULL
++_002055_hash __ioremap_caller 2-1 21800 _002055_hash NULL
++_002056_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _002056_hash &_001371_hash
++_002057_hash isdn_ppp_ccp_xmit_reset 6 63297 _002057_hash NULL
++_002058_hash _l2_alloc_skb 1 11883 _002058_hash NULL
++_002059_hash l3_alloc_skb 1 32289 _002059_hash NULL
++_002060_hash llc_alloc_frame 4 64366 _002060_hash NULL
++_002061_hash mac_drv_rx_init 2 48898 _002061_hash &_001967_hash
++_002062_hash mI_alloc_skb 1 24770 _002062_hash NULL
++_002063_hash module_alloc 1 63630 _002063_hash NULL
++_002064_hash netdev_alloc_skb 2 62437 _002064_hash NULL
++_002065_hash new_skb 1 21148 _002065_hash NULL
++_002066_hash nfulnl_alloc_skb 2 65207 _002066_hash NULL
++_002067_hash ni65_alloc_mem 3 10664 _002067_hash NULL
++_002068_hash pep_reply 5 50582 _002068_hash NULL
++_002069_hash pn_raw_send 2 54330 _002069_hash NULL
++_002070_hash refill_pool 2 19477 _002070_hash NULL
++_002071_hash rfcomm_wmalloc 2 58090 _002071_hash NULL
++_002072_hash rtl8169_alloc_rx_skb 4-5 25392 _002072_hash NULL
++_002074_hash rx 4 57944 _002074_hash NULL
++_002075_hash sbp2util_node_write_no_wait 4 7510 _002075_hash NULL
++_002076_hash sctp_ulpevent_new 1 33377 _002076_hash NULL
++_002077_hash send_command 4 10832 _002077_hash NULL
++_002078_hash send_to_tty 3 45141 _002078_hash NULL
++_002079_hash skb_copy_expand 2-3 7685 _002079_hash &_000403_hash
++_002081_hash sk_stream_alloc_skb 2 57622 _002081_hash NULL
++_002082_hash sock_alloc_send_pskb 2 21246 _002082_hash NULL
++_002083_hash sock_rmalloc 2 59740 _002083_hash &_001739_hash
++_002084_hash sock_wmalloc 2 16472 _002084_hash NULL
++_002085_hash solos_param_store 4 34755 _002085_hash NULL
++_002086_hash sys_select 1 38827 _002086_hash NULL
++_002087_hash tcp_collapse 5-6 63294 _002087_hash NULL
++_002089_hash tipc_cfg_reply_alloc 1 27606 _002089_hash NULL
++_002090_hash ti_recv 4 22027 _002090_hash NULL
++_002091_hash tty_prepare_flip_string 3 39955 _002091_hash NULL
++_002092_hash tty_prepare_flip_string_flags 4 59240 _002092_hash NULL
++_002093_hash ulog_alloc_skb 1 23427 _002093_hash NULL
++_002094_hash vmap 2 15025 _002094_hash NULL
++_002095_hash alloc_fd 1 37637 _002095_hash NULL
++_002096_hash _alloc_mISDN_skb 3 52232 _002096_hash NULL
++_002097_hash ath_rxbuf_alloc 2 24745 _002097_hash NULL
++_002098_hash ax25_send_frame 2 19964 _002098_hash NULL
++_002099_hash console_store 4 36007 _002099_hash NULL
++_002100_hash dev_alloc_skb 1 19517 _002100_hash &_002031_hash
++_002101_hash dn_nsp_do_disc 2-6 49474 _002101_hash NULL
++_002103_hash dsp_cmx_send_member 2 15625 _002103_hash NULL
++_002104_hash enic_rq_alloc_skb 2 17220 _002104_hash NULL
++_002105_hash hci_send_cmd 3 43810 _002105_hash NULL
++_002106_hash hci_si_event 3 1404 _002106_hash NULL
++_002107_hash hfcpci_empty_bfifo 4 62323 _002107_hash NULL
++_002108_hash hidp_send_ctrl_message 4 43702 _002108_hash NULL
++_002109_hash hpsb_node_read 4 53963 _002109_hash NULL
++_002110_hash hpsb_node_write 4 52928 _002110_hash NULL
++_002111_hash inet_dccp_listen 2 28565 _002111_hash NULL
++_002112_hash ioremap_cache 2-1 47189 _002112_hash NULL
++_002113_hash ioremap_default 2-1 64667 _002113_hash NULL
++_002114_hash ioremap_nocache 2-1 2439 _002114_hash NULL
++_002115_hash ioremap_prot 2-1 51764 _002115_hash NULL
++_002116_hash ioremap_wc 2-1 62695 _002116_hash NULL
++_002117_hash ip6_append_data 4-5 60501 _002117_hash NULL
++_002118_hash ip_append_data 4-5 41234 _002118_hash NULL
++_002119_hash l1oip_socket_recv 6 56537 _002119_hash NULL
++_002120_hash l2cap_build_cmd 4 48676 _002120_hash NULL
++_002121_hash l2down_create 4 21755 _002121_hash NULL
++_002122_hash l2up_create 3 6430 _002122_hash NULL
++_002125_hash lro_gen_skb 6 2644 _002125_hash NULL
++_002126_hash module_alloc_update_bounds 1 47205 _002126_hash NULL
++_002127_hash netpoll_send_udp 3 58955 _002127_hash NULL
++_002128_hash nfqnl_mangle 2 14583 _002128_hash NULL
++_002129_hash p54_alloc_skb 3 34366 _002129_hash &_000292_hash
++_002130_hash process_rcvd_data 3 6679 _002130_hash NULL
++_002131_hash receive_copy 3 12216 _002131_hash NULL
++_002132_hash rfcomm_tty_write 3 51603 _002132_hash NULL
++_002133_hash rtl8169_try_rx_copy 3 55465 _002133_hash NULL
++_002134_hash send_mpa_reject 3 7135 _002134_hash NULL
++_002135_hash send_mpa_reply 3 32372 _002135_hash NULL
++_002136_hash sge_rx 3 50594 _002136_hash NULL
++_002137_hash sis190_try_rx_copy 3 57069 _002137_hash NULL
++_002138_hash skb_cow_data 2 11565 _002138_hash &_001992_hash
++_002139_hash skge_rx_get 3 40598 _002139_hash NULL
++_002140_hash sock_alloc_send_skb 2 23720 _002140_hash NULL
++_002141_hash sys_dup3 2 33421 _002141_hash NULL
++_002142_hash sys_pselect6 1 57449 _002142_hash NULL
++_002143_hash tcp_fragment 3 20436 _002143_hash NULL
++_002144_hash teiup_create 3 43201 _002144_hash NULL
++_002145_hash ttm_bo_kmap_ttm 3 5922 _002145_hash &_001624_hash
++_002146_hash tun_alloc_skb 2-4-3 41216 _002146_hash NULL
++_002148_hash use_pool 2 64607 _002148_hash NULL
++_002149_hash velocity_rx_copy 2 34583 _002149_hash NULL
++_002150_hash vxge_rx_alloc 3 52024 _002150_hash NULL
++_002151_hash add_rx_skb 3 8257 _002151_hash NULL
++_002152_hash ar9170_rx_copy_data 2 35787 _002152_hash NULL
++_002153_hash arlan_rx_interrupt 4 10184 _002153_hash NULL
++_002154_hash br_send_bpdu 3 29669 _002154_hash NULL
++_002155_hash bt_skb_send_alloc 2 6581 _002155_hash NULL
++_002156_hash cosa_net_setup_rx 2 38594 _002156_hash NULL
++_002157_hash deliver_packet 3 767 _002157_hash NULL
++_002158_hash devm_ioremap_nocache 3-2 2036 _002158_hash NULL
++_002159_hash dn_alloc_send_pskb 2 4465 _002159_hash NULL
++_002160_hash dn_nsp_return_disc 2 60296 _002160_hash NULL
++_002161_hash dn_nsp_send_disc 2 23469 _002161_hash NULL
++_002162_hash do_fcntl 3 31468 _002162_hash NULL
++_002163_hash dsp_tone_hw_message 3 17678 _002163_hash NULL
++_002164_hash dvb_net_sec 3 37884 _002164_hash NULL
++_002165_hash ether1394_data_handler 5 2481 _002165_hash NULL
++_002166_hash fast_rx_path 3 59214 _002166_hash NULL
++_002167_hash __fc_frame_alloc 1 23432 _002167_hash NULL
++_002168_hash fwnet_incoming_packet 3 40380 _002168_hash NULL
++_002169_hash fwnet_pd_new 4 39947 _002169_hash NULL
++_002170_hash got_frame 2 16028 _002170_hash NULL
++_002171_hash hdlcdev_rx 3 997 _002171_hash NULL
++_002172_hash HDLC_Encode 2-3 7186 _002172_hash NULL
++_002174_hash hfc_empty_fifo 2 57972 _002174_hash NULL
++_002175_hash hfcpci_empty_fifo 4 2427 _002175_hash NULL
++_002176_hash hysdn_rx_netpkt 3 16136 _002176_hash NULL
++_002177_hash ieee80211_fragment 4 33112 _002177_hash NULL
++_002178_hash ieee80211_send_auth 5 60865 _002178_hash NULL
++_002179_hash ieee80211_send_probe_req 6 49265 _002179_hash NULL
++_002180_hash ioremap 2-1 23172 _002180_hash NULL
++_002181_hash ip6_ufo_append_data 5-7-6 40301 _002181_hash NULL
++_002184_hash ip_send_reply 4 46635 _002184_hash NULL
++_002185_hash ip_ufo_append_data 5-7-6 27187 _002185_hash NULL
++_002188_hash ipw_packet_received_skb 2 1230 _002188_hash NULL
++_002189_hash iraw_encode 2-3 18099 _002189_hash NULL
++_002191_hash iwch_reject_cr 3 23901 _002191_hash NULL
++_002192_hash iwm_rx_packet_alloc 3 9898 _002192_hash NULL
++_002193_hash l1oip_socket_parse 4 4507 _002193_hash NULL
++_002194_hash l2cap_send_cmd 4 3678 _002194_hash NULL
++_002196_hash mcs_unwrap_fir 3 25733 _002196_hash NULL
++_002197_hash mcs_unwrap_mir 3 9455 _002197_hash NULL
++_002198_hash mga_ioremap 2-1 8571 _002198_hash NULL
++_002199_hash mld_newpack 2 50950 _002199_hash NULL
++_002200_hash new_partial_datagram 4 16576 _002200_hash &_001692_hash
++_002201_hash node_read 4 45634 _002201_hash NULL
++_002202_hash node_write 4 29878 _002202_hash NULL
++_002203_hash p54_download_eeprom 4 43842 _002203_hash NULL
++_002204_hash pci_iomap 3 47575 _002204_hash NULL
++_002205_hash ppp_tx_cp 5 62044 _002205_hash NULL
++_002206_hash prism2_send_mgmt 4 62605 _002206_hash &_001462_hash
++_002207_hash prism2_sta_send_mgmt 5 43916 _002207_hash NULL
++_002208_hash _queue_data 4 54983 _002208_hash NULL
++_002209_hash read_fifo 3 826 _002209_hash NULL
++_002210_hash rx_data 4 60442 _002210_hash NULL
++_002211_hash sbp2util_notify_fetch_agent 4 40494 _002211_hash NULL
++_002212_hash set_rxd_buffer_pointer 8 9950 _002212_hash NULL
++_002213_hash sky2_receive 2 13407 _002213_hash NULL
++_002214_hash smctr_process_rx_packet 2 13270 _002214_hash NULL
++_002215_hash sys_dup2 2 25284 _002215_hash NULL
++_002216_hash tcp_mark_head_lost 2 50087 _002216_hash NULL
++_002217_hash tcp_match_skb_to_sack 3-4 23568 _002217_hash NULL
++_002219_hash tso_fragment 3 12539 _002219_hash NULL
++_002220_hash ttm_bo_ioremap 4-3-2 40854 _002220_hash NULL
++_002221_hash ttm_bo_kmap 3 60118 _002221_hash NULL
++_002222_hash udp_sendmsg 4 4492 _002222_hash NULL
++_002223_hash udpv6_sendmsg 4 22316 _002223_hash NULL
++_002224_hash wl1271_rx_handle_data 2 47827 _002224_hash NULL
++_002225_hash wv_packet_read 3 39140 _002225_hash NULL
++_002226_hash zd_mac_rx 3 38296 _002226_hash NULL
++_002227_hash aac_nark_ioremap 2 50163 _002227_hash &_000206_hash
++_002228_hash aac_rkt_ioremap 2 3333 _002228_hash NULL
++_002229_hash aac_rx_ioremap 2 52410 _002229_hash NULL
++_002230_hash aac_sa_ioremap 2 13596 _002230_hash &_000190_hash
++_002231_hash acpi_os_map_memory 2-1 11161 _002231_hash NULL
++_002232_hash acpi_os_read_memory 3-1 54186 _002232_hash NULL
++_002233_hash acpi_os_write_memory 3-1 56416 _002233_hash &_002011_hash
++_002234_hash ar9170_handle_mpdu 3 37956 _002234_hash NULL
++_002235_hash check586 2 29914 _002235_hash NULL
++_002236_hash check_mirror 2-1 57342 _002236_hash &_001259_hash
++_002237_hash devm_ioremap 3-2 29235 _002237_hash &_000851_hash
++_002238_hash dma_declare_coherent_memory 4-2 14244 _002238_hash NULL
++_002239_hash dvb_net_sec_callback 2 28786 _002239_hash NULL
++_002240_hash ether1394_write 6 6180 _002240_hash NULL
++_002241_hash _fc_frame_alloc 2 28865 _002241_hash NULL
++_002242_hash fc_frame_alloc_fill 2 59394 _002242_hash NULL
++_002243_hash fwnet_receive_packet 10 46054 _002243_hash NULL
++_002244_hash handle_rx_packet 3 58993 _002244_hash NULL
++_002245_hash hysdn_sched_rx 3 60533 _002245_hash NULL
++_002250_hash ipwireless_network_packet_received 4 51277 _002250_hash NULL
++_002251_hash isp1760_register 2-1 14408 _002251_hash NULL
++_002252_hash iwm_ntf_rx_packet 3 60452 _002252_hash NULL
++_002253_hash mthca_map_reg 3-2 5664 _002253_hash NULL
++_002254_hash pcim_iomap 3 58334 _002254_hash NULL
++_002255_hash ppp_cp_event 6 2965 _002255_hash NULL
++_002256_hash register_device 3-2 60015 _002256_hash NULL
++_002257_hash remap_pci_mem 1-2 15966 _002257_hash NULL
++_002259_hash sfi_map_memory 2-1 5183 _002259_hash NULL
++_002260_hash sriov_enable_migration 2 14889 _002260_hash NULL
++_002261_hash sys_fcntl 3 19267 _002261_hash NULL
++_002262_hash sys_fcntl64 3 29031 _002262_hash NULL
++_002263_hash tcp_sacktag_walk 5-6 26339 _002263_hash NULL
++_002265_hash tcp_update_scoreboard 2 21639 _002265_hash NULL
++_002266_hash tcp_write_xmit 2 39755 _002266_hash NULL
++_002267_hash tpm_tis_init 3-2 15304 _002267_hash NULL
++_002268_hash acpi_ex_system_memory_space_handler 2 31192 _002268_hash NULL
++_002269_hash dmam_declare_coherent_memory 4-2 43679 _002269_hash NULL
++_002270_hash fc_frame_alloc 2 1596 _002270_hash NULL
++_002271_hash sriov_enable 2 59689 _002271_hash NULL
++_002272_hash tcp_push_one 2 48816 _002272_hash NULL
++_002273_hash __tcp_push_pending_frames 2 48148 _002273_hash NULL
++_002274_hash pci_enable_sriov 2 35745 _002274_hash NULL
++_002275_hash tcp_push 3 10680 _002275_hash NULL
++_002276_hash msix_map_region 3 3411 _002276_hash NULL
++_002277_hash compat_sys_fcntl64 3 60256 _002277_hash NULL
++_002278_hash efi_ioremap 2-1 3492 _002278_hash &_000785_hash
++_002279_hash snd_nm256_capture_copy 5 28622 _002279_hash NULL
++_002280_hash snd_nm256_playback_copy 5 38567 _002280_hash NULL
++_002281_hash compat_sys_fcntl 3 15654 _002281_hash NULL
++_002287_hash xlate_dev_mem_ptr 1 15291 _002287_hash &_001234_hash
++_002288_hash a4t_cs_init 3 27734 _002288_hash NULL
++_002292_hash atyfb_setup_generic 3 49151 _002292_hash NULL
++_002293_hash c101_run 2 37279 _002293_hash NULL
++_002295_hash cru_detect 1 11272 _002295_hash NULL
++_002296_hash cs553x_init_one 3 58886 _002296_hash NULL
++_002297_hash cycx_setup 4 47562 _002297_hash NULL
++_002298_hash DepcaSignature 2 80 _002298_hash &_000995_hash
++_002301_hash doc_probe 1 23285 _002301_hash NULL
++_002302_hash DoC_Probe 1 57534 _002302_hash NULL
++_002303_hash gdth_init_isa 1 28091 _002303_hash NULL
++_002304_hash gdth_search_isa 1 58595 _002304_hash NULL
++_002307_hash n2_run 3 53459 _002307_hash NULL
++_002308_hash probe_bios 1 17467 _002308_hash NULL
++_002311_hash ssb_bus_scan 2 36578 _002311_hash NULL
++_002312_hash ssb_ioremap 2 5228 _002312_hash NULL
++_002313_hash acpi_tb_check_xsdt 1 21862 _002313_hash NULL
++_002314_hash acpi_tb_install_table 1 12988 _002314_hash NULL
++_002315_hash acpi_tb_parse_root_table 1 53455 _002315_hash NULL
++_002316_hash com90xx_found 3 13974 _002316_hash NULL
++_002318_hash gdth_isa_probe_one 1 48925 _002318_hash NULL
++_002319_hash sfi_map_table 1 5462 _002319_hash NULL
++_002320_hash ssb_bus_register 3 65183 _002320_hash NULL
++_002321_hash sfi_check_table 1 6772 _002321_hash NULL
++_002322_hash ssb_bus_pcmciabus_register 3 56020 _002322_hash NULL
++_002323_hash ssb_bus_ssbbus_register 2 2217 _002323_hash NULL
++_002326_hash iommu_map_mmio_space 1 30919 _002326_hash NULL
++_002327_hash allocate_probes 1 40204 _002327_hash NULL
++_002328_hash b1_alloc_card 1 36155 _002328_hash NULL
++_002329_hash blk_dropped_read 3 4168 _002329_hash NULL
++_002330_hash blk_msg_write 3 13655 _002330_hash NULL
++_002331_hash capinc_tty_write 3 28539 _002331_hash NULL
++_002332_hash capi_write 3 35104 _002332_hash NULL
++_002333_hash cmtp_add_msgpart 4 9252 _002333_hash NULL
++_002334_hash cmtp_send_interopmsg 7 376 _002334_hash NULL
++_002335_hash dccpprobe_read 3 52549 _002335_hash NULL
++_002336_hash __devres_alloc 2 25598 _002336_hash NULL
++_002337_hash diva_os_alloc_message_buffer 1 64568 _002337_hash NULL
++_002338_hash diva_os_copy_from_user 4 7792 _002338_hash NULL
++_002339_hash diva_os_copy_to_user 4 48508 _002339_hash NULL
++_002340_hash diva_os_malloc 2 16406 _002340_hash NULL
++_002341_hash divasa_remap_pci_bar 3-4 23485 _002341_hash &_000660_hash
++_002343_hash do_test 1 15766 _002343_hash NULL
++_002344_hash event_enable_read 3 7074 _002344_hash NULL
++_002345_hash event_enable_write 3 45238 _002345_hash NULL
++_002346_hash event_filter_read 3 23494 _002346_hash NULL
++_002347_hash event_filter_write 3 56609 _002347_hash NULL
++_002348_hash event_format_read 3 54674 _002348_hash NULL
++_002349_hash event_id_read 3 64288 _002349_hash &_000935_hash
++_002350_hash ftrace_pid_read 3 14970 _002350_hash NULL
++_002351_hash ftrace_pid_write 3 39710 _002351_hash NULL
++_002352_hash ftrace_profile_read 3 21327 _002352_hash NULL
++_002353_hash ftrace_profile_write 3 53327 _002353_hash NULL
++_002354_hash hycapi_rx_capipkt 3 11602 _002354_hash NULL
++_002355_hash io_mapping_create_wc 1-2 1354 _002355_hash NULL
++_002357_hash kgdb_hex2mem 3 24755 _002357_hash NULL
++_002358_hash kgdb_mem2hex 3 1578 _002358_hash NULL
++_002359_hash __module_alloc 1 50004 _002359_hash NULL
++_002360_hash module_alloc_update_bounds_rw 1 63233 _002360_hash NULL
++_002361_hash module_alloc_update_bounds_rx 1 58634 _002361_hash NULL
++_002362_hash p9_client_read 5 19750 _002362_hash NULL
++_002363_hash pmcraid_copy_sglist 3 38431 _002363_hash NULL
++_002364_hash proc_fault_inject_read 3 36802 _002364_hash NULL
++_002365_hash proc_fault_inject_write 3 21058 _002365_hash NULL
++_002366_hash rb_simple_read 3 45972 _002366_hash NULL
++_002367_hash rb_simple_write 3 20890 _002367_hash NULL
++_002368_hash show_header 3 4722 _002368_hash &_000451_hash
++_002369_hash stack_max_size_read 3 1445 _002369_hash NULL
++_002370_hash stack_max_size_write 3 36068 _002370_hash NULL
++_002371_hash subsystem_filter_read 3 62310 _002371_hash NULL
++_002372_hash subsystem_filter_write 3 13022 _002372_hash NULL
++_002373_hash sysprof_sample_read 3 9605 _002373_hash &_000327_hash
++_002374_hash sysprof_sample_write 3 62489 _002374_hash NULL
++_002375_hash system_enable_read 3 25815 _002375_hash NULL
++_002376_hash system_enable_write 3 61396 _002376_hash NULL
++_002377_hash trace_options_core_read 3 47390 _002377_hash NULL
++_002378_hash trace_options_core_write 3 61551 _002378_hash NULL
++_002379_hash trace_options_read 3 11419 _002379_hash NULL
++_002380_hash trace_options_write 3 48275 _002380_hash NULL
++_002381_hash trace_parser_get_init 2 31379 _002381_hash NULL
++_002382_hash trace_seq_to_user 3 65398 _002382_hash NULL
++_002383_hash tracing_buffers_read 3 11124 _002383_hash NULL
++_002384_hash tracing_clock_read 3 39975 _002384_hash NULL
++_002385_hash tracing_clock_write 3 27961 _002385_hash NULL
++_002386_hash tracing_cpumask_read 3 7010 _002386_hash NULL
++_002387_hash tracing_ctrl_read 3 46922 _002387_hash NULL
++_002388_hash tracing_ctrl_write 3 42324 _002388_hash &_001372_hash
++_002389_hash tracing_entries_read 3 8345 _002389_hash NULL
++_002390_hash tracing_entries_write 3 60563 _002390_hash NULL
++_002391_hash tracing_mark_write 3 62143 _002391_hash NULL
++_002392_hash tracing_max_lat_read 3 8890 _002392_hash NULL
++_002393_hash tracing_max_lat_write 3 8728 _002393_hash NULL
++_002394_hash tracing_read_dyn_info 3 45468 _002394_hash NULL
++_002395_hash tracing_readme_read 3 16493 _002395_hash NULL
++_002396_hash tracing_saved_cmdlines_read 3 21434 _002396_hash NULL
++_002397_hash tracing_set_trace_read 3 44122 _002397_hash NULL
++_002398_hash tracing_set_trace_write 3 57096 _002398_hash NULL
++_002399_hash tracing_stats_read 3 34537 _002399_hash NULL
++_002400_hash tracing_trace_options_read 3 51405 _002400_hash NULL
++_002401_hash tracing_trace_options_write 3 153 _002401_hash NULL
++_002402_hash tstats_write 3 60432 _002402_hash &_000010_hash
++_002403_hash um_idi_write 3 18293 _002403_hash NULL
++_002404_hash __vmalloc_node 1 39308 _002404_hash NULL
++_002405_hash xdi_copy_from_user 4 8395 _002405_hash NULL
++_002406_hash xdi_copy_to_user 4 48900 _002406_hash NULL
++_002407_hash c4_add_card 3 54968 _002407_hash NULL
++_002408_hash mmio_read 4 40348 _002408_hash NULL
++_002409_hash tracing_read_pipe 3 35312 _002409_hash NULL
++_002410_hash v9fs_file_read 3 40858 _002410_hash NULL
++_002411_hash v9fs_file_readn 4 36353 _002411_hash NULL
++_002412_hash create_table 2 16213 _002412_hash NULL
++_002413_hash acl_alloc 1 35979 _002413_hash NULL
++_002414_hash acl_alloc_stack_init 1 60630 _002414_hash NULL
++_002415_hash acl_alloc_num 1-2 60778 _002415_hash NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..cc96254
+index 0000000..5af42b5
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,1204 @@
+@@ -0,0 +1,1558 @@
+/*
+ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -113151,6 +113559,8 @@ index 0000000..cc96254
+#define CREATE_NEW_VAR NULL_TREE
+#define CODES_LIMIT 32
+#define MAX_PARAM 10
++#define MY_STMT GF_PLF_1
++#define NO_CAST_CHECK GF_PLF_2
+
+#if BUILDING_GCC_VERSION == 4005
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
@@ -113160,20 +113570,30 @@ index 0000000..cc96254
+void debug_gimple_stmt(gimple gs);
+
+static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
-+static tree signed_size_overflow_type;
-+static tree unsigned_size_overflow_type;
+static tree report_size_overflow_decl;
+static tree const_char_ptr_type_node;
+static unsigned int handle_function(void);
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before);
++static tree get_size_overflow_type(gimple stmt, tree node);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20120618beta",
++ .version = "20120811beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
-+ unsigned int arg_count = type_num_arguments(*node);
++ unsigned int arg_count;
++
++ if (TREE_CODE(*node) == FUNCTION_DECL)
++ arg_count = type_num_arguments(TREE_TYPE(*node));
++ else if (TREE_CODE(*node) == FUNCTION_TYPE || TREE_CODE(*node) == METHOD_TYPE)
++ arg_count = type_num_arguments(*node);
++ else {
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ return NULL_TREE;
++ }
+
+ for (; args; args = TREE_CHAIN(args)) {
+ tree position = TREE_VALUE(args);
@@ -113185,13 +113605,13 @@ index 0000000..cc96254
+ return NULL_TREE;
+}
+
-+static struct attribute_spec no_size_overflow_attr = {
++static struct attribute_spec size_overflow_attr = {
+ .name = "size_overflow",
+ .min_length = 1,
+ .max_length = -1,
-+ .decl_required = false,
-+ .type_required = true,
-+ .function_type_required = true,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
+ .handler = handle_size_overflow_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = false
@@ -113200,7 +113620,7 @@ index 0000000..cc96254
+
+static void register_attributes(void __unused *event_data, void __unused *data)
+{
-+ register_attribute(&no_size_overflow_attr);
++ register_attribute(&size_overflow_attr);
+}
+
+// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
@@ -113251,6 +113671,7 @@ index 0000000..cc96254
+
+static inline gimple get_def_stmt(tree node)
+{
++ gcc_assert(node != NULL_TREE);
+ gcc_assert(TREE_CODE(node) == SSA_NAME);
+ return SSA_NAME_DEF_STMT(node);
+}
@@ -113413,11 +113834,11 @@ index 0000000..cc96254
+ gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
+
+ type = TREE_TYPE(arg);
-+ // skip function pointers
-+ if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
++
++ if (TREE_CODE(type) == POINTER_TYPE)
+ return;
+
-+ if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
+ return;
+
+ argnum = find_arg_number(arg, func);
@@ -113438,6 +113859,22 @@ index 0000000..cc96254
+ return new_var;
+}
+
++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree type = TREE_TYPE(rhs1);
++ tree lhs = create_new_var(type);
++
++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ gimple_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++ return assign;
++}
++
+static bool is_bool(tree node)
+{
+ tree type;
@@ -113457,34 +113894,63 @@ index 0000000..cc96254
+
+static tree cast_a_tree(tree type, tree var)
+{
-+ gcc_assert(type != NULL_TREE && var != NULL_TREE);
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(var != NULL_TREE);
+ gcc_assert(fold_convertible_p(type, var));
+
+ return fold_convert(type, var);
+}
+
-+static tree signed_cast(tree var)
-+{
-+ return cast_a_tree(signed_size_overflow_type, var);
-+}
-+
-+static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
++static gimple build_cast_stmt(tree type, tree var, tree new_var, gimple_stmt_iterator *gsi, bool before)
+{
+ gimple assign;
++ location_t loc;
++
++ gcc_assert(type != NULL_TREE && var != NULL_TREE);
++ if (gsi_end_p(*gsi) && before == BEFORE_STMT)
++ gcc_unreachable();
+
+ if (new_var == CREATE_NEW_VAR)
+ new_var = create_new_var(type);
+
+ assign = gimple_build_assign(new_var, cast_a_tree(type, var));
-+ gimple_set_location(assign, loc);
++
++ if (!gsi_end_p(*gsi)) {
++ loc = gimple_location(gsi_stmt(*gsi));
++ gimple_set_location(assign, loc);
++ }
++
+ gimple_set_lhs(assign, make_ssa_name(new_var, assign));
+
++ if (before)
++ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++ else
++ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++
+ return assign;
+}
+
++static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++
++ if (new_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
++ return gimple_get_lhs(assign);
++ }
++ return new_rhs1;
++}
++
+static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
+{
-+ tree oldstmt_rhs1;
++ tree oldstmt_rhs1, size_overflow_type, lhs;
+ enum tree_code code;
+ gimple stmt;
+ gimple_stmt_iterator gsi;
@@ -113498,13 +113964,18 @@ index 0000000..cc96254
+ gcc_unreachable();
+ }
+
++ if (gimple_code(oldstmt) == GIMPLE_ASM)
++ lhs = rhs1;
++ else
++ lhs = gimple_get_lhs(oldstmt);
++
+ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
+ code = TREE_CODE(oldstmt_rhs1);
+ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
+ check_missing_attribute(oldstmt_rhs1);
+
-+ stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
+ gsi = gsi_for_stmt(oldstmt);
++ pointer_set_insert(visited, oldstmt);
+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
+ basic_block next_bb, cur_bb;
+ edge e;
@@ -113522,18 +113993,20 @@ index 0000000..cc96254
+
+ gsi = gsi_after_labels(next_bb);
+ gcc_assert(!gsi_end_p(gsi));
++
+ before = true;
++ oldstmt = gsi_stmt(gsi);
++ pointer_set_insert(visited, oldstmt);
+ }
-+ if (before)
-+ gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
-+ else
-+ gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
-+ update_stmt(stmt);
-+ pointer_set_insert(visited, oldstmt);
++
++ size_overflow_type = get_size_overflow_type(oldstmt, lhs);
++
++ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
++ gimple_set_plf(stmt, MY_STMT, true);
+ return gimple_get_lhs(stmt);
+}
+
-+static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
++static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+ tree new_var, lhs = gimple_get_lhs(oldstmt);
+ gimple stmt;
@@ -113542,6 +114015,9 @@ index 0000000..cc96254
+ if (!*potentionally_overflowed)
+ return NULL_TREE;
+
++ if (gimple_plf(oldstmt, MY_STMT))
++ return lhs;
++
+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
+ rhs1 = gimple_assign_rhs1(oldstmt);
+ rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
@@ -113553,6 +114029,7 @@ index 0000000..cc96254
+
+ stmt = gimple_copy(oldstmt);
+ gimple_set_location(stmt, gimple_location(oldstmt));
++ gimple_set_plf(stmt, MY_STMT, true);
+
+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
@@ -113560,13 +114037,13 @@ index 0000000..cc96254
+ if (is_bool(lhs))
+ new_var = SSA_NAME_VAR(lhs);
+ else
-+ new_var = create_new_var(signed_size_overflow_type);
++ new_var = create_new_var(size_overflow_type);
+ new_var = make_ssa_name(new_var, stmt);
+ gimple_set_lhs(stmt, new_var);
+
+ if (rhs1 != NULL_TREE) {
+ if (!gimple_assign_cast_p(oldstmt))
-+ rhs1 = signed_cast(rhs1);
++ rhs1 = cast_a_tree(size_overflow_type, rhs1);
+ gimple_assign_set_rhs1(stmt, rhs1);
+ }
+
@@ -113601,6 +114078,7 @@ index 0000000..cc96254
+ gsi = gsi_for_stmt(oldstmt);
+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
+ gimple_set_bb(phi, bb);
++ gimple_set_plf(phi, MY_STMT, true);
+ return phi;
+}
+
@@ -113614,28 +114092,29 @@ index 0000000..cc96254
+ return first_bb;
+}
+
-+static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
++static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
+{
+ basic_block bb;
-+ gimple newstmt, def_stmt;
++ gimple newstmt;
+ gimple_stmt_iterator gsi;
++ bool before = BEFORE_STMT;
+
-+ newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
-+ if (TREE_CODE(arg) == SSA_NAME) {
-+ def_stmt = get_def_stmt(arg);
-+ if (gimple_code(def_stmt) != GIMPLE_NOP) {
-+ gsi = gsi_for_stmt(def_stmt);
-+ gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
-+ return newstmt;
-+ }
++ if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
++ gsi = gsi_for_stmt(get_def_stmt(arg));
++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
++ return gimple_get_lhs(newstmt);
+ }
+
+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
-+ if (bb->index == 0)
-+ bb = create_a_first_bb();
+ gsi = gsi_after_labels(bb);
-+ gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
-+ return newstmt;
++ if (bb->index == 0) {
++ bb = create_a_first_bb();
++ gsi = gsi_start_bb(bb);
++ }
++ if (gsi_end_p(gsi))
++ before = AFTER_STMT;
++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
++ return gimple_get_lhs(newstmt);
+}
+
+static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
@@ -113668,30 +114147,36 @@ index 0000000..cc96254
+
+ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
+ gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
++ gimple_set_plf(newstmt, MY_STMT, true);
+ update_stmt(newstmt);
+ return newstmt;
+}
+
-+static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
++static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree arg, tree new_var)
+{
+ gimple newstmt;
+ tree new_rhs;
+
+ new_rhs = expand(visited, potentionally_overflowed, arg);
-+
+ if (new_rhs == NULL_TREE)
+ return NULL_TREE;
+
++ new_rhs = cast_to_new_size_overflow_type(get_def_stmt(new_rhs), new_rhs, size_overflow_type, AFTER_STMT);
++
+ newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
+ return gimple_get_lhs(newstmt);
+}
+
-+static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
++static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
+{
-+ gimple phi;
-+ tree new_var = create_new_var(signed_size_overflow_type);
++ gimple phi, oldstmt = get_def_stmt(var);
++ tree new_var, size_overflow_type;
+ unsigned int i, n = gimple_phi_num_args(oldstmt);
+
++ size_overflow_type = get_size_overflow_type(oldstmt, var);
++
++ new_var = create_new_var(size_overflow_type);
++
+ pointer_set_insert(visited, oldstmt);
+ phi = overflow_create_phi_node(oldstmt, new_var);
+ for (i = 0; i < n; i++) {
@@ -113699,10 +114184,10 @@ index 0000000..cc96254
+
+ arg = gimple_phi_arg_def(oldstmt, i);
+ if (is_gimple_constant(arg))
-+ arg = signed_cast(arg);
-+ lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
++ arg = cast_a_tree(size_overflow_type, arg);
++ lhs = build_new_phi_arg(visited, potentionally_overflowed, size_overflow_type, arg, new_var);
+ if (lhs == NULL_TREE)
-+ lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
++ lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_var, i);
+ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
+ }
+
@@ -113710,35 +114195,132 @@ index 0000000..cc96254
+ return gimple_phi_result(phi);
+}
+
-+static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
+{
-+ gimple def_stmt = get_def_stmt(var);
-+ tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++
++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ return gimple_get_lhs(assign);
++}
++
++static void change_rhs1(gimple stmt, tree new_rhs1)
++{
++ tree assign_rhs;
++ tree rhs = gimple_assign_rhs1(stmt);
++
++ assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
++ gimple_assign_set_rhs1(stmt, assign_rhs);
++ update_stmt(stmt);
++}
++
++static bool check_mode_type(gimple stmt)
++{
++ tree lhs = gimple_get_lhs(stmt);
++ tree lhs_type = TREE_TYPE(lhs);
++ tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++
++ if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
++ return false;
++
++ if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
++ return false;
++
++ return true;
++}
++
++static bool check_undefined_integer_operation(gimple stmt)
++{
++ gimple def_stmt;
++ tree lhs = gimple_get_lhs(stmt);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs1_type = TREE_TYPE(rhs1);
++ tree lhs_type = TREE_TYPE(lhs);
++
++ if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++ return false;
++
++ def_stmt = get_def_stmt(rhs1);
++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
++ return false;
++ return true;
++}
++
++static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt)
++{
++ tree size_overflow_type, lhs = gimple_get_lhs(stmt);
++ tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs1_type = TREE_TYPE(rhs1);
++ tree lhs_type = TREE_TYPE(lhs);
+
+ *potentionally_overflowed = true;
++
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
-+ if (new_rhs1 == NULL_TREE) {
-+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
-+ else
-+ return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
++
++ if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return lhs;
++
++ if (gimple_plf(stmt, NO_CAST_CHECK)) {
++ size_overflow_type = get_size_overflow_type(stmt, rhs1);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
++ }
++
++ if (!gimple_assign_cast_p(stmt)) {
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
+ }
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
++
++ if (check_undefined_integer_operation(stmt)) {
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
++ }
++
++ size_overflow_type = get_size_overflow_type(stmt, rhs1);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++
++ change_rhs1(stmt, new_rhs1);
++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, potentionally_overflowed, BEFORE_STMT);
++
++ if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ if (!check_mode_type(stmt))
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++
++ check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, potentionally_overflowed, BEFORE_STMT);
++
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
+}
+
-+static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
+{
-+ gimple def_stmt = get_def_stmt(var);
++ gimple def_stmt = get_def_stmt(lhs);
+ tree rhs1 = gimple_assign_rhs1(def_stmt);
+
+ if (is_gimple_constant(rhs1))
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast(rhs1), NULL_TREE, NULL_TREE);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
+ gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
+ switch (TREE_CODE(rhs1)) {
+ case SSA_NAME:
-+ return handle_unary_rhs(visited, potentionally_overflowed, var);
-+
++ return handle_unary_rhs(visited, potentionally_overflowed, def_stmt);
+ case ARRAY_REF:
+ case BIT_FIELD_REF:
+ case ADDR_EXPR:
@@ -113750,7 +114332,7 @@ index 0000000..cc96254
+ case PARM_DECL:
+ case TARGET_MEM_REF:
+ case VAR_DECL:
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
+ default:
+ debug_gimple_stmt(def_stmt);
@@ -113786,11 +114368,12 @@ index 0000000..cc96254
+ return build1(ADDR_EXPR, ptr_type_node, string);
+}
+
-+static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
++static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg, bool min)
+{
+ gimple func_stmt, def_stmt;
-+ tree current_func, loc_file, loc_line;
++ tree current_func, loc_file, loc_line, ssa_name;
+ expanded_location xloc;
++ char ssa_name_buf[100];
+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
+
+ def_stmt = get_def_stmt(arg);
@@ -113810,8 +114393,15 @@ index 0000000..cc96254
+ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
+ current_func = create_string_param(current_func);
+
-+ // void report_size_overflow(const char *file, unsigned int line, const char *func)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
++ if (min)
++ snprintf(ssa_name_buf, 100, "%s_%u (min)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
++ else
++ snprintf(ssa_name_buf, 100, "%s_%u (max)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
++ ssa_name = build_string(100, ssa_name_buf);
++ ssa_name = create_string_param(ssa_name);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
+
+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
+}
@@ -113823,14 +114413,15 @@ index 0000000..cc96254
+ inform(loc, "Integer size_overflow check applied here.");
+}
+
-+static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
+{
+ basic_block cond_bb, join_bb, bb_true;
+ edge e;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+ cond_bb = gimple_bb(stmt);
-+ gsi_prev(&gsi);
++ if (before)
++ gsi_prev(&gsi);
+ if (gsi_end_p(gsi))
+ e = split_block_after_labels(cond_bb);
+ else
@@ -113856,80 +114447,218 @@ index 0000000..cc96254
+ }
+
+ insert_cond(cond_bb, arg, cond_code, type_value);
-+ insert_cond_result(bb_true, stmt, arg);
++ insert_cond_result(bb_true, stmt, arg, min);
+
+// print_the_code_insertions(stmt);
+}
+
-+static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before)
+{
-+ gimple ucast_stmt;
-+ gimple_stmt_iterator gsi;
-+ location_t loc = gimple_location(stmt);
++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min, rhs_type = TREE_TYPE(rhs);
++ gcc_assert(rhs_type != NULL_TREE);
++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
+
-+ ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
-+ return ucast_stmt;
++ if (!*potentionally_overflowed)
++ return;
++
++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++ gcc_assert(!TREE_OVERFLOW(type_max));
++
++ cast_rhs_type = TREE_TYPE(cast_rhs);
++ type_max_type = TREE_TYPE(type_max);
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
++ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
++
++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
+}
+
-+static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
++static tree get_handle_const_assign_size_overflow_type(gimple def_stmt, tree var_rhs)
+{
-+ tree type_max, type_min, rhs_type = TREE_TYPE(rhs);
-+ gimple ucast_stmt;
++ gimple var_rhs_def_stmt;
++ tree lhs = gimple_get_lhs(def_stmt);
++ tree lhs_type = TREE_TYPE(lhs);
++ tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
++ tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
+
-+ if (!*potentionally_overflowed)
-+ return;
++ if (var_rhs == NULL_TREE)
++ return get_size_overflow_type(def_stmt, lhs);
+
-+ if (TYPE_UNSIGNED(rhs_type)) {
-+ ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
-+ type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
-+ } else {
-+ type_max = signed_cast(TYPE_MAX_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
++ var_rhs_def_stmt = get_def_stmt(var_rhs);
++
++ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
+
-+ type_min = signed_cast(TYPE_MIN_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
++ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
++ debug_gimple_stmt(def_stmt);
++ gcc_unreachable();
+ }
++
++ return get_size_overflow_type(def_stmt, lhs);
+}
+
-+static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
++static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var_rhs, tree new_rhs1, tree new_rhs2)
+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree origtype = TREE_TYPE(orig_rhs);
++ tree new_rhs, size_overflow_type, orig_rhs;
++ void (*gimple_assign_set_rhs)(gimple, tree);
++ tree rhs1 = gimple_assign_rhs1(def_stmt);
++ tree rhs2 = gimple_assign_rhs2(def_stmt);
++ tree lhs = gimple_get_lhs(def_stmt);
+
-+ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++ if (var_rhs == NULL_TREE)
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
-+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ update_stmt(assign);
-+ return gimple_get_lhs(assign);
-+}
++ if (new_rhs2 == NULL_TREE) {
++ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs1);
++ new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
++ orig_rhs = rhs1;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++ } else {
++ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs2);
++ new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
++ orig_rhs = rhs2;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++ }
+
-+static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree orig_rhs, tree var_rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
-+{
-+ tree new_rhs;
++ var_rhs = cast_to_new_size_overflow_type(def_stmt, var_rhs, size_overflow_type, BEFORE_STMT);
+
+ if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
+
-+ if (var_rhs == NULL_TREE)
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ check_size_overflow(def_stmt, size_overflow_type, var_rhs, orig_rhs, potentionally_overflowed, BEFORE_STMT);
+
+ new_rhs = change_assign_rhs(def_stmt, orig_rhs, var_rhs);
+ gimple_assign_set_rhs(def_stmt, new_rhs);
+ update_stmt(def_stmt);
+
-+ check_size_overflow(def_stmt, var_rhs, orig_rhs, potentionally_overflowed);
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+}
+
-+static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree get_cast_def_stmt_rhs(tree new_rhs)
+{
-+ tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(var);
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(new_rhs);
++ // get_size_overflow_type
++ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
++ gcc_assert(gimple_assign_cast_p(def_stmt));
++ return gimple_assign_rhs1(def_stmt);
++}
++
++static tree cast_to_int_TI_type_and_check(bool *potentionally_overflowed, gimple stmt, tree new_rhs)
++{
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt, def_stmt;
++ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
++
++ if (mode != TImode && mode != DImode) {
++ def_stmt = get_def_stmt(new_rhs);
++ gcc_assert(gimple_assign_cast_p(def_stmt));
++ new_rhs = gimple_assign_rhs1(def_stmt);
++ mode = TYPE_MODE(TREE_TYPE(new_rhs));
++ }
++
++ gcc_assert(mode == TImode || mode == DImode);
++
++ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
++ return new_rhs;
++
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ new_rhs = gimple_get_lhs(cast_stmt);
++
++ if (mode == DImode)
++ return new_rhs;
++
++ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, potentionally_overflowed, BEFORE_STMT);
++
++ return new_rhs;
++}
++
++static bool is_an_integer_trunction(gimple stmt)
++{
++ gimple rhs1_def_stmt, rhs2_def_stmt;
++ tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
++ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
++
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
++ return false;
++
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ rhs2_def_stmt = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ return false;
++
++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
++ return false;
++
++ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
++ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
++ return true;
++}
++
++static tree handle_integer_truncation(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
++{
++ tree new_rhs1, new_rhs2, size_overflow_type;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_an_integer_trunction(stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
++ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
++
++ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
++
++ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
++ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_get_lhs(assign);
++ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, potentionally_overflowed, AFTER_STMT);
++
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
++{
++ tree rhs1, rhs2, size_overflow_type, new_lhs;
++ gimple def_stmt = get_def_stmt(lhs);
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+
@@ -113950,32 +114679,41 @@ index 0000000..cc96254
+ case EXACT_DIV_EXPR:
+ case POINTER_PLUS_EXPR:
+ case BIT_AND_EXPR:
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+ default:
+ break;
+ }
+
+ *potentionally_overflowed = true;
+
++ new_lhs = handle_integer_truncation(visited, potentionally_overflowed, lhs);
++ if (new_lhs != NULL_TREE)
++ return new_lhs;
++
+ if (TREE_CODE(rhs1) == SSA_NAME)
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
+ if (TREE_CODE(rhs2) == SSA_NAME)
+ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
+
+ if (is_gimple_constant(rhs2))
-+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, new_rhs1, signed_cast(rhs2), &gimple_assign_set_rhs1);
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
+
+ if (is_gimple_constant(rhs1))
-+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, new_rhs2, signed_cast(rhs1), new_rhs2, &gimple_assign_set_rhs2);
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
++
++ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
+
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+#if BUILDING_GCC_VERSION >= 4007
-+static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
++static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree rhs)
+{
+ if (is_gimple_constant(rhs))
-+ return signed_cast(rhs);
++ return cast_a_tree(size_overflow_type, rhs);
+ if (TREE_CODE(rhs) != SSA_NAME)
+ return NULL_TREE;
+ return expand(visited, potentionally_overflowed, rhs);
@@ -113983,61 +114721,72 @@ index 0000000..cc96254
+
+static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
+{
-+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
+ gimple def_stmt = get_def_stmt(var);
+
+ *potentionally_overflowed = true;
+
++ size_overflow_type = get_size_overflow_type(def_stmt, var);
++
+ rhs1 = gimple_assign_rhs1(def_stmt);
+ rhs2 = gimple_assign_rhs2(def_stmt);
+ rhs3 = gimple_assign_rhs3(def_stmt);
-+ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
-+ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
-+ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
++ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs1);
++ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs2);
++ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs3);
+
-+ if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
-+ error("handle_ternary_ops: unknown rhs");
-+ gcc_unreachable();
++ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ new_rhs3 = cast_to_new_size_overflow_type(def_stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
++
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
+}
+#endif
+
-+static void set_size_overflow_type(tree node)
++static tree get_size_overflow_type(gimple stmt, tree node)
+{
-+ switch (TYPE_MODE(TREE_TYPE(node))) {
++ tree type;
++
++ gcc_assert(node != NULL_TREE);
++
++ type = TREE_TYPE(node);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return TREE_TYPE(node);
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
++ case HImode:
++ return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
+ case SImode:
-+ signed_size_overflow_type = intDI_type_node;
-+ unsigned_size_overflow_type = unsigned_intDI_type_node;
-+ break;
++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
+ case DImode:
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
-+ signed_size_overflow_type = intDI_type_node;
-+ unsigned_size_overflow_type = unsigned_intDI_type_node;
-+ } else {
-+ signed_size_overflow_type = intTI_type_node;
-+ unsigned_size_overflow_type = unsigned_intTI_type_node;
-+ }
-+ break;
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
++ return (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
+ default:
-+ error("set_size_overflow_type: unsupported gcc configuration.");
++ debug_tree(node);
++ error("get_size_overflow_type: unsupported gcc configuration.");
+ gcc_unreachable();
+ }
+}
+
+static tree expand_visited(gimple def_stmt)
+{
-+ gimple tmp;
++ gimple next_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
+
+ gsi_next(&gsi);
-+ tmp = gsi_stmt(gsi);
-+ switch (gimple_code(tmp)) {
++ next_stmt = gsi_stmt(gsi);
++
++ switch (gimple_code(next_stmt)) {
+ case GIMPLE_ASSIGN:
-+ return gimple_get_lhs(tmp);
++ return gimple_get_lhs(next_stmt);
+ case GIMPLE_PHI:
-+ return gimple_phi_result(tmp);
++ return gimple_phi_result(next_stmt);
+ case GIMPLE_CALL:
-+ return gimple_call_lhs(tmp);
++ return gimple_call_lhs(next_stmt);
+ default:
+ return NULL_TREE;
+ }
@@ -114055,19 +114804,18 @@ index 0000000..cc96254
+ return NULL_TREE;
+
+ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
-+ if (code != INTEGER_TYPE)
-+ return NULL_TREE;
+
-+ if (SSA_NAME_IS_DEFAULT_DEF(var)) {
++ if (TREE_CODE(SSA_NAME_VAR(var)) == PARM_DECL)
+ check_missing_attribute(var);
-+ return NULL_TREE;
-+ }
+
+ def_stmt = get_def_stmt(var);
+
+ if (!def_stmt)
+ return NULL_TREE;
+
++ if (gimple_plf(def_stmt, MY_STMT))
++ return var;
++
+ if (pointer_set_contains(visited, def_stmt))
+ return expand_visited(def_stmt);
+
@@ -114076,7 +114824,7 @@ index 0000000..cc96254
+ check_missing_attribute(var);
+ return NULL_TREE;
+ case GIMPLE_PHI:
-+ return build_new_phi(visited, potentionally_overflowed, def_stmt);
++ return build_new_phi(visited, potentionally_overflowed, var);
+ case GIMPLE_CALL:
+ case GIMPLE_ASM:
+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
@@ -114106,9 +114854,7 @@ index 0000000..cc96254
+
+ gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
+
-+ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ update_stmt(assign);
++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
+
+ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
+ update_stmt(stmt);
@@ -114158,8 +114904,6 @@ index 0000000..cc96254
+
+ check_arg_type(arg);
+
-+ set_size_overflow_type(arg);
-+
+ visited = pointer_set_create();
+ potentionally_overflowed = false;
+ newarg = expand(visited, &potentionally_overflowed, arg);
@@ -114170,7 +114914,7 @@ index 0000000..cc96254
+
+ change_function_arg(stmt, arg, argnum, newarg);
+
-+ check_size_overflow(stmt, newarg, arg, &potentionally_overflowed);
++ check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, &potentionally_overflowed, BEFORE_STMT);
+}
+
+static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
@@ -114198,14 +114942,29 @@ index 0000000..cc96254
+ handle_function_arg(stmt, fndecl, num - 1);
+}
+
++static void set_plf_false(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ }
++}
++
+static unsigned int handle_function(void)
+{
-+ basic_block bb = ENTRY_BLOCK_PTR->next_bb;
-+ int saved_last_basic_block = last_basic_block;
++ basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
++
++ set_plf_false();
+
+ do {
+ gimple_stmt_iterator gsi;
-+ basic_block next = bb->next_bb;
++ next = bb->next_bb;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ tree fndecl, attr;
@@ -114218,15 +114977,16 @@ index 0000000..cc96254
+ continue;
+ if (gimple_call_num_args(stmt) == 0)
+ continue;
-+ attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
+ if (!attr || !TREE_VALUE(attr))
+ handle_function_by_hash(stmt, fndecl);
+ else
+ handle_function_by_attribute(stmt, attr, fndecl);
+ gsi = gsi_for_stmt(stmt);
++ next = gimple_bb(stmt)->next_bb;
+ }
+ bb = next;
-+ } while (bb && bb->index <= saved_last_basic_block);
++ } while (bb);
+ return 0;
+}
+
@@ -114254,11 +115014,12 @@ index 0000000..cc96254
+
+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
+
-+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
+ fntype = build_function_type_list(void_type_node,
+ const_char_ptr_type_node,
+ unsigned_type_node,
+ const_char_ptr_type_node,
++ const_char_ptr_type_node,
+ NULL_TREE);
+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
+
@@ -114266,6 +115027,7 @@ index 0000000..cc96254
+ TREE_PUBLIC(report_size_overflow_decl) = 1;
+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
@@ -114298,7 +115060,7 @@ index 0000000..cc96254
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
+ if (enable) {
-+ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
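The plugin changes above widen the instrumentation call from three to four arguments: report_size_overflow() now also receives a string naming the SSA variable whose (min) or (max) bound was violated, and the declaration gains TREE_THIS_VOLATILE, telling GCC the reporting function does not return. A minimal sketch of the kind of kernel-side handler this signature implies; the actual handler ships elsewhere in the grsecurity patch, and the message format and SIGKILL policy below are assumptions:

#include <linux/kernel.h>
#include <linux/sched.h>

void report_size_overflow(const char *file, unsigned int line,
			  const char *func, const char *ssa_name)
{
	/* Identify where the overflowing size computation was caught. */
	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s",
	       func, file, line, ssa_name);
	dump_stack();
	/* Never return, matching the noreturn-style declaration built by the plugin. */
	do_group_exit(SIGKILL);
}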
diff --git a/3.2.26/0000_README b/3.2.27/0000_README
index 7e6481a..93a9a96 100644
--- a/3.2.26/0000_README
+++ b/3.2.27/0000_README
@@ -22,7 +22,11 @@ Patch: 1025_linux-3.2.26.patch
From: http://www.kernel.org
Desc: Linux 3.2.26
-Patch: 4420_grsecurity-2.9.1-3.2.26-201208062017.patch
+Patch: 1026_linux-3.2.27.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.27
+
+Patch: 4420_grsecurity-2.9.1-3.2.27-201208120907.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.26/1021_linux-3.2.22.patch b/3.2.27/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.26/1021_linux-3.2.22.patch
+++ b/3.2.27/1021_linux-3.2.22.patch
diff --git a/3.2.26/1022_linux-3.2.23.patch b/3.2.27/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.26/1022_linux-3.2.23.patch
+++ b/3.2.27/1022_linux-3.2.23.patch
diff --git a/3.2.26/1023_linux-3.2.24.patch b/3.2.27/1023_linux-3.2.24.patch
index 4692eb4..4692eb4 100644
--- a/3.2.26/1023_linux-3.2.24.patch
+++ b/3.2.27/1023_linux-3.2.24.patch
diff --git a/3.2.26/1024_linux-3.2.25.patch b/3.2.27/1024_linux-3.2.25.patch
index e95c213..e95c213 100644
--- a/3.2.26/1024_linux-3.2.25.patch
+++ b/3.2.27/1024_linux-3.2.25.patch
diff --git a/3.2.26/1025_linux-3.2.26.patch b/3.2.27/1025_linux-3.2.26.patch
index 44065b9..44065b9 100644
--- a/3.2.26/1025_linux-3.2.26.patch
+++ b/3.2.27/1025_linux-3.2.26.patch
diff --git a/3.2.27/1026_linux-3.2.27.patch b/3.2.27/1026_linux-3.2.27.patch
new file mode 100644
index 0000000..5878eb4
--- /dev/null
+++ b/3.2.27/1026_linux-3.2.27.patch
@@ -0,0 +1,3188 @@
+diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
+index edad99a..69820b2 100644
+--- a/Documentation/sound/alsa/HD-Audio-Models.txt
++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
+@@ -60,10 +60,11 @@ ALC267/268
+ ==========
+ N/A
+
+-ALC269
++ALC269/270/275/276/280/282
+ ======
+ laptop-amic Laptops with analog-mic input
+ laptop-dmic Laptops with digital-mic input
++ lenovo-dock Enables docking station I/O for some Lenovos
+
+ ALC662/663/272
+ ==============
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index e1f856b..22bf11b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -1,4 +1,4 @@
+-Everything you ever wanted to know about Linux 2.6 -stable releases.
++Everything you ever wanted to know about Linux -stable releases.
+
+ Rules on what kind of patches are accepted, and which ones are not, into the
+ "-stable" tree:
+@@ -41,10 +41,10 @@ Procedure for submitting patches to the -stable tree:
+ cherry-picked than this can be specified in the following format in
+ the sign-off area:
+
+- Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+- Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+- Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+- Cc: <stable@vger.kernel.org> # .32.x
++ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
++ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
++ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
++ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+ The tag sequence has the meaning of:
+@@ -78,6 +78,15 @@ Review cycle:
+ security kernel team, and not go through the normal review cycle.
+ Contact the kernel security team for more details on this procedure.
+
++Trees:
++
++ - The queues of patches, for both completed versions and in progress
++ versions can be found at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
++ - The finalized and tagged releases of all stable kernels can be found
++ in separate branches per version at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
++
+
+ Review committee:
+
+diff --git a/Makefile b/Makefile
+index fa5acc83..bdf851f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
+index 93226cf..b1479fd 100644
+--- a/arch/arm/include/asm/mutex.h
++++ b/arch/arm/include/asm/mutex.h
+@@ -7,121 +7,10 @@
+ */
+ #ifndef _ASM_MUTEX_H
+ #define _ASM_MUTEX_H
+-
+-#if __LINUX_ARM_ARCH__ < 6
+-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+-# include <asm-generic/mutex-xchg.h>
+-#else
+-
+ /*
+- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+- * atomic decrement (it is not a reliable atomic decrement but it satisfies
+- * the defined semantics for our purpose, while being smaller and faster
+- * than a real atomic decrement or atomic swap. The idea is to attempt
+- * decrementing the lock value only once. If once decremented it isn't zero,
+- * or if its store-back fails due to a dispute on the exclusive store, we
+- * simply bail out immediately through the slow path where the lock will be
+- * reattempted until it succeeds.
++ * On pre-ARMv6 hardware this results in a swp-based implementation,
++ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
++ * accesses instead.
+ */
+-static inline void
+-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- fail_fn(count);
+-}
+-
+-static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- __res = fail_fn(count);
+- return __res;
+-}
+-
+-/*
+- * Same trick is used for the unlock fast path. However the original value,
+- * rather than the result, is used to test for success in order to have
+- * better generated assembly.
+- */
+-static inline void
+-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "ldrex %0, [%3] \n\t"
+- "add %1, %0, #1 \n\t"
+- "strex %2, %1, [%3] "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __orig |= __ex_flag;
+- if (unlikely(__orig != 0))
+- fail_fn(count);
+-}
+-
+-/*
+- * If the unlock was done on a contended lock, or if the unlock simply fails
+- * then the mutex remains locked.
+- */
+-#define __mutex_slowpath_needs_to_unlock() 1
+-
+-/*
+- * For __mutex_fastpath_trylock we use another construct which could be
+- * described as a "single value cmpxchg".
+- *
+- * This provides the needed trylock semantics like cmpxchg would, but it is
+- * lighter and less generic than a true cmpxchg implementation.
+- */
+-static inline int
+-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "1: ldrex %0, [%3] \n\t"
+- "subs %1, %0, #1 \n\t"
+- "strexeq %2, %1, [%3] \n\t"
+- "movlt %0, #0 \n\t"
+- "cmpeq %2, #0 \n\t"
+- "bgt 1b "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&count->counter)
+- : "cc", "memory" );
+-
+- return __orig;
+-}
+-
+-#endif
++#include <asm-generic/mutex-xchg.h>
+ #endif
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index b145f16..ece0996 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -242,6 +242,19 @@ svc_preempt:
+ b 1b
+ #endif
+
++__und_fault:
++ @ Correct the PC such that it is pointing at the instruction
++ @ which caused the fault. If the faulting instruction was ARM
++ @ the PC will be pointing at the next instruction, and have to
++ @ subtract 4. Otherwise, it is Thumb, and the PC will be
++ @ pointing at the second half of the Thumb instruction. We
++ @ have to subtract 2.
++ ldr r2, [r0, #S_PC]
++ sub r2, r2, r1
++ str r2, [r0, #S_PC]
++ b do_undefinstr
++ENDPROC(__und_fault)
++
+ .align 5
+ __und_svc:
+ #ifdef CONFIG_KPROBES
+@@ -259,25 +272,32 @@ __und_svc:
+ @
+ @ r0 - instruction
+ @
+-#ifndef CONFIG_THUMB2_KERNEL
++#ifndef CONFIG_THUMB2_KERNEL
+ ldr r0, [r4, #-4]
+ #else
++ mov r1, #2
+ ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
+ cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
+- ldrhhs r9, [r4] @ bottom 16 bits
+- orrhs r0, r9, r0, lsl #16
++ blo __und_svc_fault
++ ldrh r9, [r4] @ bottom 16 bits
++ add r4, r4, #2
++ str r4, [sp, #S_PC]
++ orr r0, r9, r0, lsl #16
+ #endif
+- adr r9, BSYM(1f)
++ adr r9, BSYM(__und_svc_finish)
+ mov r2, r4
+ bl call_fpe
+
++ mov r1, #4 @ PC correction to apply
++__und_svc_fault:
+ mov r0, sp @ struct pt_regs *regs
+- bl do_undefinstr
++ bl __und_fault
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+-1: disable_irq_notrace
++__und_svc_finish:
++ disable_irq_notrace
+
+ @
+ @ restore SPSR and restart the instruction
+@@ -421,25 +441,33 @@ __und_usr:
+ mov r2, r4
+ mov r3, r5
+
++ @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
++ @ faulting instruction depending on Thumb mode.
++ @ r3 = regs->ARM_cpsr
+ @
+- @ fall through to the emulation code, which returns using r9 if
+- @ it has emulated the instruction, or the more conventional lr
+- @ if we are to treat this as a real undefined instruction
+- @
+- @ r0 - instruction
++ @ The emulation code returns using r9 if it has emulated the
++ @ instruction, or the more conventional lr if we are to treat
++ @ this as a real undefined instruction
+ @
+ adr r9, BSYM(ret_from_exception)
+- adr lr, BSYM(__und_usr_unknown)
++
+ tst r3, #PSR_T_BIT @ Thumb mode?
+- itet eq @ explicit IT needed for the 1f label
+- subeq r4, r2, #4 @ ARM instr at LR - 4
+- subne r4, r2, #2 @ Thumb instr at LR - 2
+-1: ldreqt r0, [r4]
++ bne __und_usr_thumb
++ sub r4, r2, #4 @ ARM instr at LR - 4
++1: ldrt r0, [r4]
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+- reveq r0, r0 @ little endian instruction
++ rev r0, r0 @ little endian instruction
+ #endif
+- beq call_fpe
++ @ r0 = 32-bit ARM instruction which caused the exception
++ @ r2 = PC value for the following instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the faulting instruction
++ @ lr = 32-bit undefined instruction function
++ adr lr, BSYM(__und_usr_fault_32)
++ b call_fpe
++
++__und_usr_thumb:
+ @ Thumb instruction
++ sub r4, r2, #2 @ First half of thumb instr at LR - 2
+ #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+ /*
+ * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
+@@ -453,7 +481,7 @@ __und_usr:
+ ldr r5, .LCcpu_architecture
+ ldr r5, [r5]
+ cmp r5, #CPU_ARCH_ARMv7
+- blo __und_usr_unknown
++ blo __und_usr_fault_16 @ 16bit undefined instruction
+ /*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless. Temporarily
+@@ -461,15 +489,18 @@ __und_usr:
+ */
+ .arch armv6t2
+ #endif
+-2:
+- ARM( ldrht r5, [r4], #2 )
+- THUMB( ldrht r5, [r4] )
+- THUMB( add r4, r4, #2 )
++2: ldrht r5, [r4]
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+- blo __und_usr_unknown
+-3: ldrht r0, [r4]
++ blo __und_usr_fault_16 @ 16bit undefined instruction
++3: ldrht r0, [r2]
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
++ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
+ orr r0, r0, r5, lsl #16
++ adr lr, BSYM(__und_usr_fault_32)
++ @ r0 = the two 16-bit Thumb instructions which caused the exception
++ @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the first 16-bit Thumb instruction
++ @ lr = 32bit undefined instruction function
+
+ #if __LINUX_ARM_ARCH__ < 7
+ /* If the target arch was overridden, change it back: */
+@@ -480,17 +511,13 @@ __und_usr:
+ #endif
+ #endif /* __LINUX_ARM_ARCH__ < 7 */
+ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
+- b __und_usr_unknown
++ b __und_usr_fault_16
+ #endif
+- UNWIND(.fnend )
++ UNWIND(.fnend)
+ ENDPROC(__und_usr)
+
+- @
+- @ fallthrough to call_fpe
+- @
+-
+ /*
+- * The out of line fixup for the ldrt above.
++ * The out of line fixup for the ldrt instructions above.
+ */
+ .pushsection .fixup, "ax"
+ 4: mov pc, r9
+@@ -521,11 +548,12 @@ ENDPROC(__und_usr)
+ * NEON handler code.
+ *
+ * Emulators may wish to make use of the following registers:
+- * r0 = instruction opcode.
+- * r2 = PC+4
++ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++ * r2 = PC value to resume execution after successful emulation
+ * r9 = normal "successful" return address
+- * r10 = this threads thread_info structure.
++ * r10 = this threads thread_info structure
+ * lr = unrecognised instruction return address
++ * IRQs disabled, FIQs enabled.
+ */
+ @
+ @ Fall-through from Thumb-2 __und_usr
+@@ -660,12 +688,17 @@ ENTRY(no_fp)
+ mov pc, lr
+ ENDPROC(no_fp)
+
+-__und_usr_unknown:
+- enable_irq
++__und_usr_fault_32:
++ mov r1, #4
++ b 1f
++__und_usr_fault_16:
++ mov r1, #2
++1: enable_irq
+ mov r0, sp
+ adr lr, BSYM(ret_from_exception)
+- b do_undefinstr
+-ENDPROC(__und_usr_unknown)
++ b __und_fault
++ENDPROC(__und_usr_fault_32)
++ENDPROC(__und_usr_fault_16)
+
+ .align 5
+ __pabt_usr:
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 3d0c6fb..e68d251 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -125,6 +125,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ */
+ mdelay(1000);
+ printk("Reboot failed -- System halted\n");
++ local_irq_disable();
+ while (1);
+ }
+
+@@ -240,6 +241,7 @@ void machine_shutdown(void)
+ void machine_halt(void)
+ {
+ machine_shutdown();
++ local_irq_disable();
+ while (1);
+ }
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 160cb16..8380bd1 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -362,18 +362,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+
+ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ {
+- unsigned int correction = thumb_mode(regs) ? 2 : 4;
+ unsigned int instr;
+ siginfo_t info;
+ void __user *pc;
+
+- /*
+- * According to the ARM ARM, PC is 2 or 4 bytes ahead,
+- * depending whether we're in Thumb mode or not.
+- * Correct this offset.
+- */
+- regs->ARM_pc -= correction;
+-
+ pc = (void __user *)instruction_pointer(regs);
+
+ if (processor_mode(regs) == SVC_MODE) {
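Taken together, the entry-armv.S and traps.c hunks move the undefined-instruction PC fix-up out of do_undefinstr() and into the new __und_fault stub, which corrects the saved PC before the C handler runs. The rule itself is unchanged and easy to state in C (illustration only, not code from the patch):

/*
 * On an undefined-instruction exception the saved PC points past the
 * faulting instruction: 4 bytes past in ARM state (one 32-bit opcode),
 * 2 bytes past in Thumb state (the second half of the instruction).
 */
static inline unsigned long faulting_instr_addr(unsigned long saved_pc, int thumb)
{
	return saved_pc - (thumb ? 2 : 4);
}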
+diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
+index 845f461..c202113 100644
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
++#ifdef CONFIG_ARM_ERRATA_720789
++ mov r3, #0
++#else
+ asid r3, r3 @ mask ASID
++#endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
+ add r0, r0, #PAGE_SZ
+@@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
+ mov r0, r0, lsl #PAGE_SHIFT
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+ add r0, r0, #PAGE_SZ
+ cmp r0, r1
+diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
+index 4fa9903..cc926c9 100644
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -7,18 +7,20 @@
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+- *
+- * Basic entry code, called from the kernel's undefined instruction trap.
+- * r0 = faulted instruction
+- * r5 = faulted PC+4
+- * r9 = successful return
+- * r10 = thread_info structure
+- * lr = failure return
+ */
+ #include <asm/thread_info.h>
+ #include <asm/vfpmacros.h>
+ #include "../kernel/entry-header.S"
+
++@ VFP entry point.
++@
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
++@ r10 = this thread's thread_info structure
++@ lr = unrecognised instruction return address
++@ IRQs disabled.
++@
+ ENTRY(do_vfp)
+ #ifdef CONFIG_PREEMPT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
+index 2d30c7f..3a0efaa 100644
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -61,13 +61,13 @@
+
+ @ VFP hardware support entry point.
+ @
+-@ r0 = faulted instruction
+-@ r2 = faulted PC+4
+-@ r9 = successful return
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
+ @ r10 = vfp_state union
+ @ r11 = CPU number
+-@ lr = failure return
+-
++@ lr = unrecognised instruction return address
++@ IRQs enabled.
+ ENTRY(vfp_support_entry)
+ DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+
+@@ -161,9 +161,12 @@ vfp_hw_state_valid:
+ @ exception before retrying branch
+ @ out before setting an FPEXC that
+ @ stops us reading stuff
+- VFPFMXR FPEXC, r1 @ restore FPEXC last
+- sub r2, r2, #4
+- str r2, [sp, #S_PC] @ retry the instruction
++ VFPFMXR FPEXC, r1 @ Restore FPEXC last
++ sub r2, r2, #4 @ Retry current instruction - if Thumb
++ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
++ @ else it's one 32-bit instruction, so
++ @ always subtract 4 from the following
++ @ instruction address.
+ #ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 8ea07e4..ad83dad 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -453,10 +453,16 @@ static int vfp_pm_suspend(void)
+
+ /* disable, just in case */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
++ } else if (vfp_current_hw_state[ti->cpu]) {
++#ifndef CONFIG_SMP
++ fmxr(FPEXC, fpexc | FPEXC_EN);
++ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
++ fmxr(FPEXC, fpexc);
++#endif
+ }
+
+ /* clear any information we had about last context state */
+- memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
++ vfp_current_hw_state[ti->cpu] = NULL;
+
+ return 0;
+ }
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 3fad89e..2fc214b 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -18,8 +18,8 @@
+ #include <asm/system.h>
+
+
+-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
++#define ATOMIC_INIT(i) { (i) }
++#define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
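The hunk above switches ia64's ATOMIC_INIT()/ATOMIC64_INIT() to plain brace initializers, matching other architectures; the companion mspec.c hunk further down replaces a runtime ATOMIC_INIT() assignment with atomic_set(). A minimal userspace sketch of that distinction, using an invented my_atomic_t rather than the kernel's real type:

#include <stdio.h>

/* Not part of the patch: a brace initializer is only valid where an
 * object is being defined, while an already-defined object has to be
 * updated through a setter -- exactly what the mspec.c hunk later in
 * this patch switches to. */
typedef struct { int counter; } my_atomic_t;

#define MY_ATOMIC_INIT(i)	{ (i) }

static void my_atomic_set(my_atomic_t *v, int i)
{
	v->counter = i;
}

static my_atomic_t refcnt = MY_ATOMIC_INIT(1);	/* fine: definition */

int main(void)
{
	/* refcnt = MY_ATOMIC_INIT(2); would not compile: a brace list is
	 * not an expression, so runtime updates go through the setter. */
	my_atomic_set(&refcnt, 2);
	printf("counter=%d\n", refcnt.counter);
	return 0;
}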
+diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
+index c3c5a86..8798ebc 100644
+--- a/arch/m68k/include/asm/entry.h
++++ b/arch/m68k/include/asm/entry.h
+@@ -33,8 +33,8 @@
+
+ /* the following macro is used when enabling interrupts */
+ #if defined(MACH_ATARI_ONLY)
+- /* block out HSYNC on the atari */
+-#define ALLOWINT (~0x400)
++ /* block out HSYNC = ipl 2 on the atari */
++#define ALLOWINT (~0x500)
+ #define MAX_NOINT_IPL 3
+ #else
+ /* portable version */
+diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
+index 8623f8d..9a5932e 100644
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+ goto bad_access;
+ }
+
+- mem_value = *mem;
++ /*
++ * No need to check for EFAULT; we know that the page is
++ * present and writable.
++ */
++ __get_user(mem_value, mem);
+ if (mem_value == oldval)
+- *mem = newval;
++ __put_user(newval, mem);
+
+ pte_unmap_unlock(pte, ptl);
+ up_read(&mm->mmap_sem);
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 5682f16..20f0e01 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -12,7 +12,6 @@
+ #include <asm/pgalloc.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+-#include <asm-generic/mm_hooks.h>
+
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+@@ -92,4 +91,17 @@ static inline void activate_mm(struct mm_struct *prev,
+ switch_mm(prev, next, current);
+ }
+
++static inline void arch_dup_mmap(struct mm_struct *oldmm,
++ struct mm_struct *mm)
++{
++#ifdef CONFIG_64BIT
++ if (oldmm->context.asce_limit < mm->context.asce_limit)
++ crst_table_downgrade(mm, oldmm->context.asce_limit);
++#endif
++}
++
++static inline void arch_exit_mmap(struct mm_struct *mm)
++{
++}
++
+ #endif /* __S390_MMU_CONTEXT_H */
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 5f33d37..172550d 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -130,7 +130,9 @@ struct stack_frame {
+ regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+ regs->gprs[15] = new_stackp; \
++ __tlb_flush_mm(current->mm); \
+ crst_table_downgrade(current->mm, 1UL << 31); \
++ update_mm(current->mm, current); \
+ } while (0)
+
+ /* Forward declaration, a strange C thing */
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index b28aaa4..0fc0a7e 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -453,6 +453,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ struct pt_regs regs;
+ int access, fault;
+
++ /* Emulate a uaccess fault from kernel mode. */
+ regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+ if (!irqs_disabled())
+ regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+@@ -461,12 +462,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ uaddr &= PAGE_MASK;
+ access = write ? VM_WRITE : VM_READ;
+ fault = do_exception(&regs, access, uaddr | 2);
+- if (unlikely(fault)) {
+- if (fault & VM_FAULT_OOM)
+- return -EFAULT;
+- else if (fault & VM_FAULT_SIGBUS)
+- do_sigbus(&regs, pgm_int_code, uaddr);
+- }
++ /*
++ * Since the fault happened in kernel mode while performing a uaccess,
++ * all we need to do now is emulate a fixup in case "fault" is
++ * non-zero.
++ * For the calling uaccess functions this always results in -EFAULT.
++ */
+ return fault ? -EFAULT : 0;
+ }
+
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index a0155c0..c70b3d8 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -106,9 +106,15 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
+
+ int s390_mmap_check(unsigned long addr, unsigned long len)
+ {
++ int rc;
++
+ if (!is_compat_task() &&
+- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+- return crst_table_upgrade(current->mm, 1UL << 53);
++ len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
++ rc = crst_table_upgrade(current->mm, 1UL << 53);
++ if (rc)
++ return rc;
++ update_mm(current->mm, current);
++ }
+ return 0;
+ }
+
+@@ -128,6 +134,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+ }
+ return area;
+@@ -150,6 +157,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area_topdown(filp, addr, len,
+ pgoff, flags);
+ }
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index f8ceac4..f8e92f8 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -97,7 +97,6 @@ repeat:
+ crst_table_free(mm, table);
+ if (mm->context.asce_limit < limit)
+ goto repeat;
+- update_mm(mm, current);
+ return 0;
+ }
+
+@@ -105,9 +104,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ {
+ pgd_t *pgd;
+
+- if (mm->context.asce_limit <= limit)
+- return;
+- __tlb_flush_mm(mm);
+ while (mm->context.asce_limit > limit) {
+ pgd = mm->pgd;
+ switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+@@ -130,7 +126,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+ }
+- update_mm(mm, current);
+ }
+ #endif
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 1f84794..73ef56c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -219,7 +219,7 @@ void __init arch_init_ideal_nops(void)
+ ideal_nops = intel_nops;
+ #endif
+ }
+-
++ break;
+ default:
+ #ifdef CONFIG_X86_64
+ ideal_nops = k8_nops;
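The one-line change above adds the break missing from the Intel arm of the switch in arch_init_ideal_nops(), which otherwise fell through into the default arm and overwrote ideal_nops. A small, self-contained reduction of that failure mode (all names are invented for the example):

#include <stdio.h>

enum vendor { VENDOR_INTEL, VENDOR_OTHER };

static const char *pick_nops(enum vendor v)
{
	const char *nops = NULL;

	switch (v) {
	case VENDOR_INTEL:
		nops = "intel_nops";
		break;			/* the one-line fix in the hunk above */
	default:
		nops = "k8_nops";
		break;
	}
	return nops;
}

int main(void)
{
	printf("%s\n", pick_nops(VENDOR_INTEL));	/* prints intel_nops */
	return 0;
}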
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 1b267e7..00a0385 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -686,6 +686,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
+ unsigned long uninitialized_var(address);
+ unsigned level;
+ pte_t *ptep = NULL;
++ int ret = 0;
+
+ pfn = page_to_pfn(page);
+ if (!PageHighMem(page)) {
+@@ -721,6 +722,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
+ list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
+ spin_unlock_irqrestore(&m2p_override_lock, flags);
+
++ /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
++ * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
++ * pfn so that the following mfn_to_pfn(mfn) calls will return the
++ * pfn from the m2p_override (the backend pfn) instead.
++ * We need to do this because the pages shared by the frontend
++ * (xen-blkfront) can already be locked (lock_page, called by
++ * do_read_cache_page); when the userspace backend tries to use them
++ * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
++ * do_blockdev_direct_IO is going to try to lock the same pages
++ * again resulting in a deadlock.
++ * As a side effect get_user_pages_fast might not be safe on the
++ * frontend pages while they are being shared with the backend,
++ * because mfn_to_pfn (that ends up being called by GUPF) will
++ * return the backend pfn rather than the frontend pfn. */
++ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
++ if (ret == 0 && get_phys_to_machine(pfn) == mfn)
++ set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(m2p_add_override);
+@@ -732,6 +751,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
+ unsigned long uninitialized_var(address);
+ unsigned level;
+ pte_t *ptep = NULL;
++ int ret = 0;
+
+ pfn = page_to_pfn(page);
+ mfn = get_phys_to_machine(pfn);
+@@ -801,6 +821,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
+ } else
+ set_phys_to_machine(pfn, page->index);
+
++ /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
++ * somewhere in this domain, even before being added to the
++ * m2p_override (see comment above in m2p_add_override).
++ * If there are no other entries in the m2p_override corresponding
++ * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
++ * the original pfn (the one shared by the frontend): the backend
++ * cannot do any IO on this page anymore because it has been
++ * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
++ * the original pfn causes mfn_to_pfn(mfn) to return the frontend
++ * pfn again. */
++ mfn &= ~FOREIGN_FRAME_BIT;
++ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
++ if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
++ m2p_find_override(mfn) == NULL)
++ set_phys_to_machine(pfn, mfn);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(m2p_remove_override);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 9955a53..c864add 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4369,8 +4369,14 @@ out_unreg_blkdev:
+ out_put_disk:
+ while (dr--) {
+ del_timer_sync(&motor_off_timer[dr]);
+- if (disks[dr]->queue)
++ if (disks[dr]->queue) {
+ blk_cleanup_queue(disks[dr]->queue);
++ /*
++ * put_disk() is not paired with add_disk() and
++ * will put queue reference one extra time. fix it.
++ */
++ disks[dr]->queue = NULL;
++ }
+ put_disk(disks[dr]);
+ }
+ return err;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index e46f2f7..650a308 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -20,8 +20,6 @@ struct workqueue_struct *virtblk_wq;
+
+ struct virtio_blk
+ {
+- spinlock_t lock;
+-
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+
+@@ -62,7 +60,7 @@ static void blk_done(struct virtqueue *vq)
+ unsigned int len;
+ unsigned long flags;
+
+- spin_lock_irqsave(&vblk->lock, flags);
++ spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
+ int error;
+
+@@ -97,7 +95,7 @@ static void blk_done(struct virtqueue *vq)
+ }
+ /* In case queue is stopped waiting for more buffers. */
+ blk_start_queue(vblk->disk->queue);
+- spin_unlock_irqrestore(&vblk->lock, flags);
++ spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+ }
+
+ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+@@ -384,7 +382,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ }
+
+ INIT_LIST_HEAD(&vblk->reqs);
+- spin_lock_init(&vblk->lock);
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+ sg_init_table(vblk->sg, vblk->sg_elems);
+@@ -410,7 +407,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ goto out_mempool;
+ }
+
+- q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
++ q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+ if (!q) {
+ err = -ENOMEM;
+ goto out_put_disk;
+diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
+index 5c0d96a..b12ffea 100644
+--- a/drivers/char/mspec.c
++++ b/drivers/char/mspec.c
+@@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ vdata->flags = flags;
+ vdata->type = type;
+ spin_lock_init(&vdata->lock);
+- vdata->refcnt = ATOMIC_INIT(1);
++ atomic_set(&vdata->refcnt, 1);
+ vma->vm_private_data = vdata;
+
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 6035ab8..631d4f6 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -125,21 +125,26 @@
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
++ * void add_device_randomness(const void *buf, unsigned int size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+- * void add_interrupt_randomness(int irq);
++ * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
++ * add_device_randomness() is for adding data to the random pool that
++ * is likely to differ between two devices (or possibly even per boot).
++ * This would be things like MAC addresses or serial numbers, or the
++ * read-out of the RTC. This does *not* add any actual entropy to the
++ * pool, but it initializes the pool to different values for devices
++ * that might otherwise be identical and have very little entropy
++ * available to them (particularly common in the embedded world).
++ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+- * add_interrupt_randomness() uses the inter-interrupt timing as random
+- * inputs to the entropy pool. Note that not all interrupts are good
+- * sources of randomness! For example, the timer interrupts is not a
+- * good choice, because the periodicity of the interrupts is too
+- * regular, and hence predictable to an attacker. Network Interface
+- * Controller interrupts are a better measure, since the timing of the
+- * NIC interrupts are more unpredictable.
++ * add_interrupt_randomness() uses the interrupt timing as random
++ * inputs to the entropy pool. Using the cycle counters and the irq source
++ * as inputs, it feeds the randomness roughly once a second.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
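As a rough illustration of the add_device_randomness() interface documented above (kernel-style, so it only builds in a kernel tree; the helper and its "serial" argument are invented): the call adds no entropy credit and merely perturbs the initial pool state, so feeding it predictable per-device data is safe. This mirrors what the USB hub and wm831x hunks later in this patch do.

#include <linux/random.h>
#include <linux/string.h>

/* Hypothetical driver-probe helper: seed the pools with whatever
 * per-device identity data is at hand. */
static void example_seed_pools(const char *serial)
{
	if (serial)
		add_device_randomness(serial, strlen(serial));
}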
+@@ -248,6 +253,8 @@
+ #include <linux/percpu.h>
+ #include <linux/cryptohash.h>
+ #include <linux/fips.h>
++#include <linux/ptrace.h>
++#include <linux/kmemcheck.h>
+
+ #ifdef CONFIG_GENERIC_HARDIRQS
+ # include <linux/irq.h>
+@@ -256,6 +263,7 @@
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+ #include <asm/irq.h>
++#include <asm/irq_regs.h>
+ #include <asm/io.h>
+
+ /*
+@@ -266,6 +274,8 @@
+ #define SEC_XFER_SIZE 512
+ #define EXTRACT_SIZE 10
+
++#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
++
+ /*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random. Should be enough to do a significant reseed.
+@@ -420,8 +430,10 @@ struct entropy_store {
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
++ unsigned input_rotate;
+ int entropy_count;
+- int input_rotate;
++ int entropy_total;
++ unsigned int initialized:1;
+ __u8 last_data[EXTRACT_SIZE];
+ };
+
+@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_pool = {
+ .pool = nonblocking_pool_data
+ };
+
++static __u32 const twist_table[8] = {
++ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
++ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++
+ /*
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_pool = {
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+- int nbytes, __u8 out[64])
++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
+ {
+- static __u32 const twist_table[8] = {
+- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+- unsigned long flags;
+
+- /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
+
+- spin_lock_irqsave(&r->lock, flags);
+- input_rotate = r->input_rotate;
+- i = r->add_ptr;
++ smp_rmb();
++ input_rotate = ACCESS_ONCE(r->input_rotate);
++ i = ACCESS_ONCE(r->add_ptr);
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+@@ -513,19 +524,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ input_rotate += i ? 7 : 14;
+ }
+
+- r->input_rotate = input_rotate;
+- r->add_ptr = i;
++ ACCESS_ONCE(r->input_rotate) = input_rotate;
++ ACCESS_ONCE(r->add_ptr) = i;
++ smp_wmb();
+
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
++}
++
++static void mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
++{
++ unsigned long flags;
+
++ spin_lock_irqsave(&r->lock, flags);
++ __mix_pool_bytes(r, in, nbytes, out);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
++struct fast_pool {
++ __u32 pool[4];
++ unsigned long last;
++ unsigned short count;
++ unsigned char rotate;
++ unsigned char last_timer_intr;
++};
++
++/*
++ * This is a fast mixing routine used by the interrupt randomness
++ * collector. It's hardcoded for a 128-bit pool and assumes that any
++ * locks that might be needed are taken by the caller.
++ */
++static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+ {
+- mix_pool_bytes_extract(r, in, bytes, NULL);
++ const char *bytes = in;
++ __u32 w;
++ unsigned i = f->count;
++ unsigned input_rotate = f->rotate;
++
++ while (nbytes--) {
++ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
++ f->pool[(i + 1) & 3];
++ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate += (i++ & 3) ? 7 : 14;
++ }
++ f->count = i;
++ f->rotate = input_rotate;
+ }
+
+ /*
+@@ -533,30 +578,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+ */
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+- unsigned long flags;
+- int entropy_count;
++ int entropy_count, orig;
+
+ if (!nbits)
+ return;
+
+- spin_lock_irqsave(&r->lock, flags);
+-
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+- entropy_count = r->entropy_count;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count += nbits;
+ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+ entropy_count = 0;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
+- r->entropy_count = entropy_count;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++
++ if (!r->initialized && nbits > 0) {
++ r->entropy_total += nbits;
++ if (r->entropy_total > 128)
++ r->initialized = 1;
++ }
+
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+- spin_unlock_irqrestore(&r->lock, flags);
+ }
+
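credit_entropy_bits() above drops its spinlock in favour of a cmpxchg() retry loop: reread the counter, compute the clamped new value, and retry if another CPU raced in between. A minimal userspace sketch of the same lock-free pattern using C11 atomics (POOLBITS and the function name are invented for the example):

#include <stdatomic.h>
#include <stdio.h>

#define POOLBITS 4096			/* arbitrary cap for the example */

static _Atomic int entropy_count;

static void credit_bits(int nbits)
{
	int orig, new;

	do {
		orig = atomic_load(&entropy_count);
		new = orig + nbits;
		if (new < 0)
			new = 0;		/* negative/overflow: clamp */
		else if (new > POOLBITS)
			new = POOLBITS;		/* saturate at pool size */
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));
}

int main(void)
{
	credit_bits(64);
	printf("entropy_count=%d\n", atomic_load(&entropy_count));
	return 0;
}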
+ /*********************************************************************
+@@ -609,6 +658,25 @@ static void set_timer_rand_state(unsigned int irq,
+ }
+ #endif
+
++/*
++ * Add device- or boot-specific data to the input and nonblocking
++ * pools to help initialize them to unique values.
++ *
++ * None of this adds any entropy, it is meant to avoid the
++ * problem of the nonblocking pool having similar initial state
++ * across largely identical devices.
++ */
++void add_device_randomness(const void *buf, unsigned int size)
++{
++ unsigned long time = get_cycles() ^ jiffies;
++
++ mix_pool_bytes(&input_pool, buf, size, NULL);
++ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
++ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
++ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
++}
++EXPORT_SYMBOL(add_device_randomness);
++
+ static struct timer_rand_state input_timer_state;
+
+ /*
+@@ -624,8 +692,8 @@ static struct timer_rand_state input_timer_state;
+ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ {
+ struct {
+- cycles_t cycles;
+ long jiffies;
++ unsigned cycles;
+ unsigned num;
+ } sample;
+ long delta, delta2, delta3;
+@@ -639,7 +707,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
++ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+@@ -696,17 +764,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ }
+ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+-void add_interrupt_randomness(int irq)
++static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
++
++void add_interrupt_randomness(int irq, int irq_flags)
+ {
+- struct timer_rand_state *state;
++ struct entropy_store *r;
++ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
++ struct pt_regs *regs = get_irq_regs();
++ unsigned long now = jiffies;
++ __u32 input[4], cycles = get_cycles();
++
++ input[0] = cycles ^ jiffies;
++ input[1] = irq;
++ if (regs) {
++ __u64 ip = instruction_pointer(regs);
++ input[2] = ip;
++ input[3] = ip >> 32;
++ }
+
+- state = get_timer_rand_state(irq);
++ fast_mix(fast_pool, input, sizeof(input));
+
+- if (state == NULL)
++ if ((fast_pool->count & 1023) &&
++ !time_after(now, fast_pool->last + HZ))
+ return;
+
+- DEBUG_ENT("irq event %d\n", irq);
+- add_timer_randomness(state, 0x100 + irq);
++ fast_pool->last = now;
++
++ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++ /*
++ * If we don't have a valid cycle counter, and we see
++ * back-to-back timer interrupts, then skip giving credit for
++ * any entropy.
++ */
++ if (cycles == 0) {
++ if (irq_flags & __IRQF_TIMER) {
++ if (fast_pool->last_timer_intr)
++ return;
++ fast_pool->last_timer_intr = 1;
++ } else
++ fast_pool->last_timer_intr = 0;
++ }
++ credit_entropy_bits(r, 1);
+ }
+
+ #ifdef CONFIG_BLOCK
+@@ -738,7 +837,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ */
+ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
++ __u32 tmp[OUTPUT_POOL_WORDS];
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+@@ -757,7 +856,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+- mix_pool_bytes(r, tmp, bytes);
++ mix_pool_bytes(r, tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
+ }
+ }
+@@ -816,13 +915,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ static void extract_buf(struct entropy_store *r, __u8 *out)
+ {
+ int i;
+- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
++ union {
++ __u32 w[5];
++ unsigned long l[LONGS(EXTRACT_SIZE)];
++ } hash;
++ __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
++ unsigned long flags;
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+- sha_init(hash);
++ sha_init(hash.w);
++ spin_lock_irqsave(&r->lock, flags);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
++ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
+ /*
+ * We mix the hash back into the pool to prevent backtracking
+@@ -833,13 +938,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
+ */
+- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
++ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
++ spin_unlock_irqrestore(&r->lock, flags);
+
+ /*
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
+ */
+- sha_transform(hash, extract, workspace);
++ sha_transform(hash.w, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
+
+@@ -848,19 +954,30 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
+ */
+- hash[0] ^= hash[3];
+- hash[1] ^= hash[4];
+- hash[2] ^= rol32(hash[2], 16);
+- memcpy(out, hash, EXTRACT_SIZE);
+- memset(hash, 0, sizeof(hash));
++ hash.w[0] ^= hash.w[3];
++ hash.w[1] ^= hash.w[4];
++ hash.w[2] ^= rol32(hash.w[2], 16);
++
++ /*
++ * If we have an architectural hardware random number
++ * generator, mix that in, too.
++ */
++ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
++ unsigned long v;
++ if (!arch_get_random_long(&v))
++ break;
++ hash.l[i] ^= v;
++ }
++
++ memcpy(out, &hash, EXTRACT_SIZE);
++ memset(&hash, 0, sizeof(hash));
+ }
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+- size_t nbytes, int min, int reserved)
++ size_t nbytes, int min, int reserved)
+ {
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+- unsigned long flags;
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+@@ -869,6 +986,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ extract_buf(r, tmp);
+
+ if (fips_enabled) {
++ unsigned long flags;
++
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+@@ -927,17 +1046,34 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+
+ /*
+ * This function is the exported kernel interface. It returns some
+- * number of good random numbers, suitable for seeding TCP sequence
+- * numbers, etc.
++ * number of good random numbers, suitable for key generation, seeding
++ * TCP sequence numbers, etc. It does not use the hw random number
++ * generator, if available; use get_random_bytes_arch() for that.
+ */
+ void get_random_bytes(void *buf, int nbytes)
+ {
++ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
++}
++EXPORT_SYMBOL(get_random_bytes);
++
++/*
++ * This function will use the architecture-specific hardware random
++ * number generator if it is available. The arch-specific hw RNG will
++ * almost certainly be faster than what we can do in software, but it
++ * is impossible to verify that it is implemented securely (as
++ * opposed to, say, the AES encryption of a sequence number using a
++ * key known by the NSA). So it's useful if we need the speed, but
++ * only if we're willing to trust the hardware manufacturer not to
++ * have put in a back door.
++ */
++void get_random_bytes_arch(void *buf, int nbytes)
++{
+ char *p = buf;
+
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+-
++
+ if (!arch_get_random_long(&v))
+ break;
+
+@@ -946,9 +1082,11 @@ void get_random_bytes(void *buf, int nbytes)
+ nbytes -= chunk;
+ }
+
+- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
++ if (nbytes)
++ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ }
+-EXPORT_SYMBOL(get_random_bytes);
++EXPORT_SYMBOL(get_random_bytes_arch);
++
+
+ /*
+ * init_std_data - initialize pool with system data
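To make the split above concrete: get_random_bytes() is kept for key material and never reads the CPU's RNG directly, while the new get_random_bytes_arch() prefers the architectural RNG and falls back to the nonblocking pool for any bytes it cannot obtain. A hedged kernel-style usage sketch (builds only in a kernel tree; the function and buffers are invented):

#include <linux/random.h>
#include <linux/types.h>

static void example_fill(u8 *key, u8 *cookie, int len)
{
	get_random_bytes(key, len);		/* cryptographic uses */
	get_random_bytes_arch(cookie, len);	/* speed over vendor trust */
}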
+@@ -961,16 +1099,19 @@ EXPORT_SYMBOL(get_random_bytes);
+ */
+ static void init_std_data(struct entropy_store *r)
+ {
+- ktime_t now;
+- unsigned long flags;
++ int i;
++ ktime_t now = ktime_get_real();
++ unsigned long rv;
+
+- spin_lock_irqsave(&r->lock, flags);
+ r->entropy_count = 0;
+- spin_unlock_irqrestore(&r->lock, flags);
+-
+- now = ktime_get_real();
+- mix_pool_bytes(r, &now, sizeof(now));
+- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
++ r->entropy_total = 0;
++ mix_pool_bytes(r, &now, sizeof(now), NULL);
++ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
++ if (!arch_get_random_long(&rv))
++ break;
++ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
++ }
++ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
+ static int rand_initialize(void)
+@@ -1107,7 +1248,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ count -= bytes;
+ p += bytes;
+
+- mix_pool_bytes(r, buf, bytes);
++ mix_pool_bytes(r, buf, bytes, NULL);
+ cond_resched();
+ }
+
+diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
+index 51e0e2d..a330492 100644
+--- a/drivers/firmware/pcdp.c
++++ b/drivers/firmware/pcdp.c
+@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+- pcdp = ioremap(efi.hcdp, 4096);
++ pcdp = early_ioremap(efi.hcdp, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
+
+ if (strstr(cmdline, "console=hcdp")) {
+@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
+ }
+
+ out:
+- iounmap(pcdp);
++ early_iounmap(pcdp, 4096);
+ return rc;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index d4c4937..fae2050 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -708,8 +708,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+
+ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+- for (clock = 0; clock <= max_clock; clock++) {
++ for (clock = 0; clock <= max_clock; clock++) {
++ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (intel_dp_link_required(mode->clock, bpp)
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index a6dcd18..96532bc 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -40,11 +40,28 @@
+ * Note that newer firmware allows querying device for maximum useable
+ * coordinates.
+ */
++#define XMIN 0
++#define XMAX 6143
++#define YMIN 0
++#define YMAX 6143
+ #define XMIN_NOMINAL 1472
+ #define XMAX_NOMINAL 5472
+ #define YMIN_NOMINAL 1408
+ #define YMAX_NOMINAL 4448
+
++/* Size in bits of absolute position values reported by the hardware */
++#define ABS_POS_BITS 13
++
++/*
++ * Any position values from the hardware above the following limits are
++ * treated as "wrapped around negative" values that have been truncated to
++ * the 13-bit reporting range of the hardware. These are just reasonable
++ * guesses and can be adjusted if hardware is found that operates outside
++ * of these parameters.
++ */
++#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
++#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
++
+ /*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+ * opposite from what userspace expects.
+@@ -544,6 +561,12 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+ }
+
++ /* Convert wrap-around values to negative */
++ if (hw->x > X_MAX_POSITIVE)
++ hw->x -= 1 << ABS_POS_BITS;
++ if (hw->y > Y_MAX_POSITIVE)
++ hw->y -= 1 << ABS_POS_BITS;
++
+ return 0;
+ }
+
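The synaptics hunk above treats raw 13-bit coordinates past a midpoint threshold as negative values that wrapped and were truncated by the hardware. A small standalone program showing the same conversion with the constants from the patch:

#include <stdio.h>

#define ABS_POS_BITS	13
#define XMAX		6143
#define X_MAX_POSITIVE	(((1 << ABS_POS_BITS) + XMAX) / 2)

/* Anything above the midpoint is a wrapped negative coordinate. */
static int decode_x(int raw)
{
	if (raw > X_MAX_POSITIVE)
		raw -= 1 << ABS_POS_BITS;
	return raw;
}

int main(void)
{
	printf("%d -> %d\n", 8100, decode_x(8100));	/* wrapped: -92 */
	printf("%d -> %d\n", 3000, decode_x(3000));	/* in range: 3000 */
	return 0;
}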
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 532a902..d432032 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -19,7 +19,7 @@
+ /*
+ * Tunable constants
+ */
+-#define ENDIO_HOOK_POOL_SIZE 10240
++#define ENDIO_HOOK_POOL_SIZE 1024
+ #define DEFERRED_SET_SIZE 64
+ #define MAPPING_POOL_SIZE 1024
+ #define PRISON_CELLS 1024
+@@ -857,7 +857,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+
+ if (m->err) {
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -869,7 +869,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ if (r) {
+ DMERR("dm_thin_insert_block() failed");
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -884,6 +884,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ } else
+ cell_defer(tc, m->cell, m->data_block);
+
++out:
+ list_del(&m->list);
+ mempool_free(m, tc->pool->mapping_pool);
+ }
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 2d97bf0..62306e5 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2321,7 +2321,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
+ /* There is nowhere to write, so all non-sync
+ * drives must be failed - so we are finished
+ */
+- sector_t rv = max_sector - sector_nr;
++ sector_t rv;
++ if (min_bad > 0)
++ max_sector = sector_nr + min_bad;
++ rv = max_sector - sector_nr;
+ *skipped = 1;
+ put_buf(r1_bio);
+ return rv;
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index ed77c6d..5327061 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ spin_lock_init(&dev->hw_lock);
+
++ dev->hw_io = pnp_port_start(pnp_dev, 0);
++
+ pnp_set_drvdata(pnp_dev, dev);
+ dev->pnp_dev = pnp_dev;
+
+@@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ /* claim the resources */
+ error = -EBUSY;
+- dev->hw_io = pnp_port_start(pnp_dev, 0);
+ if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+ dev->hw_io = -1;
+ dev->irq = -1;
+diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
+index 60107ee..4eec7b7 100644
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
+ u32 fatevent;
+ int err;
+
+- add_interrupt_randomness(irq);
+-
+ err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
+ event_regs, 3);
+ if (err)
+diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
+index f742745..b90f3e0 100644
+--- a/drivers/mfd/wm831x-otp.c
++++ b/drivers/mfd/wm831x-otp.c
+@@ -18,6 +18,7 @@
+ #include <linux/bcd.h>
+ #include <linux/delay.h>
+ #include <linux/mfd/core.h>
++#include <linux/random.h>
+
+ #include <linux/mfd/wm831x/core.h>
+ #include <linux/mfd/wm831x/otp.h>
+@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
+
+ int wm831x_otp_init(struct wm831x *wm831x)
+ {
++ char uuid[WM831X_UNIQUE_ID_LEN];
+ int ret;
+
+ ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
+@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
+ dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
+ ret);
+
++ ret = wm831x_unique_id_read(wm831x, uuid);
++ if (ret == 0)
++ add_device_randomness(uuid, sizeof(uuid));
++ else
++ dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index bdf960b..ae7528b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -925,6 +925,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0411, 0x015d) },
+ { USB_DEVICE(0x0411, 0x016f) },
+ { USB_DEVICE(0x0411, 0x01a2) },
++ { USB_DEVICE(0x0411, 0x01ee) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x002f) },
+ { USB_DEVICE(0x07aa, 0x003c) },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index d1049ee..26fba2d 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -1431,14 +1431,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
+ */
+ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS;
+- else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
++ else
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
+
+- if (!asus->dsts_id) {
+- pr_err("Can't find DSTS");
+- return -ENODEV;
+- }
+-
+ /* CWAP allow to define the behavior of the Fn+F2 key,
+ * this method doesn't seems to be present on Eee PCs */
+ if (asus->driver->wapf >= 0)
+diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
+index bdc909b..f3c2110 100644
+--- a/drivers/rtc/rtc-wm831x.c
++++ b/drivers/rtc/rtc-wm831x.c
+@@ -24,7 +24,7 @@
+ #include <linux/mfd/wm831x/core.h>
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+-
++#include <linux/random.h>
+
+ /*
+ * R16416 (0x4020) - RTC Write Counter
+@@ -96,6 +96,26 @@ struct wm831x_rtc {
+ unsigned int alarm_enabled:1;
+ };
+
++static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
++{
++ int ret;
++ u16 reg;
++
++ /*
++ * The write counter contains a pseudo-random number which is
++ * regenerated every time we set the RTC so it should be a
++ * useful per-system source of entropy.
++ */
++ ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
++ if (ret >= 0) {
++ reg = ret;
++ add_device_randomness(&reg, sizeof(reg));
++ } else {
++ dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
++ ret);
++ }
++}
++
+ /*
+ * Read current time and date in RTC
+ */
+@@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
+ alm_irq, ret);
+ }
+
++ wm831x_rtc_add_randomness(wm831x);
++
+ return 0;
+
+ err:
+diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
+index 6903d39..90e9e32 100644
+--- a/drivers/staging/media/lirc/lirc_sir.c
++++ b/drivers/staging/media/lirc/lirc_sir.c
+@@ -53,6 +53,7 @@
+ #include <linux/io.h>
+ #include <asm/irq.h>
+ #include <linux/fcntl.h>
++#include <linux/platform_device.h>
+ #ifdef LIRC_ON_SA1100
+ #include <asm/hardware.h>
+ #ifdef CONFIG_SA1100_COLLIE
+@@ -488,9 +489,11 @@ static struct lirc_driver driver = {
+ .owner = THIS_MODULE,
+ };
+
++static struct platform_device *lirc_sir_dev;
+
+ static int init_chrdev(void)
+ {
++ driver.dev = &lirc_sir_dev->dev;
+ driver.minor = lirc_register_driver(&driver);
+ if (driver.minor < 0) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
+@@ -1216,20 +1219,71 @@ static int init_lirc_sir(void)
+ return 0;
+ }
+
++static int __devinit lirc_sir_probe(struct platform_device *dev)
++{
++ return 0;
++}
++
++static int __devexit lirc_sir_remove(struct platform_device *dev)
++{
++ return 0;
++}
++
++static struct platform_driver lirc_sir_driver = {
++ .probe = lirc_sir_probe,
++ .remove = __devexit_p(lirc_sir_remove),
++ .driver = {
++ .name = "lirc_sir",
++ .owner = THIS_MODULE,
++ },
++};
+
+ static int __init lirc_sir_init(void)
+ {
+ int retval;
+
++ retval = platform_driver_register(&lirc_sir_driver);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
++ "failed!\n");
++ return -ENODEV;
++ }
++
++ lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
++ if (!lirc_sir_dev) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
++ "failed!\n");
++ retval = -ENOMEM;
++ goto pdev_alloc_fail;
++ }
++
++ retval = platform_device_add(lirc_sir_dev);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
++ "failed!\n");
++ retval = -ENODEV;
++ goto pdev_add_fail;
++ }
++
+ retval = init_chrdev();
+ if (retval < 0)
+- return retval;
++ goto fail;
++
+ retval = init_lirc_sir();
+ if (retval) {
+ drop_chrdev();
+- return retval;
++ goto fail;
+ }
++
+ return 0;
++
++fail:
++ platform_device_del(lirc_sir_dev);
++pdev_add_fail:
++ platform_device_put(lirc_sir_dev);
++pdev_alloc_fail:
++ platform_driver_unregister(&lirc_sir_driver);
++ return retval;
+ }
+
+ static void __exit lirc_sir_exit(void)
+@@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void)
+ drop_hardware();
+ drop_chrdev();
+ drop_port();
++ platform_device_unregister(lirc_sir_dev);
++ platform_driver_unregister(&lirc_sir_driver);
+ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
+ }
+
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index a4b192d..08b92a6 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -660,7 +660,8 @@ static void pch_dma_rx_complete(void *arg)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ async_tx_ack(priv->desc_rx);
+- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ }
+
+ static void pch_dma_tx_complete(void *arg)
+@@ -715,7 +716,8 @@ static int handle_rx_to(struct eg20t_port *priv)
+ int rx_size;
+ int ret;
+ if (!priv->start_rx) {
+- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ return 0;
+ }
+ buf = &priv->rxbuf;
+@@ -977,11 +979,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ case PCH_UART_IID_RDR: /* Received Data Ready */
+ if (priv->use_dma) {
+ pch_uart_hal_disable_interrupt(priv,
+- PCH_UART_HAL_RX_INT);
++ PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ ret = dma_handle_rx(priv);
+ if (!ret)
+ pch_uart_hal_enable_interrupt(priv,
+- PCH_UART_HAL_RX_INT);
++ PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ } else {
+ ret = handle_rx(priv);
+ }
+@@ -1107,7 +1111,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+ priv->start_rx = 0;
+- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ priv->int_dis_flag = 1;
+ }
+
+@@ -1163,6 +1168,7 @@ static int pch_uart_startup(struct uart_port *port)
+ break;
+ case 16:
+ fifo_size = PCH_UART_HAL_FIFO16;
++ break;
+ case 1:
+ default:
+ fifo_size = PCH_UART_HAL_FIFO_DIS;
+@@ -1200,7 +1206,8 @@ static int pch_uart_startup(struct uart_port *port)
+ pch_request_dma(port);
+
+ priv->start_rx = 1;
+- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ uart_update_timeout(port, CS8, default_baud);
+
+ return 0;
+@@ -1258,7 +1265,7 @@ static void pch_uart_set_termios(struct uart_port *port,
+ stb = PCH_UART_HAL_STB1;
+
+ if (termios->c_cflag & PARENB) {
+- if (!(termios->c_cflag & PARODD))
++ if (termios->c_cflag & PARODD)
+ parity = PCH_UART_HAL_PARITY_ODD;
+ else
+ parity = PCH_UART_HAL_PARITY_EVEN;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 175b6bb..52340cc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -24,6 +24,7 @@
+ #include <linux/kthread.h>
+ #include <linux/mutex.h>
+ #include <linux/freezer.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -1897,6 +1898,14 @@ int usb_new_device(struct usb_device *udev)
+ /* Tell the world! */
+ announce_device(udev);
+
++ if (udev->serial)
++ add_device_randomness(udev->serial, strlen(udev->serial));
++ if (udev->product)
++ add_device_randomness(udev->product, strlen(udev->product));
++ if (udev->manufacturer)
++ add_device_randomness(udev->manufacturer,
++ strlen(udev->manufacturer));
++
+ device_enable_async_suspend(&udev->dev);
+ /* Register the device. The device driver is responsible
+ * for configuring the device and invoking the add-device
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 1fc8f12..347bb05 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
+ writel(FLAG_CF, &ehci_regs->configured_flag);
+
+ /* Wait until the controller is no longer halted */
+- loop = 10;
++ loop = 1000;
+ do {
+ status = readl(&ehci_regs->status);
+ if (!(status & STS_HALT))
+diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
+index aaccffa..dd9533a 100644
+--- a/drivers/video/smscufx.c
++++ b/drivers/video/smscufx.c
+@@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 24a49d4..1585db1 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
+ bio->bi_rw |= REQ_WRITE;
+ }
+
+- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+- bio, per_dev->length);
++ osd_req_write(or, _ios_obj(ios, cur_comp),
++ per_dev->offset, bio, per_dev->length);
+ ORE_DBGMSG("write(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(per_dev->offset),
+ _LLU(per_dev->length), dev);
+ } else if (ios->kern_buff) {
+@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
+ (ios->si.unit_off + ios->length >
+ ios->layout->stripe_unit));
+
+- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
++ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
+ per_dev->offset,
+ ios->kern_buff, ios->length);
+ if (unlikely(ret))
+ goto out;
+ ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(per_dev->offset),
+ _LLU(ios->length), per_dev->dev);
+ } else {
+- osd_req_set_attributes(or, _ios_obj(ios, dev));
++ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
+ ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ ios->out_attr_len, dev);
+ }
+
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index c43a452..961e562 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -452,8 +452,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+
+ dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+
+- /* Only do I/O if gfp is a superset of GFP_KERNEL */
+- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
++ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
++ * doing this memory reclaim for a fs-related allocation.
++ */
++ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
++ !(current->flags & PF_FSTRANS)) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 9cfa60a..87a1746 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2236,7 +2236,7 @@ out_acl:
+ if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
+ if ((buflen -= 4) < 0)
+ goto out_resource;
+- WRITE32(1);
++ WRITE32(0);
+ }
+ if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
+ if ((buflen -= 4) < 0)
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index ac258be..c598cfb 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
+ if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
+ goto out;
+
+- down_read(&inode->i_sb->s_umount);
++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
+ nilfs_transaction_begin(inode->i_sb, &ti, 0);
+ ret = nilfs_cpfile_change_cpmode(
+@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+
+- up_read(&inode->i_sb->s_umount);
++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
+ out:
+ mnt_drop_write(filp->f_path.mnt);
+ return ret;
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 8351c44..97bfbdd 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ struct nilfs_root *root;
+ int ret;
+
++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
++
+ down_read(&nilfs->ns_segctor_sem);
+ ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
+ up_read(&nilfs->ns_segctor_sem);
+@@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ ret = nilfs_get_root_dentry(s, root, root_dentry);
+ nilfs_put_root(root);
+ out:
++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
+ return ret;
+ }
+
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 35a8970..1c98f53 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+ nilfs->ns_bdev = bdev;
+ atomic_set(&nilfs->ns_ndirtyblks, 0);
+ init_rwsem(&nilfs->ns_sem);
++ mutex_init(&nilfs->ns_snapshot_mount_mutex);
+ INIT_LIST_HEAD(&nilfs->ns_dirty_files);
+ INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
+ spin_lock_init(&nilfs->ns_inode_lock);
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index 9992b11..de7435f 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -47,6 +47,7 @@ enum {
+ * @ns_flags: flags
+ * @ns_bdev: block device
+ * @ns_sem: semaphore for shared states
++ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
+ * @ns_sbh: buffer heads of on-disk super blocks
+ * @ns_sbp: pointers to super block data
+ * @ns_sbwtime: previous write time of super block
+@@ -99,6 +100,7 @@ struct the_nilfs {
+
+ struct block_device *ns_bdev;
+ struct rw_semaphore ns_sem;
++ struct mutex ns_snapshot_mount_mutex;
+
+ /*
+ * used for
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index c5ed2f1..a2227f7 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -41,6 +41,9 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+ unsigned long *, int *, int, unsigned int flags);
+ void unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
++void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page);
+ void __unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
+ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
+@@ -99,6 +102,13 @@ static inline unsigned long hugetlb_total_pages(void)
+ #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+ #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
+ #define unmap_hugepage_range(vma, start, end, page) BUG()
++static inline void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page)
++{
++ BUG();
++}
++
+ static inline void hugetlb_report_meminfo(struct seq_file *m)
+ {
+ }
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index df53fdf..cdde2b3 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -124,8 +124,17 @@ extern struct group_info init_groups;
+
+ extern struct cred init_cred;
+
++extern struct task_group root_task_group;
++
++#ifdef CONFIG_CGROUP_SCHED
++# define INIT_CGROUP_SCHED(tsk) \
++ .sched_task_group = &root_task_group,
++#else
++# define INIT_CGROUP_SCHED(tsk)
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+-# define INIT_PERF_EVENTS(tsk) \
++# define INIT_PERF_EVENTS(tsk) \
+ .perf_event_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
+@@ -162,6 +171,7 @@ extern struct cred init_cred;
+ }, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
++ INIT_CGROUP_SCHED(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 8f74538..29e217a 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -50,11 +50,13 @@ struct rnd_state {
+
+ extern void rand_initialize_irq(int irq);
+
++extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq);
++extern void add_interrupt_randomness(int irq, int irq_flags);
+
+ extern void get_random_bytes(void *buf, int nbytes);
++extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+
+ #ifndef MODULE
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index d336c35..1e86bb4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1236,6 +1236,9 @@ struct task_struct {
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#ifdef CONFIG_CGROUP_SCHED
++ struct task_group *sched_task_group;
++#endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+@@ -2646,7 +2649,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
+ extern long sched_group_rt_period(struct task_group *tg);
+ extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
+ #endif
+-#endif
++#endif /* CONFIG_CGROUP_SCHED */
+
+ extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 866c9d5..80fb1c6 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
+ * @uaddr2: the pi futex we will take prior to returning to user-space
+ *
+ * The caller will wait on uaddr and will be requeued by futex_requeue() to
+- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
+- * complete the acquisition of the rt_mutex prior to returning to userspace.
+- * This ensures the rt_mutex maintains an owner when it has waiters; without
+- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
+- * need to.
++ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
++ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
++ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
++ * without one, the pi logic would not know which task to boost/deboost, if
++ * there was a need to.
+ *
+ * We call schedule in futex_wait_queue_me() when we enqueue and return there
+ * via the following:
+@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ struct futex_q q = futex_q_init;
+ int res, ret;
+
++ if (uaddr == uaddr2)
++ return -EINVAL;
++
+ if (!bitset)
+ return -EINVAL;
+
+@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor
+ * the pi_state.
+ */
+- WARN_ON(!&q.pi_state);
++ WARN_ON(!q.pi_state);
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * fault, unlock the rt_mutex and return the fault to userspace.
+ */
+ if (ret == -EFAULT) {
+- if (rt_mutex_owner(pi_mutex) == current)
++ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+ } else if (ret == -EINTR) {
+ /*
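
The futex_wait_requeue_pi() hunks above add an explicit uaddr == uaddr2 rejection and change WARN_ON(!&q.pi_state) to WARN_ON(!q.pi_state). As a standalone illustration of why the original warning could never fire (the address of a struct member is never NULL, only the pointer stored in it can be), here is a minimal C sketch with invented names, not kernel code:

    #include <stdio.h>

    /* stand-ins for the kernel types; purely illustrative */
    struct pi_state;
    struct futex_q { struct pi_state *pi_state; };

    int main(void)
    {
            struct futex_q q = { .pi_state = NULL };

            /* &q.pi_state is the address of the member itself: never NULL */
            printf("!&q.pi_state = %d\n", !&q.pi_state);   /* always 0 */
            /* q.pi_state is the pointer held in the member: NULL until set */
            printf("!q.pi_state  = %d\n", !q.pi_state);    /* 1 here */
            return 0;
    }
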
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index 470d08c..10e0772 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -117,7 +117,7 @@ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
+ irqreturn_t retval = IRQ_NONE;
+- unsigned int random = 0, irq = desc->irq_data.irq;
++ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+ do {
+ irqreturn_t res;
+@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
+- random |= action->flags;
++ flags |= action->flags;
+ break;
+
+ default:
+@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ action = action->next;
+ } while (action);
+
+- if (random & IRQF_SAMPLE_RANDOM)
+- add_interrupt_randomness(irq);
++ add_interrupt_randomness(irq, flags);
+
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 9cd8ca7..e0431c4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -746,22 +746,19 @@ static inline int cpu_of(struct rq *rq)
+ /*
+ * Return the group to which this tasks belongs.
+ *
+- * We use task_subsys_state_check() and extend the RCU verification with
+- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+- * task it moves into the cgroup. Therefore by holding either of those locks,
+- * we pin the task to the current cgroup.
++ * We cannot use task_subsys_state() and friends because the cgroup
++ * subsystem changes that value before the cgroup_subsys::attach() method
++ * is called, therefore we cannot pin it and might observe the wrong value.
++ *
++ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
++ * core changes this before calling sched_move_task().
++ *
++ * Instead we use a 'copy' which is updated from sched_move_task() while
++ * holding both task_struct::pi_lock and rq::lock.
+ */
+ static inline struct task_group *task_group(struct task_struct *p)
+ {
+- struct task_group *tg;
+- struct cgroup_subsys_state *css;
+-
+- css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+- lockdep_is_held(&p->pi_lock) ||
+- lockdep_is_held(&task_rq(p)->lock));
+- tg = container_of(css, struct task_group, css);
+-
+- return autogroup_task_group(p, tg);
++ return p->sched_task_group;
+ }
+
+ /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+@@ -2372,7 +2369,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+ *
+ * sched_move_task() holds both and thus holding either pins the cgroup,
+- * see set_task_rq().
++ * see task_group().
+ *
+ * Furthermore, all task_rq users should acquire both locks, see
+ * task_rq_lock().
+@@ -8952,6 +8949,7 @@ void sched_destroy_group(struct task_group *tg)
+ */
+ void sched_move_task(struct task_struct *tsk)
+ {
++ struct task_group *tg;
+ int on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+@@ -8966,6 +8964,12 @@ void sched_move_task(struct task_struct *tsk)
+ if (unlikely(running))
+ tsk->sched_class->put_prev_task(rq, tsk);
+
++ tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
++ lockdep_is_held(&tsk->sighand->siglock)),
++ struct task_group, css);
++ tg = autogroup_task_group(tsk, tg);
++ tsk->sched_task_group = tg;
++
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
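
The comment and hunks above replace the task_subsys_state() lookup with a per-task cached pointer that sched_move_task() updates while holding both pi_lock and the runqueue lock, so a reader holding either lock sees a stable value. A rough userspace sketch of that invariant, with pthread mutexes standing in for the kernel locks and all names invented:

    #include <pthread.h>
    #include <stdio.h>

    struct group { const char *name; };

    struct task {
            pthread_mutex_t pi_lock;    /* stands in for task_struct::pi_lock */
            pthread_mutex_t rq_lock;    /* stands in for the runqueue lock */
            struct group *cached_group; /* analogue of ->sched_task_group */
    };

    /* writer: update the cached pointer while holding *both* locks */
    static void move_task(struct task *t, struct group *g)
    {
            pthread_mutex_lock(&t->pi_lock);
            pthread_mutex_lock(&t->rq_lock);
            t->cached_group = g;
            pthread_mutex_unlock(&t->rq_lock);
            pthread_mutex_unlock(&t->pi_lock);
    }

    /* reader: holding *either* lock is enough to pin the value */
    static struct group *task_group_under_pi_lock(struct task *t)
    {
            struct group *g;

            pthread_mutex_lock(&t->pi_lock);
            g = t->cached_group;
            pthread_mutex_unlock(&t->pi_lock);
            return g;
    }

    int main(void)
    {
            struct group a = { "A" }, b = { "B" };
            struct task t = { .cached_group = &a };

            pthread_mutex_init(&t.pi_lock, NULL);
            pthread_mutex_init(&t.rq_lock, NULL);
            move_task(&t, &b);
            printf("task is now in group %s\n",
                   task_group_under_pi_lock(&t)->name);
            return 0;
    }
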
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 993599e..d74c317 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -886,7 +886,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ * %pK cannot be used in IRQ context because its test
+ * for CAP_SYSLOG would be meaningless.
+ */
+- if (in_irq() || in_serving_softirq() || in_nmi()) {
++ if (kptr_restrict && (in_irq() || in_serving_softirq() ||
++ in_nmi())) {
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
+ return string(buf, end, "pK-error", spec);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index b1e1bad..0f897b8 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2382,6 +2382,25 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ }
+ }
+
++void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page)
++{
++ __unmap_hugepage_range(vma, start, end, ref_page);
++
++ /*
++ * Clear this flag so that x86's huge_pmd_share page_table_shareable
++ * test will fail on a vma being torn down, and not grab a page table
++ * on its way out. We're lucky that the flag has such an appropriate
++ * name, and can in fact be safely cleared here. We could clear it
++ * before the __unmap_hugepage_range above, but all that's necessary
++ * is to clear it before releasing the i_mmap_mutex. This works
++ * because in the context this is called, the VMA is about to be
++ * destroyed and the i_mmap_mutex is held.
++ */
++ vma->vm_flags &= ~VM_MAYSHARE;
++}
++
+ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+ {
+@@ -2939,9 +2958,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
+ }
+ }
+ spin_unlock(&mm->page_table_lock);
+- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+-
++ /*
++ * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
++ * may have cleared our pud entry and done put_page on the page table:
++ * once we release i_mmap_mutex, another task can do the final put_page
++ * and that page table be reused and filled with junk.
++ */
+ flush_tlb_range(vma, start, end);
++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ }
+
+ int hugetlb_reserve_pages(struct inode *inode,
+diff --git a/mm/internal.h b/mm/internal.h
+index 2189af4..0c26b5e 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -309,3 +309,5 @@ extern u64 hwpoison_filter_flags_mask;
+ extern u64 hwpoison_filter_flags_value;
+ extern u64 hwpoison_filter_memcg;
+ extern u32 hwpoison_filter_enable;
++
++extern void set_pageblock_order(void);
+diff --git a/mm/memory.c b/mm/memory.c
+index 1b1ca17..70f5daf 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1358,8 +1358,11 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
+ * Since no pte has actually been setup, it is
+ * safe to do nothing in this case.
+ */
+- if (vma->vm_file)
+- unmap_hugepage_range(vma, start, end, NULL);
++ if (vma->vm_file) {
++ mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
++ __unmap_hugepage_range_final(vma, start, end, NULL);
++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
++ }
+
+ start = end;
+ } else
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 9a611d3..862b608 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -33,6 +33,24 @@
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
++ struct hlist_node *n;
++
++ /*
++ * RCU here will block mmu_notifier_unregister until
++ * ->release returns.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
++ /*
++ * if ->release runs before mmu_notifier_unregister it
++ * must be handled as it's the only way for the driver
++ * to flush all existing sptes and stop the driver
++ * from establishing any more sptes before all the
++ * pages in the mm are freed.
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++ rcu_read_unlock();
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
+ * mmu_notifier_unregister to return.
+ */
+ hlist_del_init_rcu(&mn->hlist);
+- /*
+- * RCU here will block mmu_notifier_unregister until
+- * ->release returns.
+- */
+- rcu_read_lock();
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- rcu_read_unlock();
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- hlist_del_rcu(&mn->hlist);
+-
+ /*
+ * RCU here will force exit_mmap to wait ->release to finish
+ * before freeing the pages.
+ */
+ rcu_read_lock();
+- spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ /*
+ * exit_mmap will block in mmu_notifier_release to
+ * guarantee ->release is called before freeing the
+@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ rcu_read_unlock();
+- } else
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
++ hlist_del_rcu(&mn->hlist);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
++ }
+
+ /*
+ * Wait any running method to finish, of course including
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 065dbe8..6e51bf0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4281,25 +4281,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
+
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+-/* Return a sensible default order for the pageblock size. */
+-static inline int pageblock_default_order(void)
+-{
+- if (HPAGE_SHIFT > PAGE_SHIFT)
+- return HUGETLB_PAGE_ORDER;
+-
+- return MAX_ORDER-1;
+-}
+-
+ /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
+-static inline void __init set_pageblock_order(unsigned int order)
++void __init set_pageblock_order(void)
+ {
++ unsigned int order;
++
+ /* Check that pageblock_nr_pages has not already been setup */
+ if (pageblock_order)
+ return;
+
++ if (HPAGE_SHIFT > PAGE_SHIFT)
++ order = HUGETLB_PAGE_ORDER;
++ else
++ order = MAX_ORDER - 1;
++
+ /*
+ * Assume the largest contiguous order of interest is a huge page.
+- * This value may be variable depending on boot parameters on IA64
++ * This value may be variable depending on boot parameters on IA64 and
++ * powerpc.
+ */
+ pageblock_order = order;
+ }
+@@ -4307,15 +4306,13 @@ static inline void __init set_pageblock_order(unsigned int order)
+
+ /*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+- * and pageblock_default_order() are unused as pageblock_order is set
+- * at compile-time. See include/linux/pageblock-flags.h for the values of
+- * pageblock_order based on the kernel config
++ * is unused as pageblock_order is set at compile-time. See
++ * include/linux/pageblock-flags.h for the values of pageblock_order based on
++ * the kernel config
+ */
+-static inline int pageblock_default_order(unsigned int order)
++void __init set_pageblock_order(void)
+ {
+- return MAX_ORDER-1;
+ }
+-#define set_pageblock_order(x) do {} while (0)
+
+ #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+@@ -4403,7 +4400,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ if (!size)
+ continue;
+
+- set_pageblock_order(pageblock_default_order());
++ set_pageblock_order();
+ setup_usemap(pgdat, zone, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+diff --git a/mm/sparse.c b/mm/sparse.c
+index a8bc7d3..bf7d3cc 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -486,6 +486,9 @@ void __init sparse_init(void)
+ struct page **map_map;
+ #endif
+
++ /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
++ set_pageblock_order();
++
+ /*
+ * map is using big page (aka 2M in x86 64 bit)
+ * usemap is less one page (aka 24 bytes)
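
The page_alloc.c and sparse.c hunks above fold pageblock_default_order() into set_pageblock_order(), make repeated calls harmless via the early return on pageblock_order, provide an empty stub when CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is off, and have sparse_init() call it early. A compilable toy version of that shape (constants and the config macro usage are illustrative only, not the kernel's values):

    #include <stdio.h>

    #define MAX_ORDER   11
    #define PAGE_SHIFT  12
    #define HPAGE_SHIFT 21  /* pretend huge pages are 2 MiB */

    static unsigned int pageblock_order;

    #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
    /* compute the order internally; a second call is a no-op */
    static void set_pageblock_order(void)
    {
            if (pageblock_order)
                    return;

            if (HPAGE_SHIFT > PAGE_SHIFT)
                    pageblock_order = HPAGE_SHIFT - PAGE_SHIFT; /* huge page order */
            else
                    pageblock_order = MAX_ORDER - 1;
    }
    #else
    /* empty stub instead of the old do-nothing macro */
    static void set_pageblock_order(void)
    {
    }
    #endif

    int main(void)
    {
            set_pageblock_order();  /* early caller, like sparse_init() */
            set_pageblock_order();  /* later caller, like free_area_init_core() */
            printf("pageblock_order = %u\n", pageblock_order);
            return 0;
    }
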
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5738654..4b18703 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1177,6 +1177,7 @@ static int __dev_open(struct net_device *dev)
+ net_dmaengine_get();
+ dev_set_rx_mode(dev);
+ dev_activate(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ return ret;
+@@ -4841,6 +4842,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+ err = ops->ndo_set_mac_address(dev, sa);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return err;
+ }
+ EXPORT_SYMBOL(dev_set_mac_address);
+@@ -5621,6 +5623,7 @@ int register_netdevice(struct net_device *dev)
+ dev_init_scheduler(dev);
+ dev_hold(dev);
+ list_netdevice(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+
+ /* Notify protocols, that a new device appeared. */
+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 7f36b38..b856f87 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -33,22 +33,19 @@
+ #define TRACE_ON 1
+ #define TRACE_OFF 0
+
+-static void send_dm_alert(struct work_struct *unused);
+-
+-
+ /*
+ * Globals, our netlink socket pointer
+ * and the work handle that will send up
+ * netlink alerts
+ */
+ static int trace_state = TRACE_OFF;
+-static DEFINE_SPINLOCK(trace_state_lock);
++static DEFINE_MUTEX(trace_state_mutex);
+
+ struct per_cpu_dm_data {
+- struct work_struct dm_alert_work;
+- struct sk_buff *skb;
+- atomic_t dm_hit_count;
+- struct timer_list send_timer;
++ spinlock_t lock;
++ struct sk_buff *skb;
++ struct work_struct dm_alert_work;
++ struct timer_list send_timer;
+ };
+
+ struct dm_hw_stat_delta {
+@@ -74,56 +71,59 @@ static int dm_delay = 1;
+ static unsigned long dm_hw_check_delta = 2*HZ;
+ static LIST_HEAD(hw_stats_list);
+
+-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
++static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ {
+ size_t al;
+ struct net_dm_alert_msg *msg;
+ struct nlattr *nla;
++ struct sk_buff *skb;
++ unsigned long flags;
+
+ al = sizeof(struct net_dm_alert_msg);
+ al += dm_hit_limit * sizeof(struct net_dm_drop_point);
+ al += sizeof(struct nlattr);
+
+- data->skb = genlmsg_new(al, GFP_KERNEL);
+- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
+- 0, NET_DM_CMD_ALERT);
+- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
+- msg = nla_data(nla);
+- memset(msg, 0, al);
+- atomic_set(&data->dm_hit_count, dm_hit_limit);
++ skb = genlmsg_new(al, GFP_KERNEL);
++
++ if (skb) {
++ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
++ 0, NET_DM_CMD_ALERT);
++ nla = nla_reserve(skb, NLA_UNSPEC,
++ sizeof(struct net_dm_alert_msg));
++ msg = nla_data(nla);
++ memset(msg, 0, al);
++ } else {
++ mod_timer(&data->send_timer, jiffies + HZ / 10);
++ }
++
++ spin_lock_irqsave(&data->lock, flags);
++ swap(data->skb, skb);
++ spin_unlock_irqrestore(&data->lock, flags);
++
++ return skb;
+ }
+
+-static void send_dm_alert(struct work_struct *unused)
++static void send_dm_alert(struct work_struct *work)
+ {
+ struct sk_buff *skb;
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data;
+
+- /*
+- * Grab the skb we're about to send
+- */
+- skb = data->skb;
++ data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
+
+- /*
+- * Replace it with a new one
+- */
+- reset_per_cpu_data(data);
+-
+- /*
+- * Ship it!
+- */
+- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
++ skb = reset_per_cpu_data(data);
+
++ if (skb)
++ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ }
+
+ /*
+ * This is the timer function to delay the sending of an alert
+ * in the event that more drops will arrive during the
+- * hysteresis period. Note that it operates under the timer interrupt
+- * so we don't need to disable preemption here
++ * hysteresis period.
+ */
+-static void sched_send_work(unsigned long unused)
++static void sched_send_work(unsigned long _data)
+ {
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+
+ schedule_work(&data->dm_alert_work);
+ }
+@@ -134,17 +134,19 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ struct nlmsghdr *nlh;
+ struct nlattr *nla;
+ int i;
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct sk_buff *dskb;
++ struct per_cpu_dm_data *data;
++ unsigned long flags;
+
++ local_irq_save(flags);
++ data = &__get_cpu_var(dm_cpu_data);
++ spin_lock(&data->lock);
++ dskb = data->skb;
+
+- if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
+- /*
+- * we're already at zero, discard this hit
+- */
++ if (!dskb)
+ goto out;
+- }
+
+- nlh = (struct nlmsghdr *)data->skb->data;
++ nlh = (struct nlmsghdr *)dskb->data;
+ nla = genlmsg_data(nlmsg_data(nlh));
+ msg = nla_data(nla);
+ for (i = 0; i < msg->entries; i++) {
+@@ -153,11 +155,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ goto out;
+ }
+ }
+-
++ if (msg->entries == dm_hit_limit)
++ goto out;
+ /*
+ * We need to create a new entry
+ */
+- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
++ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
+ nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
+ memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
+ msg->points[msg->entries].count = 1;
+@@ -165,11 +168,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+
+ if (!timer_pending(&data->send_timer)) {
+ data->send_timer.expires = jiffies + dm_delay * HZ;
+- add_timer_on(&data->send_timer, smp_processor_id());
++ add_timer(&data->send_timer);
+ }
+
+ out:
+- return;
++ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
+@@ -213,7 +216,7 @@ static int set_all_monitor_traces(int state)
+ struct dm_hw_stat_delta *new_stat = NULL;
+ struct dm_hw_stat_delta *temp;
+
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+
+ if (state == trace_state) {
+ rc = -EAGAIN;
+@@ -252,7 +255,7 @@ static int set_all_monitor_traces(int state)
+ rc = -EINPROGRESS;
+
+ out_unlock:
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+
+ return rc;
+ }
+@@ -295,12 +298,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+
+ new_stat->dev = dev;
+ new_stat->last_rx = jiffies;
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+ list_add_rcu(&new_stat->list, &hw_stats_list);
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+ break;
+ case NETDEV_UNREGISTER:
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+ list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
+ if (new_stat->dev == dev) {
+ new_stat->dev = NULL;
+@@ -311,7 +314,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+ }
+ }
+ }
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+ break;
+ }
+ out:
+@@ -367,13 +370,15 @@ static int __init init_net_drop_monitor(void)
+
+ for_each_present_cpu(cpu) {
+ data = &per_cpu(dm_cpu_data, cpu);
+- reset_per_cpu_data(data);
+ INIT_WORK(&data->dm_alert_work, send_dm_alert);
+ init_timer(&data->send_timer);
+- data->send_timer.data = cpu;
++ data->send_timer.data = (unsigned long)data;
+ data->send_timer.function = sched_send_work;
++ spin_lock_init(&data->lock);
++ reset_per_cpu_data(data);
+ }
+
++
+ goto out;
+
+ out_unreg:
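
The drop_monitor rework above replaces the atomic hit counter with a per-CPU spinlock-protected skb pointer: reset_per_cpu_data() allocates a fresh buffer, swaps it in under the lock and returns the old one, and send_dm_alert() transmits that old buffer with no lock held. A small userspace sketch of the same swap-under-lock pattern, with a pthread mutex standing in for the IRQ-safe spinlock and all names invented:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct alert_buf { unsigned int entries; };

    struct dm_data {
            pthread_mutex_t lock;   /* stands in for the IRQ-safe spinlock */
            struct alert_buf *buf;  /* analogue of per_cpu_dm_data::skb */
    };

    /* allocate a fresh buffer, swap it in under the lock, return the old one */
    static struct alert_buf *reset_data(struct dm_data *d)
    {
            struct alert_buf *fresh = calloc(1, sizeof(*fresh));
            struct alert_buf *old;

            pthread_mutex_lock(&d->lock);
            old = d->buf;
            d->buf = fresh;         /* may be NULL if allocation failed; the
                                     * hot path then simply drops its samples */
            pthread_mutex_unlock(&d->lock);
            return old;
    }

    /* hot path: record one event into the current buffer, under the lock */
    static void record_drop(struct dm_data *d)
    {
            pthread_mutex_lock(&d->lock);
            if (d->buf)
                    d->buf->entries++;
            pthread_mutex_unlock(&d->lock);
    }

    /* deferred work: swap buffers, then "send" the old one with no lock held */
    static void send_alert(struct dm_data *d)
    {
            struct alert_buf *old = reset_data(d);

            if (old) {
                    printf("sending alert with %u entries\n", old->entries);
                    free(old);
            }
    }

    int main(void)
    {
            static struct dm_data d = { PTHREAD_MUTEX_INITIALIZER, NULL };

            free(reset_data(&d));   /* prime the first buffer (old one is NULL) */
            record_drop(&d);
            record_drop(&d);
            send_alert(&d);
            free(d.buf);
            return 0;
    }
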
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2ef859a..05842ab 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1354,6 +1354,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ goto errout;
+ send_addr_notify = 1;
+ modified = 1;
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ if (tb[IFLA_MTU]) {
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 8761bf8..337c68b 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -246,7 +246,7 @@ static int rpcb_create_local_unix(void)
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create AF_LOCAL rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+- result = -PTR_ERR(clnt);
++ result = PTR_ERR(clnt);
+ goto out;
+ }
+
+@@ -293,7 +293,7 @@ static int rpcb_create_local_net(void)
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create local rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+- result = -PTR_ERR(clnt);
++ result = PTR_ERR(clnt);
+ goto out;
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 4e2b3b4..c90b832 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -755,7 +755,9 @@ void rpc_execute(struct rpc_task *task)
+
+ static void rpc_async_schedule(struct work_struct *work)
+ {
++ current->flags |= PF_FSTRANS;
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index b446e10..06cdbff 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
+ int rc = 0;
+
+ if (!xprt->shutdown) {
++ current->flags |= PF_FSTRANS;
+ xprt_clear_connected(xprt);
+
+ dprintk("RPC: %s: %sconnect\n", __func__,
+@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
+
+ out:
+ xprt_wake_pending_tasks(xprt, rc);
+-
+ out_clear:
+ dprintk("RPC: %s: exit\n", __func__);
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 55472c4..1a6edc7 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1895,6 +1895,8 @@ static void xs_local_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ status = __sock_create(xprt->xprt_net, AF_LOCAL,
+ SOCK_STREAM, 0, &sock, 1);
+@@ -1928,6 +1930,7 @@ static void xs_local_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+@@ -1970,6 +1973,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ /* Start by resetting any existing state */
+ xs_reset_transport(transport);
+ sock = xs_create_sock(xprt, transport,
+@@ -1988,6 +1993,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+@@ -2113,6 +2119,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ if (!sock) {
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ sock = xs_create_sock(xprt, transport,
+@@ -2162,6 +2170,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ case -EINPROGRESS:
+ case -EALREADY:
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ return;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+@@ -2174,6 +2183,7 @@ out_eagain:
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
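
The sunrpc hunks above bracket the transport connect/setup workers with current->flags |= PF_FSTRANS and a matching clear on every exit path, the intent apparently being that memory allocations made while connecting do not recurse back into filesystem writeback. A minimal sketch of that set-work-clear pattern, using a thread-local flag and a made-up bit value in place of the real task flag (gcc's __thread extension assumed):

    #include <stdio.h>

    static __thread unsigned int task_flags;  /* stands in for current->flags */
    #define PF_FSTRANS 0x1                    /* made-up bit for the sketch */

    static void do_transport_setup(void)
    {
            /* allocations made here would observe the flag and avoid
             * recursing into filesystem writeback */
            printf("setting up, PF_FSTRANS set: %u\n",
                   !!(task_flags & PF_FSTRANS));
    }

    static void setup_worker(void)
    {
            task_flags |= PF_FSTRANS;
            do_transport_setup();
            task_flags &= ~PF_FSTRANS;  /* mirrors the clears on every exit path */
    }

    int main(void)
    {
            setup_worker();
            return 0;
    }
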
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 74d5292..b5e4c1c 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -981,6 +981,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
++ if (total == 1)
++ return 0;
++
+ for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+ const struct ieee80211_iface_combination *c;
+ struct ieee80211_iface_limit *limits;
+diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
+index 1cff331..4608c2c 100644
+--- a/sound/drivers/mpu401/mpu401_uart.c
++++ b/sound/drivers/mpu401/mpu401_uart.c
+@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
+ spin_lock_init(&mpu->output_lock);
+ spin_lock_init(&mpu->timer_lock);
+ mpu->hardware = hardware;
++ mpu->irq = -1;
+ if (! (info_flags & MPU401_INFO_INTEGRATED)) {
+ int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
+ mpu->res = request_region(port, res_size, "MPU401 UART");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 191fd78..2e2eb93 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4809,6 +4809,15 @@ static int alc269_resume(struct hda_codec *codec)
+ }
+ #endif /* CONFIG_PM */
+
++static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == ALC_FIXUP_ACT_PRE_PROBE)
++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++}
++
+ static void alc269_fixup_hweq(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+@@ -4909,6 +4918,8 @@ enum {
+ ALC269_FIXUP_DMIC,
+ ALC269VB_FIXUP_AMIC,
+ ALC269VB_FIXUP_DMIC,
++ ALC269_FIXUP_LENOVO_DOCK,
++ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ };
+
+ static const struct alc_fixup alc269_fixups[] = {
+@@ -5029,6 +5040,20 @@ static const struct alc_fixup alc269_fixups[] = {
+ { }
+ },
+ },
++ [ALC269_FIXUP_LENOVO_DOCK] = {
++ .type = ALC_FIXUP_PINS,
++ .v.pins = (const struct alc_pincfg[]) {
++ { 0x19, 0x23a11040 }, /* dock mic */
++ { 0x1b, 0x2121103f }, /* dock headphone */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
++ },
++ [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5051,6 +5076,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
++ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5109,6 +5136,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ static const struct alc_model_fixup alc269_fixup_models[] = {
+ {.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
+ {.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
++ {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ {}
+ };
+
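
The patch_realtek.c hunks above add ALC269_FIXUP_LENOVO_DOCK, which chains to ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT via .chained/.chain_id. A standalone sketch of that chained-fixup table shape (none of this is the real HDA API; names and behaviour are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum { FIXUP_LENOVO_DOCK, FIXUP_NO_HP_TO_LINEOUT, FIXUP_COUNT };

    struct fixup {
            const char *desc;
            void (*func)(void);     /* optional hook, like ALC_FIXUP_FUNC */
            bool chained;
            int chain_id;
    };

    static void no_hp_to_lineout(void)
    {
            printf("  suppressing the HP-to-lineout pin fixup\n");
    }

    static const struct fixup fixups[FIXUP_COUNT] = {
            [FIXUP_LENOVO_DOCK] = {
                    .desc = "dock mic + dock headphone pins",
                    .chained = true,
                    .chain_id = FIXUP_NO_HP_TO_LINEOUT,
            },
            [FIXUP_NO_HP_TO_LINEOUT] = {
                    .desc = "pin-config parse flag",
                    .func = no_hp_to_lineout,
            },
    };

    /* applying one fixup walks the whole chain */
    static void apply_fixup(int id)
    {
            for (;;) {
                    const struct fixup *f = &fixups[id];

                    printf("applying fixup: %s\n", f->desc);
                    if (f->func)
                            f->func();
                    if (!f->chained)
                            break;
                    id = f->chain_id;
            }
    }

    int main(void)
    {
            apply_fixup(FIXUP_LENOVO_DOCK);
            return 0;
    }
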
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 1fe1308..7160ff2 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -3227,7 +3227,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+ int imux_is_smixer;
+- unsigned int parm;
++ unsigned int parm, parm2;
+ /* MUX6 (1eh) = stereo mixer */
+ imux_is_smixer =
+ snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
+@@ -3250,7 +3250,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x27, &parm);
+ snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm);
+- snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
++ parm2 = parm; /* for pin 0x0b */
+
+ /* PW2 (26h), AOW2 (ah) */
+ parm = AC_PWRST_D3;
+@@ -3265,6 +3265,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ if (!spec->hp_independent_mode) /* check for redirected HP */
+ set_pin_power_state(codec, 0x28, &parm);
+ snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm);
++ if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
++ parm = parm2;
++ snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
+ /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
+ snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_POWER_STATE,
+ imux_is_smixer ? AC_PWRST_D0 : parm);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 07dd7eb..e97df24 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -3105,6 +3105,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
+ /* VMID 2*250k */
+ snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
+ WM8962_VMID_SEL_MASK, 0x100);
++
++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
++ msleep(100);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index de61b8a..98c5774 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2508,7 +2508,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
+ return -EINVAL;
+ }
+
+- bclk_rate = params_rate(params) * 2;
++ bclk_rate = params_rate(params) * 4;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bclk_rate *= 16;
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 379baad..5e634a2 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
+ return 0;
+
+ /* If a clock source can't tell us whether it's valid, we assume it is */
+- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
++ if (!uac2_control_is_readable(cs_desc->bmControls,
++ UAC2_CS_CONTROL_CLOCK_VALID - 1))
+ return 1;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
diff --git a/3.2.26/4420_grsecurity-2.9.1-3.2.26-201208062017.patch b/3.2.27/4420_grsecurity-2.9.1-3.2.27-201208120907.patch
index cb16dab..6f38d4f 100644
--- a/3.2.26/4420_grsecurity-2.9.1-3.2.26-201208062017.patch
+++ b/3.2.27/4420_grsecurity-2.9.1-3.2.27-201208120907.patch
@@ -245,7 +245,7 @@ index 88fd7f5..b318a78 100644
==============================================================
diff --git a/Makefile b/Makefile
-index fa5acc83..d3285e9 100644
+index bdf851f..c020e9d 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -768,7 +768,7 @@ index fadd5f8..904e73a 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 86976d0..58a8b07 100644
+index 86976d0..269b872 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,6 +15,10 @@
@@ -920,7 +920,7 @@ index 86976d0..58a8b07 100644
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
-+" sub %0, %1, %4\n"
++" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1248,7 +1248,7 @@ index 86976d0..58a8b07 100644
-" sbc %H0, %H0, %H4\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, %4\n"
-+" sbc %H0, %H1, %H4\n"
++" sbcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1317,7 +1317,7 @@ index 86976d0..58a8b07 100644
-" sbc %H0, %H0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, #1\n"
-+" sbc %H0, %H1, #0\n"
++" sbcs %H0, %H1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1350,7 +1350,8 @@ index 86976d0..58a8b07 100644
-" beq 2f\n"
+" beq 4f\n"
" adds %0, %0, %6\n"
- " adc %H0, %H0, %H6\n"
+-" adc %H0, %H0, %H6\n"
++" adcs %H0, %H0, %H6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1656,7 +1657,7 @@ index b2a27b6..520889c 100644
cmp scno, #NR_syscalls @ check upper syscall limit
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 3d0c6fb..9d326fa 100644
+index e68d251..b70de67 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,7 +28,6 @@
@@ -1676,7 +1677,7 @@ index 3d0c6fb..9d326fa 100644
{
/* Disable interrupts first */
local_irq_disable();
-@@ -134,7 +133,7 @@ void arm_machine_restart(char mode, const char *cmd)
+@@ -135,7 +134,7 @@ void arm_machine_restart(char mode, const char *cmd)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -1685,7 +1686,7 @@ index 3d0c6fb..9d326fa 100644
EXPORT_SYMBOL_GPL(arm_pm_restart);
static void do_nothing(void *unused)
-@@ -248,6 +247,7 @@ void machine_power_off(void)
+@@ -250,6 +249,7 @@ void machine_power_off(void)
machine_shutdown();
if (pm_power_off)
pm_power_off();
@@ -1693,7 +1694,7 @@ index 3d0c6fb..9d326fa 100644
}
void machine_restart(char *cmd)
-@@ -484,12 +484,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -486,12 +486,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -1752,7 +1753,7 @@ index 8fc2c8f..064c150 100644
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 160cb16..c3261f6 100644
+index 8380bd1..3829a27 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
@@ -2354,7 +2355,7 @@ index 0f01de2..d37d309 100644
#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
-index 3fad89e..3047da5 100644
+index 2fc214b..7597423 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
@@ -4967,7 +4968,7 @@ index 53088e2..9f44a36 100644
- return ret;
-}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
-index a0155c0..34cc491 100644
+index c70b3d8..d01c6b3 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
@@ -4993,7 +4994,7 @@ index a0155c0..34cc491 100644
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
-@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -13792,7 +13793,7 @@ index 13ab720..95d5442 100644
bogus_magic:
jmp bogus_magic
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index 1f84794..e23f862 100644
+index 73ef56c..0238021 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
@@ -17805,15 +17806,15 @@ index 69bca46..0bac999 100644
WARN_ONCE(regs->sp >= curbase &&
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
-index 90fcf62..e682cdd 100644
+index 90fcf62..738e356 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
-@@ -28,6 +28,8 @@ struct setup_data_node {
+@@ -27,7 +27,7 @@ struct setup_data_node {
+ u32 len;
};
- static ssize_t setup_data_read(struct file *file, char __user *user_buf,
-+ size_t count, loff_t *ppos) __size_overflow(3);
-+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
+-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
++static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct setup_data_node *node = file->private_data;
@@ -22989,7 +22990,7 @@ index a63efd6..ccecad8 100644
ret
CFI_ENDPROC
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
-index e218d5d..a99a1eb 100644
+index e218d5d..7d522b8 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -43,7 +43,7 @@ do { \
@@ -23105,7 +23106,7 @@ index e218d5d..a99a1eb 100644
".section .fixup,\"ax\"\n"
"101: lea 0(%%eax,%0,4),%0\n"
" jmp 100b\n"
-@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+@@ -334,46 +340,153 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
}
static unsigned long
@@ -23215,9 +23216,7 @@ index e218d5d..a99a1eb 100644
+ return size;
+}
+
-+static unsigned long
-+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long
++static unsigned long __size_overflow(3)
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
int d0, d1;
@@ -23279,7 +23278,7 @@ index e218d5d..a99a1eb 100644
" movl %%eax, 56(%3)\n"
" movl %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -23291,12 +23290,12 @@ index e218d5d..a99a1eb 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -433,48 +546,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ * hyoshiok@miraclelinux.com
*/
- static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-+ const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
@@ -23359,7 +23358,7 @@ index e218d5d..a99a1eb 100644
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -23371,12 +23370,12 @@ index e218d5d..a99a1eb 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -530,48 +643,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ return size;
}
- static unsigned long __copy_user_intel_nocache(void *to,
-+ const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long __copy_user_intel_nocache(void *to,
+-static unsigned long __copy_user_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
@@ -23439,7 +23438,7 @@ index e218d5d..a99a1eb 100644
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -23451,7 +23450,7 @@ index e218d5d..a99a1eb 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
*/
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
unsigned long size);
@@ -23493,7 +23492,7 @@ index e218d5d..a99a1eb 100644
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 2b\n" \
-@@ -682,14 +805,14 @@ do { \
+@@ -682,14 +799,14 @@ do { \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
@@ -23511,7 +23510,7 @@ index e218d5d..a99a1eb 100644
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
-@@ -775,9 +898,9 @@ survive:
+@@ -775,9 +892,9 @@ survive:
}
#endif
if (movsl_is_ok(to, from, n))
@@ -23523,7 +23522,7 @@ index e218d5d..a99a1eb 100644
return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
-@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
unsigned long n)
{
if (movsl_is_ok(to, from, n))
@@ -23536,7 +23535,7 @@ index e218d5d..a99a1eb 100644
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
-@@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
else
@@ -29843,10 +29842,10 @@ index da3cfee..a5a6606 100644
*ppos = i;
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 6035ab8..c7e4a44 100644
+index 631d4f6..24a2a8c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -261,8 +261,13 @@
+@@ -269,8 +269,13 @@
/*
* Configuration information
*/
@@ -29860,7 +29859,7 @@ index 6035ab8..c7e4a44 100644
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
-@@ -300,10 +305,17 @@ static struct poolinfo {
+@@ -310,10 +315,17 @@ static struct poolinfo {
int poolwords;
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
@@ -29878,7 +29877,18 @@ index 6035ab8..c7e4a44 100644
#if 0
/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
{ 2048, 1638, 1231, 819, 411, 1 },
-@@ -722,6 +734,17 @@ void add_disk_randomness(struct gendisk *disk)
+@@ -524,8 +536,8 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+ input_rotate += i ? 7 : 14;
+ }
+
+- ACCESS_ONCE(r->input_rotate) = input_rotate;
+- ACCESS_ONCE(r->add_ptr) = i;
++ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
++ ACCESS_ONCE_RW(r->add_ptr) = i;
+ smp_wmb();
+
+ if (out)
+@@ -821,6 +833,17 @@ void add_disk_randomness(struct gendisk *disk)
}
#endif
@@ -29887,8 +29897,8 @@ index 6035ab8..c7e4a44 100644
+
+__init void transfer_latent_entropy(void)
+{
-+ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
-+ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
++ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
++ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
+// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
+}
+#endif
@@ -29896,7 +29906,7 @@ index 6035ab8..c7e4a44 100644
/*********************************************************************
*
* Entropy extraction routines
-@@ -909,7 +932,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1028,7 +1051,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
extract_buf(r, tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
@@ -29905,7 +29915,7 @@ index 6035ab8..c7e4a44 100644
ret = -EFAULT;
break;
}
-@@ -1228,7 +1251,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+@@ -1369,7 +1392,7 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
static int min_read_thresh = 8, min_write_thresh;
@@ -29914,7 +29924,7 @@ index 6035ab8..c7e4a44 100644
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];
-@@ -1250,10 +1273,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+@@ -1391,10 +1414,15 @@ static int proc_do_uuid(ctl_table *table, int write,
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
@@ -34174,7 +34184,7 @@ index 1cbfc6b..56e1dbb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 2d97bf0..5caa9cf 100644
+index 62306e5..c32000a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1581,7 +1581,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
@@ -39187,7 +39197,7 @@ index d956965..4179a77 100644
file->f_version = event_count;
return POLLIN | POLLRDNORM;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
-index 1fc8f12..20647c1 100644
+index 347bb05..63e1b73 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
@@ -47426,7 +47436,7 @@ index fcc50ab..c3dacf2 100644
lock_flocks();
diff --git a/fs/namei.c b/fs/namei.c
-index 9680cef..2f81108 100644
+index 9680cef..8af5ce7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -47501,27 +47511,25 @@ index 9680cef..2f81108 100644
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
-@@ -1345,6 +1361,9 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -1345,6 +1361,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (!res)
res = walk_component(nd, path, &nd->last,
nd->last_type, LOOKUP_FOLLOW);
-+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) {
++ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
+ res = -EACCES;
-+ }
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1617,6 +1636,9 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1617,6 +1635,8 @@ static int path_lookupat(int dfd, const char *name,
err = follow_link(&link, nd, &cookie);
if (!err)
err = lookup_last(nd, &path);
-+ if (!err && gr_handle_symlink_owner(&link, nd->inode)) {
++ if (!err && gr_handle_symlink_owner(&link, nd->inode))
+ err = -EACCES;
-+ }
put_link(nd, &link, cookie);
}
}
-@@ -1624,6 +1646,21 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1624,6 +1644,21 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -47543,7 +47551,7 @@ index 9680cef..2f81108 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
-@@ -1651,6 +1688,15 @@ static int do_path_lookup(int dfd, const char *name,
+@@ -1651,6 +1686,15 @@ static int do_path_lookup(int dfd, const char *name,
retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
if (likely(!retval)) {
@@ -47559,7 +47567,7 @@ index 9680cef..2f81108 100644
if (unlikely(!audit_dummy_context())) {
if (nd->path.dentry && nd->inode)
audit_inode(name, nd->path.dentry);
-@@ -2048,6 +2094,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2048,6 +2092,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -47573,7 +47581,16 @@ index 9680cef..2f81108 100644
return 0;
}
-@@ -2109,6 +2162,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2083,7 +2134,7 @@ static inline int open_to_namei_flags(int flag)
+ /*
+ * Handle the last step of open()
+ */
+-static struct file *do_last(struct nameidata *nd, struct path *path,
++static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
+ const struct open_flags *op, const char *pathname)
+ {
+ struct dentry *dir = nd->path.dentry;
+@@ -2109,16 +2160,44 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -47590,7 +47607,14 @@ index 9680cef..2f81108 100644
audit_inode(pathname, nd->path.dentry);
if (open_flag & O_CREAT) {
error = -EISDIR;
-@@ -2119,6 +2182,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ goto ok;
+ case LAST_BIND:
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -47604,10 +47628,26 @@ index 9680cef..2f81108 100644
+ error = -ENOENT;
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
audit_inode(pathname, dir);
goto ok;
}
-@@ -2140,6 +2213,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2134,18 +2213,37 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ !symlink_ok);
+ if (error < 0)
+ return ERR_PTR(error);
+- if (error) /* symlink */
++ if (error) /* symlink */ {
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ return NULL;
++ }
+ /* sayonara */
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -47624,7 +47664,17 @@ index 9680cef..2f81108 100644
error = -ENOTDIR;
if (nd->flags & LOOKUP_DIRECTORY) {
-@@ -2180,6 +2263,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ if (!nd->inode->i_op->lookup)
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ audit_inode(pathname, nd->path.dentry);
+ goto ok;
+ }
+@@ -2180,6 +2278,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode) {
int mode = op->mode;
@@ -47637,7 +47687,7 @@ index 9680cef..2f81108 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2203,6 +2292,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2203,6 +2307,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = vfs_create(dir->d_inode, dentry, mode, nd);
if (error)
goto exit_mutex_unlock;
@@ -47646,7 +47696,7 @@ index 9680cef..2f81108 100644
mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->path.dentry);
nd->path.dentry = dentry;
-@@ -2212,6 +2303,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2212,6 +2318,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/*
* It already exists.
*/
@@ -47666,23 +47716,60 @@ index 9680cef..2f81108 100644
mutex_unlock(&dir->d_inode->i_mutex);
audit_inode(pathname, path->dentry);
-@@ -2329,8 +2433,14 @@ static struct file *path_openat(int dfd, const char *pathname,
+@@ -2230,11 +2349,17 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ if (!path->dentry->d_inode)
+ goto exit_dput;
+
+- if (path->dentry->d_inode->i_op->follow_link)
++ if (path->dentry->d_inode->i_op->follow_link) {
++ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ return NULL;
++ }
+
+ path_to_nameidata(path, nd);
+ nd->inode = path->dentry->d_inode;
++
+ /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
+ error = complete_walk(nd);
+ if (error)
+@@ -2242,6 +2367,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ error = -EISDIR;
+ if (S_ISDIR(nd->inode->i_mode))
+ goto exit;
++
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
++
+ ok:
+ if (!S_ISREG(nd->inode->i_mode))
+ will_truncate = 0;
+@@ -2314,7 +2445,7 @@ static struct file *path_openat(int dfd, const char *pathname,
+ if (unlikely(error))
+ goto out_filp;
+
+- filp = do_last(nd, &path, op, pathname);
++ filp = do_last(nd, &path, NULL, op, pathname);
+ while (unlikely(!filp)) { /* trailing symlink */
+ struct path link = path;
+ void *cookie;
+@@ -2329,8 +2460,9 @@ static struct file *path_openat(int dfd, const char *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
filp = ERR_PTR(error);
- else
+- filp = do_last(nd, &path, op, pathname);
+ else {
- filp = do_last(nd, &path, op, pathname);
-+ if (!IS_ERR(filp) && gr_handle_symlink_owner(&link, nd->inode)) {
-+ if (filp)
-+ fput(filp);
-+ filp = ERR_PTR(-EACCES);
-+ }
++ filp = do_last(nd, &path, &link, op, pathname);
+ }
put_link(nd, &link, cookie);
}
out:
-@@ -2424,6 +2534,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
+@@ -2424,6 +2556,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
*path = nd.path;
return dentry;
eexist:
@@ -47694,7 +47781,7 @@ index 9680cef..2f81108 100644
dput(dentry);
dentry = ERR_PTR(-EEXIST);
fail:
-@@ -2446,6 +2561,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
+@@ -2446,6 +2583,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
}
EXPORT_SYMBOL(user_path_create);
@@ -47715,7 +47802,7 @@ index 9680cef..2f81108 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -2513,6 +2642,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+@@ -2513,6 +2664,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -47733,7 +47820,7 @@ index 9680cef..2f81108 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out_drop_write;
-@@ -2530,6 +2670,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+@@ -2530,6 +2692,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
}
out_drop_write:
mnt_drop_write(path.mnt);
@@ -47743,7 +47830,7 @@ index 9680cef..2f81108 100644
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2579,12 +2722,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+@@ -2579,12 +2744,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -47765,7 +47852,7 @@ index 9680cef..2f81108 100644
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2664,6 +2816,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2664,6 +2838,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
char * name;
struct dentry *dentry;
struct nameidata nd;
@@ -47774,7 +47861,7 @@ index 9680cef..2f81108 100644
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
-@@ -2692,6 +2846,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2692,6 +2868,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
error = -ENOENT;
goto exit3;
}
@@ -47790,7 +47877,7 @@ index 9680cef..2f81108 100644
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit3;
-@@ -2699,6 +2862,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2699,6 +2884,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
if (error)
goto exit4;
error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
@@ -47799,7 +47886,7 @@ index 9680cef..2f81108 100644
exit4:
mnt_drop_write(nd.path.mnt);
exit3:
-@@ -2761,6 +2926,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2761,6 +2948,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct dentry *dentry;
struct nameidata nd;
struct inode *inode = NULL;
@@ -47808,7 +47895,7 @@ index 9680cef..2f81108 100644
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
-@@ -2783,6 +2950,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2783,6 +2972,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (!inode)
goto slashes;
ihold(inode);
@@ -47825,7 +47912,7 @@ index 9680cef..2f81108 100644
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit2;
-@@ -2790,6 +2967,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2790,6 +2989,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (error)
goto exit3;
error = vfs_unlink(nd.path.dentry->d_inode, dentry);
@@ -47834,7 +47921,7 @@ index 9680cef..2f81108 100644
exit3:
mnt_drop_write(nd.path.mnt);
exit2:
-@@ -2865,10 +3044,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+@@ -2865,10 +3066,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -47853,7 +47940,7 @@ index 9680cef..2f81108 100644
out_drop_write:
mnt_drop_write(path.mnt);
out_dput:
-@@ -2940,6 +3127,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -2940,6 +3149,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
{
struct dentry *new_dentry;
struct path old_path, new_path;
@@ -47861,7 +47948,7 @@ index 9680cef..2f81108 100644
int how = 0;
int error;
-@@ -2963,7 +3151,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -2963,7 +3173,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
if (error)
return error;
@@ -47870,7 +47957,7 @@ index 9680cef..2f81108 100644
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
-@@ -2974,13 +3162,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -2974,13 +3184,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
error = mnt_want_write(new_path.mnt);
if (error)
goto out_dput;
@@ -47901,7 +47988,7 @@ index 9680cef..2f81108 100644
dput(new_dentry);
mutex_unlock(&new_path.dentry->d_inode->i_mutex);
path_put(&new_path);
-@@ -3208,6 +3413,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+@@ -3208,6 +3435,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
if (new_dentry == trap)
goto exit5;
@@ -47914,7 +48001,7 @@ index 9680cef..2f81108 100644
error = mnt_want_write(oldnd.path.mnt);
if (error)
goto exit5;
-@@ -3217,6 +3428,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+@@ -3217,6 +3450,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
goto exit6;
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
@@ -47924,7 +48011,7 @@ index 9680cef..2f81108 100644
exit6:
mnt_drop_write(oldnd.path.mnt);
exit5:
-@@ -3242,6 +3456,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -3242,6 +3478,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -47933,7 +48020,7 @@ index 9680cef..2f81108 100644
int len;
len = PTR_ERR(link);
-@@ -3251,7 +3467,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -3251,7 +3489,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -48090,10 +48177,10 @@ index 5c3cd82..ed535e5 100644
if (host_err < 0)
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
-index 35a8970..501b7f8 100644
+index 1c98f53..41e6a04 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
-@@ -409,6 +409,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+@@ -410,6 +410,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
nilfs->ns_r_segments_percentage =
le32_to_cpu(sbp->s_r_segments_percentage);
@@ -58795,7 +58882,7 @@ index 0000000..05a6015
+}
diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
new file mode 100644
-index 0000000..35a96d1
+index 0000000..a023dcf
--- /dev/null
+++ b/grsecurity/grsec_link.c
@@ -0,0 +1,59 @@
@@ -58812,7 +58899,7 @@ index 0000000..35a96d1
+
+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
+ /* ignore root-owned links, e.g. /proc/self */
-+ link_inode->i_uid &&
++ link_inode->i_uid && target &&
+ link_inode->i_uid != target->i_uid) {
+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
+ return 1;
@@ -63045,10 +63132,10 @@ index 9146f39..e19693b 100644
void cleanup_module(void) __attribute__((alias(#exitfn)));
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
-index df53fdf..3d0b4d3 100644
+index cdde2b3..d782954 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -135,6 +135,12 @@ extern struct cred init_cred;
+@@ -144,6 +144,12 @@ extern struct task_group root_task_group;
#define INIT_TASK_COMM "swapper"
@@ -63061,7 +63148,7 @@ index df53fdf..3d0b4d3 100644
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
-@@ -173,6 +179,7 @@ extern struct cred init_cred;
+@@ -183,6 +189,7 @@ extern struct task_group root_task_group;
RCU_INIT_POINTER(.cred, &init_cred), \
.comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
@@ -64025,21 +64112,21 @@ index 800f113..12c82ec 100644
}
diff --git a/include/linux/random.h b/include/linux/random.h
-index 8f74538..de61694 100644
+index 29e217a..b9d7532 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
-@@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
+@@ -55,6 +55,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
- extern void add_interrupt_randomness(int irq);
+ extern void add_interrupt_randomness(int irq, int irq_flags);
+#ifdef CONFIG_PAX_LATENT_ENTROPY
+extern void transfer_latent_entropy(void);
+#endif
+
extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
-
-@@ -69,12 +73,17 @@ void srandom32(u32 seed);
+@@ -71,12 +75,17 @@ void srandom32(u32 seed);
u32 prandom32(struct rnd_state *);
@@ -64177,7 +64264,7 @@ index 2148b12..519b820 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index d336c35..796b375 100644
+index 1e86bb4..37d6860 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio_list;
@@ -64244,7 +64331,7 @@ index d336c35..796b375 100644
/* Hash table maintenance information */
struct hlist_node uidhash_node;
uid_t uid;
-@@ -1338,8 +1363,8 @@ struct task_struct {
+@@ -1341,8 +1366,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -64255,7 +64342,7 @@ index d336c35..796b375 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1355,13 +1380,6 @@ struct task_struct {
+@@ -1358,13 +1383,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -64269,7 +64356,7 @@ index d336c35..796b375 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1378,8 +1396,16 @@ struct task_struct {
+@@ -1381,8 +1399,16 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -64286,7 +64373,7 @@ index d336c35..796b375 100644
/* open file information */
struct files_struct *files;
/* namespaces */
-@@ -1426,6 +1452,11 @@ struct task_struct {
+@@ -1429,6 +1455,11 @@ struct task_struct {
struct rt_mutex_waiter *pi_blocked_on;
#endif
@@ -64298,7 +64385,7 @@ index d336c35..796b375 100644
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
-@@ -1541,6 +1572,27 @@ struct task_struct {
+@@ -1544,6 +1575,27 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
@@ -64326,7 +64413,7 @@ index d336c35..796b375 100644
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
-@@ -1575,6 +1627,51 @@ struct task_struct {
+@@ -1578,6 +1630,51 @@ struct task_struct {
#endif
};
@@ -64378,7 +64465,7 @@ index d336c35..796b375 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -2090,7 +2187,9 @@ void yield(void);
+@@ -2093,7 +2190,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -64388,7 +64475,7 @@ index d336c35..796b375 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2123,6 +2222,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2126,6 +2225,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -64396,7 +64483,7 @@ index d336c35..796b375 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2244,6 +2344,12 @@ static inline void mmdrop(struct mm_struct * mm)
+@@ -2247,6 +2347,12 @@ static inline void mmdrop(struct mm_struct * mm)
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -64409,7 +64496,7 @@ index d336c35..796b375 100644
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
-@@ -2260,7 +2366,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2263,7 +2369,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -64418,7 +64505,7 @@ index d336c35..796b375 100644
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2425,9 +2531,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2428,9 +2534,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -64700,36 +64787,29 @@ index d00e0ba..f75c968 100644
return kmem_cache_alloc_node_trace(size, cachep, flags, node);
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
-index 0ec00b3..65e7e0e 100644
+index 0ec00b3..22b4715 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
-@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
return kmem_cache_alloc_node(cachep, flags, -1);
}
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
-+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- return __kmalloc_node(size, flags, node);
-@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
-+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *kmalloc(size_t size, gfp_t flags)
- {
+@@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc_node(size, flags, -1);
}
-+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *__kmalloc(size_t size, gfp_t flags)
+-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags);
+ }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
-index a32bcfd..fb2b210 100644
+index a32bcfd..c3991fb 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -89,7 +89,7 @@ struct kmem_cache {
@@ -64741,41 +64821,33 @@ index a32bcfd..fb2b210 100644
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
-@@ -150,6 +150,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+@@ -150,7 +150,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
* Sorry that the following has to be that ugly but some versions of GCC
* have trouble with constant propagation and loops.
*/
-+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
- static __always_inline int kmalloc_index(size_t size)
+-static __always_inline int kmalloc_index(size_t size)
++static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
{
if (!size)
-@@ -204,6 +205,7 @@ static __always_inline int kmalloc_index(size_t size)
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
- static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
- {
- int index = kmalloc_index(size);
-@@ -215,9 +217,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
+ return 0;
+@@ -215,9 +215,9 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
}
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
- static __always_inline void *
-+kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1);
-+static __always_inline void *
+-static __always_inline void *
++static __always_inline __size_overflow(1) void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-@@ -256,12 +260,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+@@ -256,12 +256,13 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
}
#endif
-+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
return kmalloc_order_trace(size, flags, order);
@@ -64785,7 +64857,7 @@ index a32bcfd..fb2b210 100644
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
-@@ -281,7 +287,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+@@ -281,7 +282,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
}
#ifdef CONFIG_NUMA
@@ -64794,7 +64866,7 @@ index a32bcfd..fb2b210 100644
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
-@@ -298,6 +304,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
+@@ -298,6 +299,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif
@@ -67669,7 +67741,7 @@ index 222457a..ecd0db4 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 866c9d5..31751ff 100644
+index 80fb1c6..23d88d8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -67692,43 +67764,7 @@ index 866c9d5..31751ff 100644
/*
* The futex address must be "naturally" aligned.
*/
-@@ -2231,11 +2237,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
- * @uaddr2: the pi futex we will take prior to returning to user-space
- *
- * The caller will wait on uaddr and will be requeued by futex_requeue() to
-- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
-- * complete the acquisition of the rt_mutex prior to returning to userspace.
-- * This ensures the rt_mutex maintains an owner when it has waiters; without
-- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
-- * need to.
-+ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
-+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
-+ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
-+ * without one, the pi logic would not know which task to boost/deboost, if
-+ * there was a need to.
- *
- * We call schedule in futex_wait_queue_me() when we enqueue and return there
- * via the following:
-@@ -2272,6 +2278,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- struct futex_q q = futex_q_init;
- int res, ret;
-
-+ if (uaddr == uaddr2)
-+ return -EINVAL;
-+
- if (!bitset)
- return -EINVAL;
-
-@@ -2370,7 +2379,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
-- if (rt_mutex_owner(pi_mutex) == current)
-+ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
- /*
-@@ -2721,6 +2730,7 @@ static int __init futex_init(void)
+@@ -2724,6 +2730,7 @@ static int __init futex_init(void)
{
u32 curval;
int i;
@@ -67736,7 +67772,7 @@ index 866c9d5..31751ff 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -2732,8 +2742,11 @@ static int __init futex_init(void)
+@@ -2735,8 +2742,11 @@ static int __init futex_init(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -70051,10 +70087,10 @@ index 3d9f31c..7fefc9e 100644
default:
diff --git a/kernel/sched.c b/kernel/sched.c
-index 9cd8ca7..e73caba 100644
+index e0431c4..40c5ece 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -5287,6 +5287,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -5284,6 +5284,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -70063,7 +70099,7 @@ index 9cd8ca7..e73caba 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -5320,7 +5322,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -5317,7 +5319,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -70073,7 +70109,7 @@ index 9cd8ca7..e73caba 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -5477,6 +5480,7 @@ recheck:
+@@ -5474,6 +5477,7 @@ recheck:
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -71567,7 +71603,7 @@ index d9df745..e73c2fe 100644
static inline void *ptr_to_indirect(void *ptr)
{
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index 993599e..24690ac 100644
+index d74c317..24690ac 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -16,6 +16,9 @@
@@ -71639,7 +71675,7 @@ index 993599e..24690ac 100644
case 'B':
return symbol_string(buf, end, ptr, spec, *fmt);
case 'R':
-@@ -878,15 +894,24 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+@@ -878,9 +894,17 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
@@ -71660,15 +71696,7 @@ index 993599e..24690ac 100644
case 'K':
/*
* %pK cannot be used in IRQ context because its test
- * for CAP_SYSLOG would be meaningless.
- */
-- if (in_irq() || in_serving_softirq() || in_nmi()) {
-+ if (kptr_restrict && (in_irq() || in_serving_softirq() ||
-+ in_nmi())) {
- if (spec.field_width == -1)
- spec.field_width = 2 * sizeof(void *);
- return string(buf, end, "pK-error", spec);
-@@ -897,6 +922,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+@@ -898,6 +922,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
ptr = NULL;
break;
}
@@ -71690,7 +71718,7 @@ index 993599e..24690ac 100644
spec.flags |= SMALL;
if (spec.field_width == -1) {
spec.field_width = 2 * sizeof(void *);
-@@ -1608,11 +1648,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+@@ -1609,11 +1648,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
typeof(type) value; \
if (sizeof(type) == 8) { \
args = PTR_ALIGN(args, sizeof(u32)); \
@@ -71705,7 +71733,7 @@ index 993599e..24690ac 100644
} \
args += sizeof(type); \
value; \
-@@ -1675,7 +1715,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+@@ -1676,7 +1715,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
case FORMAT_TYPE_STR: {
const char *str_arg = args;
args += strlen(str_arg) + 1;
@@ -71828,10 +71856,10 @@ index 8f005e9..1cb1036 100644
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index b1e1bad..597b8ba 100644
+index 0f897b8..5a74f92 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -2442,6 +2442,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2461,6 +2461,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -71859,7 +71887,7 @@ index b1e1bad..597b8ba 100644
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
*/
-@@ -2544,6 +2565,11 @@ retry_avoidcopy:
+@@ -2563,6 +2584,11 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
@@ -71871,7 +71899,7 @@ index b1e1bad..597b8ba 100644
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
-@@ -2695,6 +2721,10 @@ retry:
+@@ -2714,6 +2740,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -71882,7 +71910,7 @@ index b1e1bad..597b8ba 100644
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
-@@ -2724,6 +2754,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2743,6 +2773,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -71893,7 +71921,7 @@ index b1e1bad..597b8ba 100644
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
-@@ -2735,6 +2769,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
VM_FAULT_SET_HINDEX(h - hstates);
}
@@ -71921,7 +71949,7 @@ index b1e1bad..597b8ba 100644
if (!ptep)
return VM_FAULT_OOM;
diff --git a/mm/internal.h b/mm/internal.h
-index 2189af4..f2ca332 100644
+index 0c26b5e..1cc340f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
@@ -72133,7 +72161,7 @@ index 5bd5bb1..2da9ddb 100644
/* keep elevated page count for bad page */
return ret;
diff --git a/mm/memory.c b/mm/memory.c
-index 1b1ca17..e6715dd 100644
+index 70f5daf..0964853 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -72162,7 +72190,7 @@ index 1b1ca17..e6715dd 100644
}
/*
-@@ -1574,12 +1581,6 @@ no_page_table:
+@@ -1577,12 +1584,6 @@ no_page_table:
return page;
}
@@ -72175,7 +72203,7 @@ index 1b1ca17..e6715dd 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1652,10 +1653,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1655,10 +1656,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
@@ -72188,7 +72216,7 @@ index 1b1ca17..e6715dd 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1703,7 +1704,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1706,7 +1707,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -72197,7 +72225,7 @@ index 1b1ca17..e6715dd 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1730,11 +1731,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1733,11 +1734,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -72209,7 +72237,7 @@ index 1b1ca17..e6715dd 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1808,7 +1804,7 @@ next_page:
+@@ -1811,7 +1807,7 @@ next_page:
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -72218,7 +72246,7 @@ index 1b1ca17..e6715dd 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2015,6 +2011,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2018,6 +2014,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -72229,7 +72257,7 @@ index 1b1ca17..e6715dd 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2049,10 +2049,22 @@ out:
+@@ -2052,10 +2052,22 @@ out:
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
@@ -72252,7 +72280,7 @@ index 1b1ca17..e6715dd 100644
vma->vm_flags |= VM_INSERTPAGE;
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2138,6 +2150,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2141,6 +2153,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -72260,7 +72288,7 @@ index 1b1ca17..e6715dd 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2345,7 +2358,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2348,7 +2361,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -72271,7 +72299,7 @@ index 1b1ca17..e6715dd 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2365,7 +2380,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2368,7 +2383,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -72282,7 +72310,7 @@ index 1b1ca17..e6715dd 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2453,6 +2470,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2456,6 +2473,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -72469,7 +72497,7 @@ index 1b1ca17..e6715dd 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2664,6 +2861,12 @@ gotten:
+@@ -2667,6 +2864,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -72482,7 +72510,7 @@ index 1b1ca17..e6715dd 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2715,6 +2918,10 @@ gotten:
+@@ -2718,6 +2921,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -72493,7 +72521,7 @@ index 1b1ca17..e6715dd 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2994,6 +3201,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2997,6 +3204,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -72505,7 +72533,7 @@ index 1b1ca17..e6715dd 100644
unlock_page(page);
if (swapcache) {
/*
-@@ -3017,6 +3229,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3020,6 +3232,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -72517,7 +72545,7 @@ index 1b1ca17..e6715dd 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3036,40 +3253,6 @@ out_release:
+@@ -3039,40 +3256,6 @@ out_release:
}
/*
@@ -72558,7 +72586,7 @@ index 1b1ca17..e6715dd 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3078,27 +3261,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3081,27 +3264,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -72591,7 +72619,7 @@ index 1b1ca17..e6715dd 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3117,6 +3296,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3120,6 +3299,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -72603,7 +72631,7 @@ index 1b1ca17..e6715dd 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3124,6 +3308,12 @@ setpte:
+@@ -3127,6 +3311,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -72616,7 +72644,7 @@ index 1b1ca17..e6715dd 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3267,6 +3457,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3270,6 +3460,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -72629,7 +72657,7 @@ index 1b1ca17..e6715dd 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3286,6 +3482,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3289,6 +3485,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -72644,7 +72672,7 @@ index 1b1ca17..e6715dd 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3439,6 +3643,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3442,6 +3646,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -72657,7 +72685,7 @@ index 1b1ca17..e6715dd 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3455,6 +3665,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3458,6 +3668,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -72668,7 +72696,7 @@ index 1b1ca17..e6715dd 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3466,6 +3680,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3469,6 +3683,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -72703,7 +72731,7 @@ index 1b1ca17..e6715dd 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3495,7 +3737,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3498,7 +3740,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* run pte_offset_map on the pmd, if an huge pmd could
* materialize from under us from a different thread.
*/
@@ -72712,7 +72740,7 @@ index 1b1ca17..e6715dd 100644
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
-@@ -3532,6 +3774,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3535,6 +3777,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -72736,7 +72764,7 @@ index 1b1ca17..e6715dd 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3562,6 +3821,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3565,6 +3824,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -72767,7 +72795,7 @@ index 1b1ca17..e6715dd 100644
#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
-@@ -3599,7 +3882,7 @@ static int __init gate_vma_init(void)
+@@ -3602,7 +3885,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -74711,7 +74739,7 @@ index f59e170..34e2a2b 100644
new->vm_region = region;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 065dbe8..2a2e9e4 100644
+index 6e51bf0..347a789 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -341,7 +341,7 @@ out:
@@ -75941,7 +75969,7 @@ index 136ac4f..f917fa9 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index eeba3bb..820e22e 100644
+index eeba3bb..b4410cf 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -76065,6 +76093,15 @@ index eeba3bb..820e22e 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;
+@@ -329,7 +369,7 @@ static void purge_vmap_area_lazy(void);
+ * Allocate a region of KVA of the specified size and alignment, within the
+ * vstart and vend.
+ */
+-static struct vmap_area *alloc_vmap_area(unsigned long size,
++static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
+ unsigned long align,
+ unsigned long vstart, unsigned long vend,
+ int node, gfp_t gfp_mask)
@@ -1295,6 +1335,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
@@ -76851,7 +76888,7 @@ index 68bbf9f..5ef0d12 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 5738654..2078746 100644
+index 4b18703..076f9cb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
@@ -76869,7 +76906,7 @@ index 5738654..2078746 100644
}
}
EXPORT_SYMBOL(dev_load);
-@@ -1593,7 +1597,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+@@ -1594,7 +1598,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
@@ -76878,7 +76915,7 @@ index 5738654..2078746 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -1603,7 +1607,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+@@ -1604,7 +1608,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
nf_reset(skb);
if (unlikely(!is_skb_forwardable(dev, skb))) {
@@ -76887,7 +76924,7 @@ index 5738654..2078746 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -2030,7 +2034,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+@@ -2031,7 +2035,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
struct dev_gso_cb {
void (*destructor)(struct sk_buff *skb);
@@ -76896,7 +76933,7 @@ index 5738654..2078746 100644
#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-@@ -2943,7 +2947,7 @@ enqueue:
+@@ -2944,7 +2948,7 @@ enqueue:
local_irq_restore(flags);
@@ -76905,7 +76942,7 @@ index 5738654..2078746 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -3017,7 +3021,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3018,7 +3022,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -76914,7 +76951,7 @@ index 5738654..2078746 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
-@@ -3306,7 +3310,7 @@ ncls:
+@@ -3307,7 +3311,7 @@ ncls:
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
@@ -76923,7 +76960,7 @@ index 5738654..2078746 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -3871,7 +3875,7 @@ void netif_napi_del(struct napi_struct *napi)
+@@ -3872,7 +3876,7 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
@@ -76932,7 +76969,7 @@ index 5738654..2078746 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -5897,7 +5901,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -5900,7 +5904,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -77004,7 +77041,7 @@ index c40f27e..7f49254 100644
m->msg_iov = iov;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 2ef859a..795f65f 100644
+index 05842ab..6d674ce 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -57,7 +57,7 @@ struct rtnl_link {
@@ -78804,7 +78841,7 @@ index 732152f..60bb09e 100644
*uaddr_len = sizeof(struct sockaddr_ax25);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index d9d4970..a49013a 100644
+index d9d4970..906059b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -78825,7 +78862,26 @@ index d9d4970..a49013a 100644
spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
-@@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -2613,6 +2613,7 @@ out:
+
+ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ {
++ struct sock_extended_err ee;
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+@@ -2634,8 +2635,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
++ ee = serr->ee;
+ put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+- sizeof(serr->ee), &serr->ee);
++ sizeof ee, &ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+@@ -3266,7 +3268,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
@@ -78834,7 +78890,7 @@ index d9d4970..a49013a 100644
return -EFAULT;
switch (val) {
case TPACKET_V1:
-@@ -3316,7 +3316,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3316,7 +3318,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
if (put_user(len, optlen))
return -EFAULT;
@@ -79702,7 +79758,7 @@ index 273cbce..fd1e8ff 100644
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
-index 4e2b3b4..ad3adc6 100644
+index c90b832..69d57f6 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
@@ -84155,10 +84211,10 @@ index 0000000..b8008f7
+}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
-index 0000000..54a12fe
+index 0000000..a898f84
--- /dev/null
+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,2392 @@
+@@ -0,0 +1,2936 @@
+_000001_hash alloc_dr 2 65495 _000001_hash NULL
+_000002_hash __copy_from_user 3 10918 _000002_hash NULL
+_000003_hash __copy_from_user_inatomic 3 4365 _000003_hash NULL
@@ -84166,7 +84222,7 @@ index 0000000..54a12fe
+_000005_hash __copy_to_user_inatomic 3 19214 _000005_hash NULL
+_000006_hash kcalloc 1-2 27770 _000006_hash NULL
+_000008_hash kmalloc 1 60432 _002505_hash NULL nohasharray
-+_000009_hash kmalloc_node 1 50163 _000009_hash NULL
++_000009_hash kmalloc_node 1 50163 _002930_hash NULL nohasharray
+_000010_hash kmalloc_slab 1 11917 _000010_hash NULL
+_000011_hash kmemdup 2 64015 _000011_hash NULL
+_000012_hash __krealloc 2 14857 _001118_hash NULL nohasharray
@@ -84331,7 +84387,7 @@ index 0000000..54a12fe
+_000180_hash i2o_parm_table_get 6 61635 _000180_hash NULL
+_000181_hash ib_ucm_alloc_data 3 36885 _000181_hash NULL
+_000182_hash ib_uverbs_unmarshall_recv 5 12251 _000182_hash NULL
-+_000183_hash ieee80211_build_probe_req 7 27660 _000183_hash NULL
++_000183_hash ieee80211_build_probe_req 7-5 27660 _000183_hash NULL
+_000184_hash ieee80211_if_write 3 34894 _000184_hash NULL
+_000185_hash if_write 3 51756 _000185_hash NULL
+_000186_hash ima_write_policy 3 40548 _000186_hash NULL
@@ -84349,7 +84405,7 @@ index 0000000..54a12fe
+_000198_hash ip_vs_conn_fill_param_sync 6 29771 _001499_hash NULL nohasharray
+_000199_hash ip_vs_create_timeout_table 2 64478 _000199_hash NULL
+_000200_hash ipw_queue_tx_init 3 49161 _000200_hash NULL
-+_000201_hash irias_new_octseq_value 2 13596 _000201_hash NULL
++_000201_hash irias_new_octseq_value 2 13596 _002933_hash NULL nohasharray
+_000202_hash ir_lirc_transmit_ir 3 64403 _000202_hash NULL
+_000203_hash isdn_add_channels 3 40905 _000203_hash NULL
+_000204_hash isdn_ppp_fill_rq 2 41428 _000204_hash NULL
@@ -84383,7 +84439,7 @@ index 0000000..54a12fe
+_000234_hash lbs_debugfs_write 3 48413 _000234_hash NULL
+_000235_hash lc_create 3 48662 _000235_hash NULL
+_000236_hash ldm_frag_add 2 5611 _000236_hash NULL
-+_000237_hash libipw_alloc_txb 1 27579 _000237_hash NULL
++_000237_hash libipw_alloc_txb 1-3-2 27579 _000237_hash NULL
+_000238_hash listxattr 3 12769 _000238_hash NULL
+_000239_hash load_msg 2 95 _000239_hash NULL
+_000240_hash mb_cache_create 2 17307 _000240_hash NULL
@@ -84481,7 +84537,7 @@ index 0000000..54a12fe
+_000336_hash restore_i387_fxsave 2 17528 _000336_hash NULL
+_000337_hash rndis_add_response 2 58544 _000337_hash NULL
+_000338_hash rndis_set_oid 4 6547 _000338_hash NULL
-+_000339_hash rngapi_reset 3 34366 _000339_hash NULL
++_000339_hash rngapi_reset 3 34366 _002740_hash NULL nohasharray
+_000340_hash roccat_common_receive 4 53407 _000340_hash NULL
+_000341_hash roccat_common_send 4 12284 _000341_hash NULL
+_000342_hash rpc_malloc 2 43573 _000342_hash NULL
@@ -84592,7 +84648,7 @@ index 0000000..54a12fe
+_000451_hash zd_usb_iowrite16v_async 3 23984 _000451_hash NULL
+_000452_hash zd_usb_read_fw 4 22049 _000452_hash NULL
+_000453_hash aa_simple_write_to_buffer 3-4 49683 _000453_hash NULL
-+_000454_hash acpi_ex_allocate_name_string 2 7685 _000454_hash NULL
++_000454_hash acpi_ex_allocate_name_string 2 7685 _002692_hash NULL nohasharray
+_000455_hash acpi_os_allocate_zeroed 1 37422 _000455_hash NULL
+_000456_hash acpi_ut_initialize_buffer 2 47143 _002270_hash NULL nohasharray
+_000457_hash ad7879_spi_xfer 3 36311 _000457_hash NULL
@@ -84751,7 +84807,7 @@ index 0000000..54a12fe
+_000628_hash ib_send_cm_rtu 3 63138 _000628_hash NULL
+_000629_hash ieee80211_key_alloc 3 19065 _000629_hash NULL
+_000630_hash ieee80211_mgmt_tx 9 59699 _000630_hash NULL
-+_000631_hash ieee80211_send_probe_req 6 6924 _000631_hash NULL
++_000631_hash ieee80211_send_probe_req 6-4 6924 _000631_hash NULL
+_000632_hash init_bch 1-2 64130 _000632_hash NULL
+_000634_hash init_ipath 1 48187 _000634_hash NULL
+_000635_hash init_list_set 2-3 39188 _000635_hash NULL
@@ -84795,7 +84851,7 @@ index 0000000..54a12fe
+_000675_hash kvm_write_guest_page 5 63555 _000675_hash NULL
+_000676_hash l2tp_session_create 1 25286 _000676_hash NULL
+_000677_hash leaf_dealloc 3 29566 _000677_hash NULL
-+_000678_hash linear_conf 2 23485 _000678_hash NULL
++_000678_hash linear_conf 2 23485 _003035_hash NULL nohasharray
+_000679_hash lirc_buffer_init 2-3 53282 _000679_hash NULL
+_000681_hash lpfc_sli4_queue_alloc 3 62646 _000681_hash NULL
+_000682_hash mce_request_packet 3 1073 _000682_hash NULL
@@ -84870,7 +84926,7 @@ index 0000000..54a12fe
+_000760_hash sctp_setsockopt_connectx_old 3 22631 _000760_hash NULL
+_000761_hash sctp_tsnmap_init 2 36446 _000761_hash NULL
+_000762_hash security_context_to_sid 2 19839 _000762_hash NULL
-+_000763_hash security_context_to_sid_default 2 3492 _000763_hash NULL
++_000763_hash security_context_to_sid_default 2 3492 _002996_hash NULL nohasharray
+_000764_hash security_context_to_sid_force 2 20724 _000764_hash NULL
+_000765_hash sel_write_access 3 51704 _000765_hash NULL
+_000766_hash sel_write_create 3 11353 _000766_hash NULL
@@ -84924,7 +84980,7 @@ index 0000000..54a12fe
+_000817_hash tcf_hash_create 4 54360 _000817_hash NULL
+_000818_hash test_unaligned_bulk 3 52333 _000818_hash NULL
+_000819_hash tifm_alloc_adapter 1 10903 _000819_hash NULL
-+_000820_hash tm6000_read_write_usb 7 50774 _000820_hash NULL
++_000820_hash tm6000_read_write_usb 7 50774 _002745_hash NULL nohasharray
+_000821_hash tnode_alloc 1 49407 _000821_hash NULL
+_000822_hash tomoyo_commit_ok 2 20167 _000822_hash NULL
+_000823_hash tomoyo_scan_bprm 2-4 15642 _000823_hash NULL
@@ -84938,7 +84994,7 @@ index 0000000..54a12fe
+_000832_hash usblp_write 3 23178 _000832_hash NULL
+_000833_hash user_confirm_reply 4 43708 _000833_hash NULL
+_000834_hash uvc_alloc_buffers 2 9656 _000834_hash NULL
-+_000835_hash uvc_alloc_entity 3 20836 _000835_hash NULL
++_000835_hash uvc_alloc_entity 3-4 20836 _000835_hash NULL
+_000836_hash v4l2_ctrl_new 7 38725 _000836_hash NULL
+_000837_hash v4l2_event_subscribe 3 19510 _000837_hash NULL
+_000838_hash vc_resize 2-3 3585 _000838_hash NULL
@@ -85067,12 +85123,12 @@ index 0000000..54a12fe
+_000968_hash copy_entries_to_user 1 52367 _000968_hash NULL
+_000969_hash copy_from_buf 4 27308 _000969_hash NULL
+_000970_hash copy_from_user_toio 3 31966 _000970_hash NULL
-+_000971_hash copy_oldmem_page 3 26164 _000971_hash NULL
++_000971_hash copy_oldmem_page 3-1 26164 _000971_hash NULL
+_000972_hash copy_to_user_fromio 3 57432 _000972_hash NULL
+_000973_hash copy_vm86_regs_from_user 3 45340 _000973_hash NULL
+_000974_hash cryptd_hash_setkey 3 42781 _000974_hash NULL
+_000975_hash crypto_authenc_esn_setkey 3 6985 _000975_hash NULL
-+_000976_hash crypto_authenc_setkey 3 80 _000976_hash NULL
++_000976_hash crypto_authenc_setkey 3 80 _002947_hash NULL nohasharray
+_000977_hash csum_partial_copy_fromiovecend 3-4 9957 _000977_hash NULL
+_000979_hash cx18_copy_buf_to_user 4 22735 _000979_hash NULL
+_000981_hash cxgbi_ddp_reserve 4 30091 _000981_hash NULL
@@ -85111,7 +85167,7 @@ index 0000000..54a12fe
+_001015_hash dvb_dmxdev_set_buffer_size 2 55643 _001015_hash NULL
+_001016_hash dvb_dvr_set_buffer_size 2 9840 _001016_hash NULL
+_001017_hash dvb_play 3 50814 _001017_hash NULL
-+_001018_hash dvb_ringbuffer_pkt_read_user 3-5 4303 _001018_hash NULL
++_001018_hash dvb_ringbuffer_pkt_read_user 3-5-2 4303 _001018_hash NULL
+_001020_hash dvb_ringbuffer_read_user 3 56702 _001020_hash NULL
+_001021_hash econet_sendmsg 4 51430 _001021_hash NULL
+_001022_hash ecryptfs_filldir 3 6622 _001022_hash NULL
@@ -85440,7 +85496,7 @@ index 0000000..54a12fe
+_001364_hash unlink1 3 63059 _001364_hash NULL
+_001366_hash usb_allocate_stream_buffers 3 8964 _001366_hash NULL
+_001367_hash usbdev_read 3 45114 _001367_hash NULL
-+_001368_hash usblp_read 3 57342 _001368_hash NULL
++_001368_hash usblp_read 3 57342 _002942_hash NULL nohasharray
+_001369_hash usbtmc_read 3 32377 _001369_hash NULL
+_001370_hash usbtmc_write 3 64340 _001370_hash NULL
+_001371_hash usbvision_v4l2_read 3 34386 _001371_hash NULL
@@ -85490,7 +85546,7 @@ index 0000000..54a12fe
+_001418_hash xfs_iext_realloc_indirect 2 59211 _001418_hash NULL
+_001419_hash xfs_inumbers_fmt 3 12817 _001419_hash NULL
+_001420_hash xlog_recover_add_to_cont_trans 4 44102 _001420_hash NULL
-+_001421_hash xz_dec_lzma2_create 2 36353 _001421_hash NULL
++_001421_hash xz_dec_lzma2_create 2 36353 _002638_hash NULL nohasharray
+_001422_hash _zd_iowrite32v_locked 3 44725 _001422_hash NULL
+_001423_hash zerocopy_sg_from_iovec 3 11828 _001423_hash NULL
+_001424_hash zoran_write 3 22404 _001424_hash NULL
@@ -85622,10 +85678,10 @@ index 0000000..54a12fe
+_001557_hash generic_readlink 3 32654 _001557_hash NULL
+_001558_hash gpio_power_read 3 36059 _001558_hash NULL
+_001559_hash hash_recvmsg 4 50924 _001559_hash NULL
-+_001560_hash ht40allow_map_read 3 55209 _001560_hash NULL
++_001560_hash ht40allow_map_read 3 55209 _002670_hash NULL nohasharray
+_001561_hash hugetlbfs_read 3 11268 _001561_hash NULL
+_001562_hash hwflags_read 3 52318 _001562_hash NULL
-+_001563_hash hysdn_conf_read 3 42324 _001563_hash NULL
++_001563_hash hysdn_conf_read 3 42324 _003094_hash NULL nohasharray
+_001564_hash i2400m_rx_stats_read 3 57706 _001564_hash NULL
+_001565_hash i2400m_tx_stats_read 3 28527 _001565_hash NULL
+_001566_hash i2o_pool_alloc 4 55485 _001566_hash NULL
@@ -85698,7 +85754,7 @@ index 0000000..54a12fe
+_001635_hash iwl_dbgfs_rxon_flags_read 3 20795 _001635_hash NULL
+_001636_hash iwl_dbgfs_rx_queue_read 3 19943 _001636_hash NULL
+_001637_hash iwl_dbgfs_rx_statistics_read 3 62687 _001637_hash &_000308_hash
-+_001638_hash iwl_dbgfs_sensitivity_read 3 63116 _001638_hash NULL
++_001638_hash iwl_dbgfs_sensitivity_read 3 63116 _002844_hash NULL nohasharray
+_001639_hash iwl_dbgfs_sleep_level_override_read 3 3038 _001639_hash NULL
+_001640_hash iwl_dbgfs_sram_read 3 44505 _001640_hash NULL
+_001641_hash iwl_dbgfs_stations_read 3 9309 _001641_hash NULL
@@ -85731,7 +85787,7 @@ index 0000000..54a12fe
+_001668_hash iwl_legacy_dbgfs_sensitivity_read 3 55816 _001668_hash NULL
+_001669_hash iwl_legacy_dbgfs_sram_read 3 26419 _001669_hash NULL
+_001670_hash iwl_legacy_dbgfs_stations_read 3 24121 _001670_hash NULL
-+_001671_hash iwl_legacy_dbgfs_status_read 3 48508 _001671_hash NULL
++_001671_hash iwl_legacy_dbgfs_status_read 3 48508 _003033_hash NULL nohasharray
+_001672_hash iwl_legacy_dbgfs_traffic_log_read 3 31625 _001672_hash NULL
+_001673_hash iwl_legacy_dbgfs_tx_queue_read 3 34192 _001673_hash NULL
+_001674_hash iwl_legacy_dbgfs_tx_statistics_read 3 63987 _001674_hash NULL
@@ -85791,7 +85847,7 @@ index 0000000..54a12fe
+_001730_hash mwifiex_regrdwr_read 3 34472 _001730_hash NULL
+_001731_hash netlink_sendmsg 4 33708 _001731_hash &_000809_hash
+_001732_hash nfsctl_transaction_write 3 64800 _001732_hash NULL
-+_001733_hash nfsd_vfs_read 6 62605 _001733_hash NULL
++_001733_hash nfsd_vfs_read 6 62605 _002821_hash NULL nohasharray
+_001734_hash nfsd_vfs_write 6 54577 _001734_hash NULL
+_001735_hash nfs_map_group_to_gid 3 15892 _001735_hash NULL
+_001736_hash nfs_map_name_to_uid 3 51132 _001736_hash NULL
@@ -85864,7 +85920,7 @@ index 0000000..54a12fe
+_001806_hash queues_read 3 24877 _001806_hash NULL
+_001807_hash raw_recvmsg 4 17277 _001807_hash NULL
+_001808_hash raw_send_hdrinc 4 58803 _001808_hash NULL
-+_001809_hash raw_sendmsg 4 23078 _001809_hash NULL
++_001809_hash raw_sendmsg 4 23078 _002900_hash NULL nohasharray
+_001810_hash rawsock_sendmsg 4 60010 _001810_hash NULL
+_001811_hash rawv6_send_hdrinc 3 35425 _001811_hash NULL
+_001812_hash rcname_read 3 25919 _001812_hash NULL
@@ -86006,7 +86062,7 @@ index 0000000..54a12fe
+_001955_hash st_write 3 16874 _001955_hash NULL
+_001956_hash supply_map_read_file 3 10608 _001956_hash NULL
+_001957_hash sys_bind 3 10799 _001957_hash NULL
-+_001958_hash sys_connect 3 15291 _001958_hash NULL
++_001958_hash sys_connect 3 15291 _002928_hash NULL nohasharray
+_001959_hash sysfs_acpi_set 3 625 _001959_hash NULL
+_001960_hash sysfs_read_file 3 42113 _001960_hash NULL
+_001961_hash sysfs_write_file 3 57116 _001961_hash NULL
@@ -86088,7 +86144,7 @@ index 0000000..54a12fe
+_002045_hash enable_read 3 2117 _002045_hash NULL
+_002046_hash exofs_read_kern 6 39921 _002046_hash &_001745_hash
+_002047_hash fc_change_queue_depth 2 36841 _002047_hash NULL
-+_002048_hash frequency_read 3 64031 _002048_hash NULL
++_002048_hash frequency_read 3 64031 _003133_hash NULL nohasharray
+_002049_hash get_alua_req 3 4166 _002049_hash NULL
+_002050_hash get_rdac_req 3 45882 _002050_hash NULL
+_002051_hash hci_sock_recvmsg 4 7072 _002051_hash NULL
@@ -86125,7 +86181,7 @@ index 0000000..54a12fe
+_002082_hash ieee80211_if_read_flags 3 57470 _002082_hash NULL
+_002083_hash ieee80211_if_read_fwded_frames 3 36520 _002083_hash NULL
+_002084_hash ieee80211_if_read_fwded_mcast 3 39571 _002084_hash &_000104_hash
-+_002085_hash ieee80211_if_read_fwded_unicast 3 59740 _002085_hash NULL
++_002085_hash ieee80211_if_read_fwded_unicast 3 59740 _002696_hash NULL nohasharray
+_002086_hash ieee80211_if_read_last_beacon 3 31257 _002086_hash NULL
+_002087_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002087_hash NULL
+_002088_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002088_hash NULL
@@ -86306,7 +86362,7 @@ index 0000000..54a12fe
+_002270_hash ses_recv_diag 4 47143 _002270_hash &_000456_hash
+_002271_hash ses_send_diag 4 64527 _002271_hash NULL
+_002272_hash spi_dv_device_echo_buffer 2-3 39846 _002272_hash NULL
-+_002274_hash ubifs_leb_change 4 22399 _002274_hash NULL
++_002274_hash ubifs_leb_change 4 22399 _003009_hash NULL nohasharray
+_002275_hash ubifs_leb_write 4-5 61226 _002275_hash NULL
+_002277_hash ubi_write 4-5 30809 _002277_hash NULL
+_002278_hash fixup_leb 3 43256 _002278_hash NULL
@@ -86381,7 +86437,7 @@ index 0000000..54a12fe
+_002347_hash sys32_ipc 3 7238 _002347_hash NULL
+_002348_hash sys32_rt_sigpending 2 25814 _002348_hash NULL
+_002349_hash compat_do_readv_writev 4 49102 _002349_hash NULL
-+_002350_hash compat_keyctl_instantiate_key_iov 3 57431 _002350_hash NULL
++_002350_hash compat_keyctl_instantiate_key_iov 3 57431 _003006_hash NULL nohasharray
+_002351_hash compat_process_vm_rw 3-5 22254 _002351_hash NULL
+_002353_hash compat_sys_setsockopt 5 3326 _002353_hash NULL
+_002354_hash ipath_cdev_init 1 37752 _002354_hash NULL
@@ -86547,16 +86603,560 @@ index 0000000..54a12fe
+_002534_hash vmw_cursor_update_dmabuf 3-4 32045 _002534_hash NULL
+_002536_hash vmw_gmr_bind 3 44130 _002536_hash NULL
+_002537_hash vmw_du_crtc_cursor_set 4-5 28479 _002537_hash NULL
-+_002538_hash create_table 2 16213 _002538_hash NULL
-+_002539_hash acl_alloc 1 35979 _002539_hash NULL
-+_002540_hash acl_alloc_stack_init 1 60630 _002540_hash NULL
-+_002541_hash acl_alloc_num 1-2 60778 _002541_hash NULL
++_002538_hash alloc_fdtable 1 17389 _002538_hash NULL
++_002539_hash alloc_ldt 2 21972 _002539_hash NULL
++_002540_hash __alloc_skb 1 23940 _002540_hash NULL
++_002541_hash __ata_change_queue_depth 3 23484 _002541_hash NULL
++_002542_hash ccid3_hc_rx_getsockopt 3 62331 _002542_hash NULL
++_002543_hash ccid3_hc_tx_getsockopt 3 16314 _002543_hash NULL
++_002544_hash cistpl_vers_1 4 15023 _002544_hash NULL
++_002545_hash cmm_read 3 57520 _002545_hash NULL
++_002546_hash cosa_read 3 25966 _002546_hash NULL
++_002547_hash dm_table_create 3 35687 _002547_hash NULL
++_002548_hash do_write_orph_node 2 64343 _002548_hash NULL
++_002550_hash ep0_read 3 38095 _002550_hash NULL
++_002551_hash event_buffer_read 3 48772 _002551_hash NULL
++_002552_hash extract_entropy_user 3 26952 _002552_hash NULL
++_002553_hash ffs_ep0_read 3 2672 _002553_hash NULL
++_002554_hash fill_readbuf 3 32464 _002554_hash NULL
++_002555_hash get_fd_set 1 3866 _002555_hash NULL
++_002556_hash joydev_handle_JSIOCSAXMAP 3 48898 _002675_hash NULL nohasharray
++_002557_hash joydev_handle_JSIOCSBTNMAP 3 15643 _002557_hash NULL
++_002558_hash __kfifo_from_user_r 3 60345 _002558_hash NULL
++_002559_hash kstrtoint_from_user 2 8778 _002559_hash NULL
++_002560_hash kstrtol_from_user 2 10168 _002560_hash NULL
++_002561_hash kstrtoll_from_user 2 19500 _002561_hash NULL
++_002562_hash kstrtos16_from_user 2 28300 _002562_hash NULL
++_002563_hash kstrtos8_from_user 2 58268 _002563_hash NULL
++_002564_hash kstrtou16_from_user 2 54274 _002564_hash NULL
++_002565_hash kstrtou8_from_user 2 55599 _002565_hash NULL
++_002566_hash kstrtouint_from_user 2 10536 _002566_hash NULL
++_002567_hash kstrtoul_from_user 2 64569 _002567_hash NULL
++_002568_hash kstrtoull_from_user 2 63026 _002568_hash NULL
++_002569_hash ntfs_rl_realloc 3 56831 _002569_hash NULL
++_002570_hash ntfs_rl_realloc_nofail 3 32173 _002570_hash NULL
++_002571_hash port_fops_write 3 54627 _002571_hash NULL
++_002572_hash ptp_read 4 63251 _002572_hash NULL
++_002573_hash reqsk_queue_alloc 2 40272 _002573_hash NULL
++_002574_hash resize_info_buffer 2 62889 _002574_hash NULL
++_002575_hash rfkill_fop_write 3 64808 _002575_hash NULL
++_002576_hash rvmalloc 1 46873 _002576_hash NULL
++_002577_hash sctp_getsockopt_active_key 2 45483 _002577_hash NULL
++_002578_hash sctp_getsockopt_adaptation_layer 2 45375 _002578_hash NULL
++_002579_hash sctp_getsockopt_assoc_ids 2 9043 _002579_hash NULL
++_002580_hash sctp_getsockopt_associnfo 2 58169 _002580_hash NULL
++_002581_hash sctp_getsockopt_assoc_number 2 6384 _002581_hash NULL
++_002582_hash sctp_getsockopt_auto_asconf 2 46584 _002582_hash NULL
++_002583_hash sctp_getsockopt_context 2 52490 _002583_hash NULL
++_002584_hash sctp_getsockopt_default_send_param 2 63056 _002584_hash NULL
++_002585_hash sctp_getsockopt_disable_fragments 2 12330 _002585_hash NULL
++_002586_hash sctp_getsockopt_fragment_interleave 2 51215 _002586_hash NULL
++_002587_hash sctp_getsockopt_initmsg 2 26042 _002587_hash NULL
++_002588_hash sctp_getsockopt_mappedv4 2 20044 _002588_hash NULL
++_002589_hash sctp_getsockopt_nodelay 2 9560 _002589_hash NULL
++_002590_hash sctp_getsockopt_partial_delivery_point 2 60952 _002590_hash NULL
++_002591_hash sctp_getsockopt_peeloff 2 59190 _002591_hash NULL
++_002592_hash sctp_getsockopt_peer_addr_info 2 6024 _002592_hash NULL
++_002593_hash sctp_getsockopt_peer_addr_params 2 53645 _002593_hash NULL
++_002594_hash sctp_getsockopt_primary_addr 2 24639 _002594_hash NULL
++_002595_hash sctp_getsockopt_rtoinfo 2 62027 _002595_hash NULL
++_002596_hash sctp_getsockopt_sctp_status 2 56540 _002596_hash NULL
++_002597_hash snd_mixart_BA0_read 5 45069 _002597_hash NULL
++_002598_hash snd_mixart_BA1_read 5 5082 _002598_hash NULL
++_002599_hash snd_pcm_oss_read2 3 54387 _002599_hash NULL
++_002600_hash tomoyo_init_log 2 61526 _002600_hash NULL
++_002601_hash unix_bind 3 15668 _002601_hash NULL
++_002602_hash usbvision_rvmalloc 1 19655 _002602_hash NULL
++_002604_hash v9fs_fid_readn 4 60544 _002604_hash NULL
++_002605_hash v9fs_file_read 3 40858 _002605_hash NULL
++_002606_hash yurex_write 3 8761 _002606_hash NULL
++_002607_hash ab8500_address_write 3 4099 _002607_hash NULL
++_002608_hash ab8500_bank_write 3 51960 _002608_hash NULL
++_002609_hash ab8500_val_write 3 16473 _002609_hash NULL
++_002610_hash alloc_skb 1 55439 _002610_hash NULL
++_002611_hash alloc_skb_fclone 1 3467 _002611_hash NULL
++_002612_hash ata_scsi_change_queue_depth 2 23126 _002612_hash NULL
++_002613_hash beacon_interval_write 3 17952 _002613_hash NULL
++_002614_hash core_sys_select 1 47494 _002614_hash NULL
++_002615_hash dtim_interval_write 3 30489 _002615_hash NULL
++_002616_hash expand_fdtable 2 39273 _002616_hash NULL
++_002617_hash get_chars 3 40373 _002617_hash NULL
++_002618_hash gpio_power_write 3 1991 _002618_hash NULL
++_002619_hash inet_csk_listen_start 2 38233 _002619_hash NULL
++_002620_hash kstrtou32_from_user 2 30361 _002620_hash NULL
++_002621_hash __netdev_alloc_skb 2 18595 _002621_hash NULL
++_002622_hash ntfs_rl_append 2-4 6037 _002622_hash NULL
++_002624_hash ntfs_rl_insert 2-4 4931 _002624_hash NULL
++_002626_hash ntfs_rl_replace 2-4 14136 _002626_hash NULL
++_002628_hash ntfs_rl_split 2-4 52328 _002628_hash NULL
++_002630_hash port_fops_read 3 49626 _002630_hash NULL
++_002631_hash random_read 3 13815 _002631_hash NULL
++_002632_hash rx_streaming_always_write 3 32357 _002632_hash NULL
++_002633_hash rx_streaming_interval_write 3 50120 _002633_hash NULL
++_002634_hash tomoyo_write_log2 2 34318 _002634_hash NULL
++_002635_hash uapsd_queues_write 3 43040 _002635_hash NULL
++_002636_hash urandom_read 3 30462 _002636_hash NULL
++_002637_hash v9fs_direct_read 3 45546 _002637_hash NULL
++_002638_hash v9fs_file_readn 4 36353 _002638_hash &_001421_hash
++_002639_hash alloc_tx 2 32143 _002639_hash NULL
++_002640_hash alloc_wr 1-2 24635 _002640_hash NULL
++_002642_hash ath6kl_fwlog_mask_write 3 24810 _002642_hash NULL
++_002643_hash ath9k_wmi_cmd 4 327 _002643_hash NULL
++_002644_hash atm_alloc_charge 2 19517 _002713_hash NULL nohasharray
++_002645_hash ax25_output 2 22736 _002645_hash NULL
++_002646_hash bcsp_prepare_pkt 3 12961 _002646_hash NULL
++_002647_hash bt_skb_alloc 1 6404 _002647_hash NULL
++_002648_hash cfpkt_create_pfx 1-2 23594 _002648_hash NULL
++_002650_hash cmd_complete 5 14502 _002650_hash NULL
++_002651_hash cxgb3_get_cpl_reply_skb 2 10620 _002651_hash NULL
++_002652_hash dccp_listen_start 2 35918 _002652_hash NULL
++_002653_hash __dev_alloc_skb 1 28681 _002653_hash NULL
++_002654_hash dn_alloc_skb 2 6631 _002654_hash NULL
++_002655_hash do_pselect 1 62061 _002655_hash NULL
++_002656_hash expand_files 2 17080 _002656_hash NULL
++_002657_hash _fc_frame_alloc 1 43568 _002657_hash NULL
++_002658_hash find_skb 2 20431 _002658_hash NULL
++_002659_hash fm_send_cmd 5 39639 _002659_hash NULL
++_002660_hash gem_alloc_skb 2 51715 _002660_hash NULL
++_002661_hash get_packet 3 41914 _002661_hash NULL
++_002662_hash get_packet 3 5747 _002662_hash NULL
++_002663_hash get_packet_pg 4 28023 _002663_hash NULL
++_002664_hash get_skb 2 63008 _002664_hash NULL
++_002665_hash hidp_queue_report 3 1881 _002665_hash NULL
++_002666_hash __hidp_send_ctrl_message 4 28303 _002666_hash NULL
++_002667_hash i2400m_net_rx 5 27170 _002667_hash NULL
++_002668_hash igmpv3_newpack 2 35912 _002668_hash NULL
++_002669_hash inet_listen 2 14723 _002669_hash NULL
++_002670_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _002670_hash &_001560_hash
++_002671_hash isdn_ppp_ccp_xmit_reset 6 63297 _002671_hash NULL
++_002672_hash _l2_alloc_skb 1 11883 _002672_hash NULL
++_002673_hash l3_alloc_skb 1 32289 _002673_hash NULL
++_002674_hash llc_alloc_frame 4 64366 _002674_hash NULL
++_002675_hash mac_drv_rx_init 2 48898 _002675_hash &_002556_hash
++_002676_hash mgmt_event 4 46069 _002676_hash NULL
++_002677_hash mI_alloc_skb 1 24770 _002677_hash NULL
++_002678_hash nci_skb_alloc 2 49757 _002678_hash NULL
++_002679_hash netdev_alloc_skb 2 62437 _002679_hash NULL
++_002680_hash __netdev_alloc_skb_ip_align 2 55067 _002680_hash NULL
++_002681_hash new_skb 1 21148 _002681_hash NULL
++_002682_hash nfc_alloc_skb 1 6216 _002682_hash NULL
++_002683_hash nfulnl_alloc_skb 2 65207 _002683_hash NULL
++_002684_hash ni65_alloc_mem 3 10664 _002684_hash NULL
++_002685_hash pep_alloc_skb 3 46303 _002685_hash NULL
++_002686_hash pn_raw_send 2 54330 _002686_hash NULL
++_002687_hash refill_pool 2 19477 _002687_hash NULL
++_002688_hash rfcomm_wmalloc 2 58090 _002688_hash NULL
++_002689_hash rx 4 57944 _002689_hash NULL
++_002690_hash sctp_ulpevent_new 1 33377 _002690_hash NULL
++_002691_hash send_command 4 10832 _002691_hash NULL
++_002692_hash skb_copy_expand 2-3 7685 _002692_hash &_000454_hash
++_002694_hash sk_stream_alloc_skb 2 57622 _002694_hash NULL
++_002695_hash sock_alloc_send_pskb 2 21246 _002695_hash NULL
++_002696_hash sock_rmalloc 2 59740 _002696_hash &_002085_hash
++_002697_hash sock_wmalloc 2 16472 _002697_hash NULL
++_002698_hash solos_param_store 4 34755 _002698_hash NULL
++_002699_hash sys_select 1 38827 _002699_hash NULL
++_002700_hash t4vf_pktgl_to_skb 2 39005 _002700_hash NULL
++_002701_hash tcp_collapse 5-6 63294 _002701_hash NULL
++_002703_hash tipc_cfg_reply_alloc 1 27606 _002703_hash NULL
++_002704_hash ulog_alloc_skb 1 23427 _002704_hash NULL
++_002705_hash v9fs_cached_file_read 3 2514 _002705_hash NULL
++_002706_hash alloc_fd 1 37637 _002706_hash NULL
++_002707_hash _alloc_mISDN_skb 3 52232 _002707_hash NULL
++_002708_hash ath9k_multi_regread 4 65056 _002708_hash NULL
++_002709_hash ath_rxbuf_alloc 2 24745 _002709_hash NULL
++_002710_hash ax25_send_frame 2 19964 _002710_hash NULL
++_002711_hash cfpkt_create 1 18197 _002711_hash NULL
++_002712_hash console_store 4 36007 _002712_hash NULL
++_002713_hash dev_alloc_skb 1 19517 _002713_hash &_002644_hash
++_002714_hash dn_nsp_do_disc 2-6 49474 _002714_hash NULL
++_002716_hash dsp_cmx_send_member 2 15625 _002716_hash NULL
++_002717_hash fc_frame_alloc 2 1596 _002717_hash NULL
++_002718_hash fc_frame_alloc_fill 2 59394 _002718_hash NULL
++_002719_hash fmc_send_cmd 5 20435 _002719_hash NULL
++_002720_hash hci_send_cmd 3 43810 _002720_hash NULL
++_002721_hash hci_si_event 3 1404 _002721_hash NULL
++_002722_hash hfcpci_empty_bfifo 4 62323 _002722_hash NULL
++_002723_hash hidp_send_ctrl_message 4 43702 _002723_hash NULL
++_002724_hash inet_dccp_listen 2 28565 _002724_hash NULL
++_002725_hash ip6_append_data 4-5 36490 _002725_hash NULL
++_002726_hash __ip_append_data 7-8 36191 _002726_hash NULL
++_002727_hash l1oip_socket_recv 6 56537 _002727_hash NULL
++_002728_hash l2cap_build_cmd 4 48676 _002728_hash NULL
++_002729_hash l2down_create 4 21755 _002729_hash NULL
++_002730_hash l2up_create 3 6430 _002730_hash NULL
++_002731_hash ldisc_receive 4 41516 _002731_hash NULL
++_002734_hash lro_gen_skb 6 2644 _002734_hash NULL
++_002735_hash macvtap_alloc_skb 2-4-3 50629 _002735_hash NULL
++_002737_hash nci_send_cmd 3 58206 _002737_hash NULL
++_002738_hash netdev_alloc_skb_ip_align 2 40811 _002738_hash NULL
++_002739_hash nfqnl_mangle 2 14583 _002739_hash NULL
++_002740_hash p54_alloc_skb 3 34366 _002740_hash &_000339_hash
++_002741_hash packet_alloc_skb 2-5-4 62602 _002741_hash NULL
++_002743_hash pep_indicate 5 38611 _002743_hash NULL
++_002744_hash pep_reply 5 50582 _002744_hash NULL
++_002745_hash pipe_handler_request 5 50774 _002745_hash &_000820_hash
++_002746_hash ql_process_mac_rx_page 4 15543 _002746_hash NULL
++_002747_hash ql_process_mac_rx_skb 4 6689 _002747_hash NULL
++_002748_hash rfcomm_tty_write 3 51603 _002748_hash NULL
++_002749_hash send_mpa_reject 3 7135 _002749_hash NULL
++_002750_hash send_mpa_reply 3 32372 _002750_hash NULL
++_002751_hash sge_rx 3 50594 _002751_hash NULL
++_002752_hash skb_cow_data 2 11565 _002752_hash NULL
++_002753_hash smp_build_cmd 3 45853 _002753_hash NULL
++_002754_hash sock_alloc_send_skb 2 23720 _002754_hash NULL
++_002755_hash sys_dup3 2 33421 _002755_hash NULL
++_002756_hash sys_pselect6 1 57449 _002756_hash NULL
++_002757_hash tcp_fragment 3 20436 _002757_hash NULL
++_002758_hash teiup_create 3 43201 _002758_hash NULL
++_002759_hash tg3_run_loopback 2 30093 _002759_hash NULL
++_002760_hash tun_alloc_skb 2-4-3 41216 _002760_hash NULL
++_002762_hash use_pool 2 64607 _002762_hash NULL
++_002763_hash vxge_rx_alloc 3 52024 _002763_hash NULL
++_002764_hash wl1271_rx_handle_data 3 1714 _002764_hash NULL
++_002765_hash add_packet 3 54433 _002765_hash NULL
++_002766_hash add_rx_skb 3 8257 _002766_hash NULL
++_002767_hash ath6kl_buf_alloc 1 57304 _002767_hash NULL
++_002768_hash bat_ogm_aggregate_new 2 13813 _002768_hash NULL
++_002769_hash bnx2fc_process_l2_frame_compl 3 65072 _002769_hash NULL
++_002770_hash brcmu_pkt_buf_get_skb 1 5556 _002770_hash NULL
++_002771_hash br_send_bpdu 3 29669 _002771_hash NULL
++_002772_hash bt_skb_send_alloc 2 6581 _002772_hash NULL
++_002773_hash c4iw_reject_cr 3 28174 _002773_hash NULL
++_002774_hash carl9170_rx_copy_data 2 21656 _002774_hash NULL
++_002775_hash cfpkt_add_body 3 44630 _002775_hash NULL
++_002776_hash cfpkt_append 3 61206 _002776_hash NULL
++_002777_hash cosa_net_setup_rx 2 38594 _002777_hash NULL
++_002778_hash cxgb4_pktgl_to_skb 2 61899 _002778_hash NULL
++_002779_hash dn_alloc_send_pskb 2 4465 _002779_hash NULL
++_002780_hash dn_nsp_return_disc 2 60296 _002780_hash NULL
++_002781_hash dn_nsp_send_disc 2 23469 _002781_hash NULL
++_002782_hash do_fcntl 3 31468 _002782_hash NULL
++_002783_hash dsp_tone_hw_message 3 17678 _002783_hash NULL
++_002784_hash dvb_net_sec 3 37884 _002784_hash NULL
++_002785_hash e1000_check_copybreak 3 62448 _002785_hash NULL
++_002786_hash fast_rx_path 3 59214 _002786_hash NULL
++_002787_hash fc_fcp_frame_alloc 2 12624 _002787_hash NULL
++_002788_hash fcoe_ctlr_send_keep_alive 3 15308 _002788_hash NULL
++_002789_hash fwnet_incoming_packet 3 40380 _002789_hash NULL
++_002790_hash fwnet_pd_new 4 39947 _002790_hash NULL
++_002791_hash got_frame 2 16028 _002791_hash NULL
++_002792_hash gsm_mux_rx_netchar 3 33336 _002792_hash NULL
++_002793_hash hdlcdev_rx 3 997 _002793_hash NULL
++_002794_hash hfc_empty_fifo 2 57972 _002794_hash NULL
++_002795_hash hfcpci_empty_fifo 4 2427 _002795_hash NULL
++_002796_hash hidp_output_raw_report 3 5629 _002796_hash NULL
++_002797_hash hysdn_rx_netpkt 3 16136 _002797_hash NULL
++_002798_hash ieee80211_fragment 4 33112 _002798_hash NULL
++_002799_hash ieee80211_probereq_get 4-6 29069 _002799_hash NULL
++_002801_hash ieee80211_send_auth 5 60865 _002801_hash NULL
++_002802_hash ieee80211_tdls_mgmt 8 9581 _002802_hash NULL
++_002803_hash ip6_ufo_append_data 5-7-6 4780 _002803_hash NULL
++_002806_hash ip_ufo_append_data 6-8-7 12775 _002806_hash NULL
++_002809_hash ipw_packet_received_skb 2 1230 _002809_hash NULL
++_002810_hash iwch_reject_cr 3 23901 _002810_hash NULL
++_002811_hash iwm_rx_packet_alloc 3 9898 _002811_hash NULL
++_002812_hash ixgb_check_copybreak 3 5847 _002812_hash NULL
++_002813_hash l1oip_socket_parse 4 4507 _002813_hash NULL
++_002814_hash l2cap_send_cmd 4 14548 _002814_hash NULL
++_002816_hash mcs_unwrap_fir 3 25733 _002816_hash NULL
++_002817_hash mcs_unwrap_mir 3 9455 _002817_hash NULL
++_002818_hash mld_newpack 2 50950 _002818_hash NULL
++_002819_hash p54_download_eeprom 4 43842 _002819_hash NULL
++_002820_hash ppp_tx_cp 5 62044 _002820_hash NULL
++_002821_hash prism2_send_mgmt 4 62605 _002821_hash &_001733_hash
++_002822_hash prism2_sta_send_mgmt 5 43916 _002822_hash NULL
++_002823_hash _queue_data 4 54983 _002823_hash NULL
++_002824_hash read_fifo 3 826 _002824_hash NULL
++_002825_hash receive_copy 3 12216 _002825_hash NULL
++_002826_hash rtl8169_try_rx_copy 3 705 _002826_hash NULL
++_002827_hash _rtl92s_firmware_downloadcode 3 14021 _002827_hash NULL
++_002828_hash rx_data 4 60442 _002828_hash NULL
++_002829_hash set_rxd_buffer_pointer 8 9950 _002829_hash NULL
++_002830_hash sis190_try_rx_copy 3 57069 _002830_hash NULL
++_002831_hash skge_rx_get 3 40598 _002831_hash NULL
++_002832_hash smctr_process_rx_packet 2 13270 _002832_hash NULL
++_002833_hash sys_dup2 2 25284 _002833_hash NULL
++_002834_hash tcp_mark_head_lost 2 35895 _002834_hash NULL
++_002835_hash tcp_match_skb_to_sack 3-4 23568 _002835_hash NULL
++_002837_hash tso_fragment 3 29050 _002837_hash NULL
++_002838_hash tt_response_fill_table 1 57902 _002838_hash NULL
++_002839_hash udpv6_sendmsg 4 22316 _002839_hash NULL
++_002840_hash velocity_rx_copy 2 34583 _002840_hash NULL
++_002841_hash zd_mac_rx 3 38296 _002841_hash NULL
++_002842_hash ath6kl_wmi_get_new_buf 1 52304 _002842_hash NULL
++_002843_hash bat_ogm_queue_add 3 40337 _002843_hash NULL
++_002844_hash brcmf_alloc_pkt_and_read 2 63116 _002844_hash &_001638_hash
++_002845_hash brcmf_sdioh_request_buffer 7 40239 _002845_hash NULL
++_002846_hash carl9170_handle_mpdu 3 11056 _002846_hash NULL
++_002847_hash cfpkt_add_trail 3 27260 _002847_hash NULL
++_002848_hash cfpkt_pad_trail 2 55511 _002848_hash NULL
++_002849_hash dvb_net_sec_callback 2 28786 _002849_hash NULL
++_002850_hash fwnet_receive_packet 9 50537 _002850_hash NULL
++_002851_hash handle_rx_packet 3 58993 _002851_hash NULL
++_002852_hash hysdn_sched_rx 3 60533 _002852_hash NULL
++_002858_hash ipwireless_network_packet_received 4 51277 _002858_hash NULL
++_002859_hash l2cap_bredr_sig_cmd 3 49065 _002859_hash NULL
++_002860_hash ppp_cp_event 6 2965 _002860_hash NULL
++_002861_hash receive_client_update_packet 3 49104 _002861_hash NULL
++_002862_hash receive_server_sync_packet 3 59021 _002862_hash NULL
++_002863_hash sky2_receive 2 13407 _002863_hash NULL
++_002864_hash sys_fcntl 3 19267 _002864_hash NULL
++_002865_hash sys_fcntl64 3 29031 _002865_hash NULL
++_002866_hash tcp_sacktag_walk 5-6 26339 _002866_hash NULL
++_002868_hash tcp_write_xmit 2 39755 _002868_hash NULL
++_002869_hash wl1271_cmd_build_probe_req 3-5 51141 _002869_hash NULL
++_002870_hash ath6kl_wmi_send_probe_response_cmd 5 45422 _002870_hash NULL
++_002871_hash ath6kl_wmi_set_appie_cmd 4 47855 _002871_hash NULL
++_002872_hash ath6kl_wmi_startscan_cmd 7 24580 _002872_hash NULL
++_002873_hash ath6kl_wmi_test_cmd 3 27312 _002873_hash NULL
++_002874_hash brcmf_sdcard_recv_buf 6 24006 _002874_hash NULL
++_002875_hash brcmf_sdcard_rwdata 5 65041 _002875_hash NULL
++_002876_hash brcmf_sdcard_send_buf 6 54980 _002876_hash NULL
++_002877_hash __carl9170_rx 3 56784 _002877_hash NULL
++_002878_hash cfpkt_setlen 2 49343 _002878_hash NULL
++_002880_hash tcp_push_one 2 48816 _002880_hash NULL
++_002881_hash __tcp_push_pending_frames 2 48148 _002881_hash NULL
++_002882_hash ath6kl_tm_rx_report 3 44494 _002882_hash NULL
++_002883_hash brcmf_sdbrcm_membytes 3-5 37324 _002883_hash NULL
++_002885_hash brcmf_sdbrcm_read_control 3 22721 _002885_hash NULL
++_002886_hash brcmf_sdbrcm_send_buf 6 9129 _002886_hash NULL
++_002887_hash carl9170_rx 3 13272 _002887_hash NULL
++_002888_hash carl9170_rx_stream 3 1334 _002888_hash NULL
++_002889_hash tcp_push 3 10680 _002889_hash NULL
++_002890_hash compat_sys_fcntl64 3 60256 _002890_hash NULL
++_002891_hash snd_nm256_capture_copy 5 28622 _002891_hash NULL
++_002892_hash snd_nm256_playback_copy 5 38567 _002892_hash NULL
++_002893_hash tomoyo_init_log 2 14806 _002893_hash NULL
++_002894_hash compat_sys_fcntl 3 15654 _002894_hash NULL
++_002895_hash tomoyo_write_log2 2 11732 _002895_hash NULL
++_002896_hash OS_mem_token_alloc 1 14276 _002896_hash NULL
++_002897_hash packet_came 3 18072 _002897_hash NULL
++_002898_hash softsynth_write 3 3455 _002898_hash NULL
++_002899_hash __get_vm_area_node 1 55305 _002899_hash NULL
++_002900_hash vm_map_ram 2 23078 _002900_hash &_001809_hash
++_002901_hash get_vm_area 1 18080 _002901_hash NULL
++_002902_hash __get_vm_area 1 61599 _002902_hash NULL
++_002903_hash get_vm_area_caller 1 10527 _002903_hash NULL
++_002904_hash __get_vm_area_caller 1 56416 _002938_hash NULL nohasharray
++_002905_hash alloc_vm_area 1 36149 _002905_hash NULL
++_002906_hash __ioremap_caller 1-2 21800 _002906_hash NULL
++_002908_hash vmap 2 15025 _002908_hash NULL
++_002909_hash ioremap_cache 1-2 47189 _002909_hash NULL
++_002911_hash ioremap_nocache 1-2 2439 _002911_hash NULL
++_002913_hash ioremap_prot 1-2 51764 _002913_hash NULL
++_002915_hash ioremap_wc 1-2 62695 _002915_hash NULL
++_002916_hash acpi_os_ioremap 1-2 49523 _002916_hash NULL
++_002918_hash devm_ioremap_nocache 2-3 2036 _002918_hash NULL
++_002920_hash __einj_error_trigger 1 12304 _002920_hash NULL
++_002921_hash io_mapping_map_wc 2 19284 _002921_hash NULL
++_002922_hash ioremap 1-2 23172 _002922_hash NULL
++_002924_hash msix_map_region 3 3411 _002924_hash NULL
++_002925_hash pci_iomap 3 47575 _002925_hash NULL
++_002926_hash sfi_map_memory 1-2 5183 _002926_hash NULL
++_002928_hash xlate_dev_mem_ptr 1 15291 _002928_hash &_001958_hash
++_002929_hash a4t_cs_init 3 27734 _002929_hash NULL
++_002930_hash aac_nark_ioremap 2 50163 _002930_hash &_000009_hash
++_002931_hash aac_rkt_ioremap 2 3333 _002931_hash NULL
++_002932_hash aac_rx_ioremap 2 52410 _002932_hash NULL
++_002933_hash aac_sa_ioremap 2 13596 _002933_hash &_000201_hash
++_002934_hash aac_src_ioremap 2 41688 _002934_hash NULL
++_002935_hash acpi_os_map_memory 1-2 11161 _002935_hash NULL
++_002937_hash acpi_os_read_memory 1-3 54186 _002937_hash NULL
++_002938_hash acpi_os_write_memory 1-3 56416 _002938_hash &_002904_hash
++_002939_hash acpi_pre_map 1 51532 _002939_hash NULL
++_002940_hash c101_run 2 37279 _002940_hash NULL
++_002941_hash check586 2 29914 _002941_hash NULL
++_002942_hash check_mirror 1-2 57342 _002942_hash &_001368_hash
++_002944_hash cru_detect 1 11272 _002944_hash NULL
++_002945_hash cs553x_init_one 3 58886 _002945_hash NULL
++_002946_hash cycx_setup 4 47562 _002946_hash NULL
++_002947_hash DepcaSignature 2 80 _002947_hash &_000976_hash
++_002948_hash devm_ioremap 2-3 29235 _002948_hash NULL
++_002950_hash dma_declare_coherent_memory 2-4 14244 _002950_hash NULL
++_002952_hash doc_probe 1 23285 _002952_hash NULL
++_002953_hash DoC_Probe 1 57534 _002953_hash NULL
++_002954_hash ems_pcmcia_add_card 2 62627 _002954_hash NULL
++_002955_hash gdth_init_isa 1 28091 _002955_hash NULL
++_002956_hash gdth_search_isa 1 58595 _002956_hash NULL
++_002957_hash isp1760_register 1-2 628 _002957_hash NULL
++_002959_hash mthca_map_reg 2-3 5664 _002959_hash NULL
++_002961_hash n2_run 3 53459 _002961_hash NULL
++_002962_hash pcim_iomap 3 58334 _002962_hash NULL
++_002963_hash probe_bios 1 17467 _002963_hash NULL
++_002964_hash register_device 2-3 60015 _002964_hash NULL
++_002966_hash remap_pci_mem 1-2 15966 _002966_hash NULL
++_002968_hash rtl_port_map 1-2 2385 _002968_hash NULL
++_002970_hash sfi_map_table 1 5462 _002970_hash NULL
++_002971_hash sriov_enable_migration 2 14889 _002971_hash NULL
++_002972_hash ssb_bus_scan 2 36578 _002972_hash NULL
++_002973_hash ssb_ioremap 2 5228 _002973_hash NULL
++_002974_hash tpm_tis_init 2-3 15304 _002974_hash NULL
++_002975_hash acpi_ex_system_memory_space_handler 2 31192 _002975_hash NULL
++_002976_hash acpi_tb_check_xsdt 1 21862 _002976_hash NULL
++_002977_hash acpi_tb_install_table 1 12988 _002977_hash NULL
++_002978_hash acpi_tb_parse_root_table 1 53455 _002978_hash NULL
++_002979_hash com90xx_found 3 13974 _002979_hash NULL
++_002980_hash dmam_declare_coherent_memory 2-4 43679 _002980_hash NULL
++_002982_hash gdth_isa_probe_one 1 48925 _002982_hash NULL
++_002983_hash sfi_check_table 1 6772 _002983_hash NULL
++_002984_hash sfi_sysfs_install_table 1 51688 _002984_hash NULL
++_002985_hash sriov_enable 2 59689 _002985_hash NULL
++_002986_hash ssb_bus_register 3 65183 _002986_hash NULL
++_002987_hash pci_enable_sriov 2 35745 _002987_hash NULL
++_002988_hash ssb_bus_pcmciabus_register 3 56020 _002988_hash NULL
++_002989_hash ssb_bus_ssbbus_register 2 2217 _002989_hash NULL
++_002990_hash lpfc_sli_probe_sriov_nr_virtfn 2 26004 _002990_hash NULL
++_002991_hash lguest_map 1-2 42008 _002991_hash NULL
++_002994_hash alloc_vm_area 1 15989 _002994_hash NULL
++_002996_hash efi_ioremap 1-2 3492 _002996_hash &_000763_hash
++_002998_hash init_chip_wc_pat 2 62768 _002998_hash NULL
++_002999_hash io_mapping_create_wc 1-2 1354 _002999_hash NULL
++_003001_hash iommu_map_mmio_space 1 30919 _003001_hash NULL
++_003002_hash ca91cx42_alloc_resource 2 10502 _003002_hash NULL
++_003003_hash tsi148_alloc_resource 2 24563 _003003_hash NULL
++_003004_hash ca91cx42_master_set 4 23146 _003004_hash NULL
++_003005_hash tsi148_master_set 4 14685 _003005_hash NULL
++_003006_hash alloc_ftrace_hash 1 57431 _003006_hash &_002350_hash
++_003007_hash alloc_ieee80211 1 20063 _003007_hash NULL
++_003008_hash alloc_ieee80211_rsl 1 34564 _003008_hash NULL
++_003009_hash alloc_private 2 22399 _003009_hash &_002274_hash
++_003010_hash alloc_rtllib 1 51136 _003010_hash NULL
++_003011_hash alloc_rx_desc_ring 2 18016 _003011_hash NULL
++_003012_hash alloc_sched_domains 1 47756 _003012_hash NULL
++_003013_hash alloc_subdevices 2 43300 _003013_hash NULL
++_003014_hash arcfb_write 3 8702 _003014_hash NULL
++_003015_hash arch_gnttab_map_shared 3 7970 _003015_hash NULL
++_003016_hash atyfb_setup_generic 3 49151 _003016_hash NULL
++_003017_hash b1_alloc_card 1 36155 _003017_hash NULL
++_003018_hash broadsheetfb_write 3 39976 _003018_hash NULL
++_003019_hash broadsheet_spiflash_rewrite_sector 2 54864 _003019_hash NULL
++_003020_hash capabilities_read 3 58457 _003020_hash NULL
++_003021_hash capinc_tty_write 3 28539 _003021_hash NULL
++_003022_hash capi_write 3 35104 _003022_hash NULL
++_003023_hash cmpk_message_handle_tx 4 54024 _003023_hash NULL
++_003024_hash cmtp_add_msgpart 4 9252 _003024_hash NULL
++_003025_hash cmtp_send_interopmsg 7 376 _003025_hash NULL
++_003026_hash comedi_buf_alloc 3 24822 _003026_hash NULL
++_003027_hash comedi_read 3 13199 _003027_hash NULL
++_003028_hash comedi_write 3 47926 _003028_hash NULL
++_003029_hash dccpprobe_read 3 52549 _003029_hash NULL
++_003030_hash __devres_alloc 2 25598 _003030_hash NULL
++_003031_hash diva_os_alloc_message_buffer 1 64568 _003031_hash NULL
++_003032_hash diva_os_copy_from_user 4 7792 _003032_hash NULL
++_003033_hash diva_os_copy_to_user 4 48508 _003033_hash &_001671_hash
++_003034_hash diva_os_malloc 2 16406 _003034_hash NULL
++_003035_hash divasa_remap_pci_bar 3-4 23485 _003035_hash &_000678_hash
++_003037_hash do_test 1 15766 _003037_hash NULL
++_003038_hash event_enable_write 3 45238 _003038_hash NULL
++_003039_hash evtchn_read 3 3569 _003039_hash NULL
++_003040_hash evtchn_write 3 43278 _003040_hash NULL
++_003041_hash ext_sd_execute_read_data 9 48589 _003041_hash NULL
++_003042_hash ext_sd_execute_write_data 9 8175 _003042_hash NULL
++_003043_hash fb_sys_read 3 13778 _003043_hash NULL
++_003044_hash fb_sys_write 3 33130 _003044_hash NULL
++_003045_hash firmwareUpload 3 32794 _003045_hash NULL
++_003046_hash ftrace_profile_write 3 53327 _003046_hash NULL
++_003047_hash fw_download_code 3 13249 _003047_hash NULL
++_003048_hash fwSendNullPacket 2 54618 _003048_hash NULL
++_003049_hash gather_array 3 56641 _003049_hash NULL
++_003050_hash gntdev_alloc_map 2 35145 _003050_hash NULL
++_003051_hash gnttab_map 2 56439 _003051_hash NULL
++_003052_hash gru_alloc_gts 2-3 60056 _003052_hash NULL
++_003054_hash hecubafb_write 3 26942 _003054_hash NULL
++_003055_hash hycapi_rx_capipkt 3 11602 _003055_hash NULL
++_003056_hash ieee80211_alloc_txb 1-2 52477 _003056_hash NULL
++_003058_hash ieee80211_authentication_req 3 63973 _003058_hash NULL
++_003059_hash ieee80211_wx_set_gen_ie 3 51399 _003059_hash NULL
++_003060_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _003060_hash NULL
++_003061_hash init_per_cpu 1 17880 _003061_hash NULL
++_003062_hash ivtvfb_write 3 40023 _003062_hash NULL
++_003063_hash metronomefb_write 3 8823 _003063_hash NULL
++_003064_hash mga_ioremap 1-2 8571 _003064_hash NULL
++_003066_hash netfs_trans_alloc 2-4 6136 _003066_hash NULL
++_003068_hash ni_gpct_device_construct 5 610 _003068_hash NULL
++_003069_hash odev_update 2 50169 _003069_hash NULL
++_003070_hash options_write 3 47243 _003070_hash NULL
++_003071_hash pmcraid_copy_sglist 3 38431 _003071_hash NULL
++_003072_hash pohmelfs_name_alloc 1 1036 _003072_hash NULL
++_003073_hash pohmelfs_readpages_trans_complete 2 63912 _003073_hash NULL
++_003074_hash proc_fault_inject_read 3 36802 _003074_hash NULL
++_003075_hash proc_fault_inject_write 3 21058 _003075_hash NULL
++_003076_hash ptc_proc_write 3 12076 _003076_hash NULL
++_003077_hash queue_reply 3 22416 _003077_hash NULL
++_003078_hash rb_simple_write 3 20890 _003078_hash NULL
++_003079_hash Realloc 2 34961 _003079_hash NULL
++_003080_hash reportdesc_callback 3 38603 _003080_hash NULL
++_003081_hash rtllib_alloc_txb 1-2 21687 _003081_hash NULL
++_003083_hash rtllib_authentication_req 3 26713 _003083_hash NULL
++_003084_hash rtllib_wx_set_gen_ie 3 59808 _003084_hash NULL
++_003085_hash rts51x_transfer_data_partial 6 5735 _003085_hash NULL
++_003086_hash SendTxCommandPacket 3 42901 _003086_hash NULL
++_003087_hash slow_kernel_write 2 19764 _003087_hash NULL
++_003088_hash split 2 11691 _003088_hash NULL
++_003089_hash stack_max_size_write 3 36068 _003089_hash NULL
++_003090_hash store_debug_level 3 35652 _003090_hash NULL
++_003091_hash system_enable_write 3 61396 _003091_hash NULL
++_003092_hash trace_options_core_write 3 61551 _003092_hash NULL
++_003093_hash trace_options_write 3 48275 _003093_hash NULL
++_003094_hash tracing_ctrl_write 3 42324 _003094_hash &_001563_hash
++_003095_hash tracing_entries_write 3 60563 _003095_hash NULL
++_003096_hash tracing_max_lat_write 3 8728 _003096_hash NULL
++_003097_hash tracing_read_dyn_info 3 45468 _003097_hash NULL
++_003098_hash ttm_bo_ioremap 2-3 31082 _003098_hash NULL
++_003100_hash ttm_bo_kmap_ttm 3 5922 _003100_hash NULL
++_003101_hash ttm_put_pages 2 38411 _003101_hash NULL
++_003102_hash tunables_read 3 36385 _003102_hash NULL
++_003103_hash tunables_write 3 59563 _003103_hash NULL
++_003104_hash u32_array_read 3 2219 _003104_hash NULL
++_003105_hash ufx_alloc_urb_list 3 10349 _003105_hash NULL
++_003106_hash um_idi_write 3 18293 _003106_hash NULL
++_003107_hash usb_buffer_alloc 2 36276 _003107_hash NULL
++_003108_hash viafb_dfph_proc_write 3 49288 _003108_hash NULL
++_003109_hash viafb_dfpl_proc_write 3 627 _003109_hash NULL
++_003110_hash viafb_dvp0_proc_write 3 23023 _003110_hash NULL
++_003111_hash viafb_dvp1_proc_write 3 48864 _003111_hash NULL
++_003112_hash viafb_vt1636_proc_write 3 16018 _003112_hash NULL
++_003113_hash vivi_read 3 23073 _003113_hash NULL
++_003114_hash xdi_copy_from_user 4 8395 _003114_hash NULL
++_003115_hash xdi_copy_to_user 4 48900 _003115_hash NULL
++_003116_hash xenbus_file_write 3 6282 _003116_hash NULL
++_003117_hash xpc_kmalloc_cacheline_aligned 1 42895 _003117_hash NULL
++_003118_hash xpc_kzalloc_cacheline_aligned 1 65433 _003118_hash NULL
++_003119_hash xsd_read 3 15653 _003119_hash NULL
++_003120_hash alloc_and_copy_ftrace_hash 1 29368 _003120_hash NULL
++_003121_hash c4_add_card 3 54968 _003121_hash NULL
++_003122_hash picolcd_fb_write 3 2318 _003122_hash NULL
++_003123_hash ttm_bo_kmap 2-3 60118 _003123_hash NULL
++_003124_hash dlfb_ops_write 3 64150 _003124_hash NULL
++_003125_hash ieee80211_auth_challenge 3 18810 _003125_hash NULL
++_003126_hash ieee80211_rtl_auth_challenge 3 61897 _003126_hash NULL
++_003127_hash ms_read_multiple_pages 4-5 8052 _003127_hash NULL
++_003129_hash ms_write_multiple_pages 5-6 10362 _003129_hash NULL
++_003131_hash pohmelfs_send_readpages 3 9537 _003131_hash NULL
++_003132_hash pohmelfs_send_xattr_req 6 49783 _003132_hash NULL
++_003133_hash resize_async_buffer 4 64031 _003133_hash &_002048_hash
++_003134_hash rtllib_auth_challenge 3 12493 _003134_hash NULL
++_003135_hash ufx_ops_write 3 54848 _003135_hash NULL
++_003136_hash viafb_iga1_odev_proc_write 3 36241 _003136_hash NULL
++_003137_hash viafb_iga2_odev_proc_write 3 2363 _003137_hash NULL
++_003138_hash xd_read_multiple_pages 4-5 11422 _003138_hash NULL
++_003140_hash xd_write_multiple_pages 5-6 53633 _003140_hash NULL
++_003142_hash xenfb_write 3 43412 _003142_hash NULL
++_003143_hash ms_rw_multi_sector 4 7459 _003143_hash NULL
++_003144_hash pohmelfs_setxattr 4 39281 _003144_hash NULL
++_003145_hash xd_rw 4 49020 _003145_hash NULL
++_003146_hash ms_rw 4 17220 _003146_hash NULL
++_003147_hash create_table 2 16213 _003147_hash NULL
++_003148_hash acl_alloc 1 35979 _003148_hash NULL
++_003149_hash acl_alloc_stack_init 1 60630 _003149_hash NULL
++_003150_hash acl_alloc_num 1-2 60778 _003150_hash NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..cc96254
+index 0000000..5af42b5
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,1204 @@
+@@ -0,0 +1,1558 @@
+/*
+ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -86607,6 +87207,8 @@ index 0000000..cc96254
+#define CREATE_NEW_VAR NULL_TREE
+#define CODES_LIMIT 32
+#define MAX_PARAM 10
++#define MY_STMT GF_PLF_1
++#define NO_CAST_CHECK GF_PLF_2
+
+#if BUILDING_GCC_VERSION == 4005
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
@@ -86616,20 +87218,30 @@ index 0000000..cc96254
+void debug_gimple_stmt(gimple gs);
+
+static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
-+static tree signed_size_overflow_type;
-+static tree unsigned_size_overflow_type;
+static tree report_size_overflow_decl;
+static tree const_char_ptr_type_node;
+static unsigned int handle_function(void);
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before);
++static tree get_size_overflow_type(gimple stmt, tree node);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20120618beta",
++ .version = "20120811beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
-+ unsigned int arg_count = type_num_arguments(*node);
++ unsigned int arg_count;
++
++ if (TREE_CODE(*node) == FUNCTION_DECL)
++ arg_count = type_num_arguments(TREE_TYPE(*node));
++ else if (TREE_CODE(*node) == FUNCTION_TYPE || TREE_CODE(*node) == METHOD_TYPE)
++ arg_count = type_num_arguments(*node);
++ else {
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ return NULL_TREE;
++ }
+
+ for (; args; args = TREE_CHAIN(args)) {
+ tree position = TREE_VALUE(args);
@@ -86641,13 +87253,13 @@ index 0000000..cc96254
+ return NULL_TREE;
+}
+
-+static struct attribute_spec no_size_overflow_attr = {
++static struct attribute_spec size_overflow_attr = {
+ .name = "size_overflow",
+ .min_length = 1,
+ .max_length = -1,
-+ .decl_required = false,
-+ .type_required = true,
-+ .function_type_required = true,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
+ .handler = handle_size_overflow_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = false
@@ -86656,7 +87268,7 @@ index 0000000..cc96254
+
+static void register_attributes(void __unused *event_data, void __unused *data)
+{
-+ register_attribute(&no_size_overflow_attr);
++ register_attribute(&size_overflow_attr);
+}
+
+// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
@@ -86707,6 +87319,7 @@ index 0000000..cc96254
+
+static inline gimple get_def_stmt(tree node)
+{
++ gcc_assert(node != NULL_TREE);
+ gcc_assert(TREE_CODE(node) == SSA_NAME);
+ return SSA_NAME_DEF_STMT(node);
+}
@@ -86869,11 +87482,11 @@ index 0000000..cc96254
+ gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
+
+ type = TREE_TYPE(arg);
-+ // skip function pointers
-+ if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
++
++ if (TREE_CODE(type) == POINTER_TYPE)
+ return;
+
-+ if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
+ return;
+
+ argnum = find_arg_number(arg, func);
@@ -86894,6 +87507,22 @@ index 0000000..cc96254
+ return new_var;
+}
+
++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree type = TREE_TYPE(rhs1);
++ tree lhs = create_new_var(type);
++
++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ gimple_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++ return assign;
++}
++
+static bool is_bool(tree node)
+{
+ tree type;
@@ -86913,34 +87542,63 @@ index 0000000..cc96254
+
+static tree cast_a_tree(tree type, tree var)
+{
-+ gcc_assert(type != NULL_TREE && var != NULL_TREE);
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(var != NULL_TREE);
+ gcc_assert(fold_convertible_p(type, var));
+
+ return fold_convert(type, var);
+}
+
-+static tree signed_cast(tree var)
-+{
-+ return cast_a_tree(signed_size_overflow_type, var);
-+}
-+
-+static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
++static gimple build_cast_stmt(tree type, tree var, tree new_var, gimple_stmt_iterator *gsi, bool before)
+{
+ gimple assign;
++ location_t loc;
++
++ gcc_assert(type != NULL_TREE && var != NULL_TREE);
++ if (gsi_end_p(*gsi) && before == BEFORE_STMT)
++ gcc_unreachable();
+
+ if (new_var == CREATE_NEW_VAR)
+ new_var = create_new_var(type);
+
+ assign = gimple_build_assign(new_var, cast_a_tree(type, var));
-+ gimple_set_location(assign, loc);
++
++ if (!gsi_end_p(*gsi)) {
++ loc = gimple_location(gsi_stmt(*gsi));
++ gimple_set_location(assign, loc);
++ }
++
+ gimple_set_lhs(assign, make_ssa_name(new_var, assign));
+
++ if (before)
++ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++ else
++ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++
+ return assign;
+}
+
++static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++
++ if (new_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
++ return gimple_get_lhs(assign);
++ }
++ return new_rhs1;
++}
++
+static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
+{
-+ tree oldstmt_rhs1;
++ tree oldstmt_rhs1, size_overflow_type, lhs;
+ enum tree_code code;
+ gimple stmt;
+ gimple_stmt_iterator gsi;
@@ -86954,13 +87612,18 @@ index 0000000..cc96254
+ gcc_unreachable();
+ }
+
++ if (gimple_code(oldstmt) == GIMPLE_ASM)
++ lhs = rhs1;
++ else
++ lhs = gimple_get_lhs(oldstmt);
++
+ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
+ code = TREE_CODE(oldstmt_rhs1);
+ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
+ check_missing_attribute(oldstmt_rhs1);
+
-+ stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
+ gsi = gsi_for_stmt(oldstmt);
++ pointer_set_insert(visited, oldstmt);
+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
+ basic_block next_bb, cur_bb;
+ edge e;
@@ -86978,18 +87641,20 @@ index 0000000..cc96254
+
+ gsi = gsi_after_labels(next_bb);
+ gcc_assert(!gsi_end_p(gsi));
++
+ before = true;
++ oldstmt = gsi_stmt(gsi);
++ pointer_set_insert(visited, oldstmt);
+ }
-+ if (before)
-+ gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
-+ else
-+ gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
-+ update_stmt(stmt);
-+ pointer_set_insert(visited, oldstmt);
++
++ size_overflow_type = get_size_overflow_type(oldstmt, lhs);
++
++ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
++ gimple_set_plf(stmt, MY_STMT, true);
+ return gimple_get_lhs(stmt);
+}
+
-+static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
++static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+ tree new_var, lhs = gimple_get_lhs(oldstmt);
+ gimple stmt;
@@ -86998,6 +87663,9 @@ index 0000000..cc96254
+ if (!*potentionally_overflowed)
+ return NULL_TREE;
+
++ if (gimple_plf(oldstmt, MY_STMT))
++ return lhs;
++
+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
+ rhs1 = gimple_assign_rhs1(oldstmt);
+ rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
@@ -87009,6 +87677,7 @@ index 0000000..cc96254
+
+ stmt = gimple_copy(oldstmt);
+ gimple_set_location(stmt, gimple_location(oldstmt));
++ gimple_set_plf(stmt, MY_STMT, true);
+
+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
@@ -87016,13 +87685,13 @@ index 0000000..cc96254
+ if (is_bool(lhs))
+ new_var = SSA_NAME_VAR(lhs);
+ else
-+ new_var = create_new_var(signed_size_overflow_type);
++ new_var = create_new_var(size_overflow_type);
+ new_var = make_ssa_name(new_var, stmt);
+ gimple_set_lhs(stmt, new_var);
+
+ if (rhs1 != NULL_TREE) {
+ if (!gimple_assign_cast_p(oldstmt))
-+ rhs1 = signed_cast(rhs1);
++ rhs1 = cast_a_tree(size_overflow_type, rhs1);
+ gimple_assign_set_rhs1(stmt, rhs1);
+ }
+
@@ -87057,6 +87726,7 @@ index 0000000..cc96254
+ gsi = gsi_for_stmt(oldstmt);
+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
+ gimple_set_bb(phi, bb);
++ gimple_set_plf(phi, MY_STMT, true);
+ return phi;
+}
+
@@ -87070,28 +87740,29 @@ index 0000000..cc96254
+ return first_bb;
+}
+
-+static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
++static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
+{
+ basic_block bb;
-+ gimple newstmt, def_stmt;
++ gimple newstmt;
+ gimple_stmt_iterator gsi;
++ bool before = BEFORE_STMT;
+
-+ newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
-+ if (TREE_CODE(arg) == SSA_NAME) {
-+ def_stmt = get_def_stmt(arg);
-+ if (gimple_code(def_stmt) != GIMPLE_NOP) {
-+ gsi = gsi_for_stmt(def_stmt);
-+ gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
-+ return newstmt;
-+ }
++ if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
++ gsi = gsi_for_stmt(get_def_stmt(arg));
++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
++ return gimple_get_lhs(newstmt);
+ }
+
+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
-+ if (bb->index == 0)
-+ bb = create_a_first_bb();
+ gsi = gsi_after_labels(bb);
-+ gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
-+ return newstmt;
++ if (bb->index == 0) {
++ bb = create_a_first_bb();
++ gsi = gsi_start_bb(bb);
++ }
++ if (gsi_end_p(gsi))
++ before = AFTER_STMT;
++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
++ return gimple_get_lhs(newstmt);
+}
+
+static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
@@ -87124,30 +87795,36 @@ index 0000000..cc96254
+
+ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
+ gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
++ gimple_set_plf(newstmt, MY_STMT, true);
+ update_stmt(newstmt);
+ return newstmt;
+}
+
-+static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
++static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree arg, tree new_var)
+{
+ gimple newstmt;
+ tree new_rhs;
+
+ new_rhs = expand(visited, potentionally_overflowed, arg);
-+
+ if (new_rhs == NULL_TREE)
+ return NULL_TREE;
+
++ new_rhs = cast_to_new_size_overflow_type(get_def_stmt(new_rhs), new_rhs, size_overflow_type, AFTER_STMT);
++
+ newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
+ return gimple_get_lhs(newstmt);
+}
+
-+static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
++static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
+{
-+ gimple phi;
-+ tree new_var = create_new_var(signed_size_overflow_type);
++ gimple phi, oldstmt = get_def_stmt(var);
++ tree new_var, size_overflow_type;
+ unsigned int i, n = gimple_phi_num_args(oldstmt);
+
++ size_overflow_type = get_size_overflow_type(oldstmt, var);
++
++ new_var = create_new_var(size_overflow_type);
++
+ pointer_set_insert(visited, oldstmt);
+ phi = overflow_create_phi_node(oldstmt, new_var);
+ for (i = 0; i < n; i++) {
@@ -87155,10 +87832,10 @@ index 0000000..cc96254
+
+ arg = gimple_phi_arg_def(oldstmt, i);
+ if (is_gimple_constant(arg))
-+ arg = signed_cast(arg);
-+ lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
++ arg = cast_a_tree(size_overflow_type, arg);
++ lhs = build_new_phi_arg(visited, potentionally_overflowed, size_overflow_type, arg, new_var);
+ if (lhs == NULL_TREE)
-+ lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
++ lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_var, i);
+ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
+ }
+
@@ -87166,35 +87843,132 @@ index 0000000..cc96254
+ return gimple_phi_result(phi);
+}
+
-+static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
+{
-+ gimple def_stmt = get_def_stmt(var);
-+ tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++
++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ return gimple_get_lhs(assign);
++}
++
++static void change_rhs1(gimple stmt, tree new_rhs1)
++{
++ tree assign_rhs;
++ tree rhs = gimple_assign_rhs1(stmt);
++
++ assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
++ gimple_assign_set_rhs1(stmt, assign_rhs);
++ update_stmt(stmt);
++}
++
++static bool check_mode_type(gimple stmt)
++{
++ tree lhs = gimple_get_lhs(stmt);
++ tree lhs_type = TREE_TYPE(lhs);
++ tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++
++ if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
++ return false;
++
++ if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
++ return false;
++
++ return true;
++}
++
++static bool check_undefined_integer_operation(gimple stmt)
++{
++ gimple def_stmt;
++ tree lhs = gimple_get_lhs(stmt);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs1_type = TREE_TYPE(rhs1);
++ tree lhs_type = TREE_TYPE(lhs);
++
++ if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++ return false;
++
++ def_stmt = get_def_stmt(rhs1);
++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
++ return false;
++ return true;
++}
++
++static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt)
++{
++ tree size_overflow_type, lhs = gimple_get_lhs(stmt);
++ tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs1_type = TREE_TYPE(rhs1);
++ tree lhs_type = TREE_TYPE(lhs);
+
+ *potentionally_overflowed = true;
++
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
-+ if (new_rhs1 == NULL_TREE) {
-+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
-+ else
-+ return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
++
++ if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return lhs;
++
++ if (gimple_plf(stmt, NO_CAST_CHECK)) {
++ size_overflow_type = get_size_overflow_type(stmt, rhs1);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
++ }
++
++ if (!gimple_assign_cast_p(stmt)) {
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
++ }
++
++ if (check_undefined_integer_operation(stmt)) {
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
+ }
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
++
++ size_overflow_type = get_size_overflow_type(stmt, rhs1);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++
++ change_rhs1(stmt, new_rhs1);
++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, potentionally_overflowed, BEFORE_STMT);
++
++ if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ if (!check_mode_type(stmt))
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++
++ check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, potentionally_overflowed, BEFORE_STMT);
++
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
+}
+
-+static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
+{
-+ gimple def_stmt = get_def_stmt(var);
++ gimple def_stmt = get_def_stmt(lhs);
+ tree rhs1 = gimple_assign_rhs1(def_stmt);
+
+ if (is_gimple_constant(rhs1))
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast(rhs1), NULL_TREE, NULL_TREE);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
+ gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
+ switch (TREE_CODE(rhs1)) {
+ case SSA_NAME:
-+ return handle_unary_rhs(visited, potentionally_overflowed, var);
-+
++ return handle_unary_rhs(visited, potentionally_overflowed, def_stmt);
+ case ARRAY_REF:
+ case BIT_FIELD_REF:
+ case ADDR_EXPR:
@@ -87206,7 +87980,7 @@ index 0000000..cc96254
+ case PARM_DECL:
+ case TARGET_MEM_REF:
+ case VAR_DECL:
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
+ default:
+ debug_gimple_stmt(def_stmt);
@@ -87242,11 +88016,12 @@ index 0000000..cc96254
+ return build1(ADDR_EXPR, ptr_type_node, string);
+}
+
-+static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
++static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg, bool min)
+{
+ gimple func_stmt, def_stmt;
-+ tree current_func, loc_file, loc_line;
++ tree current_func, loc_file, loc_line, ssa_name;
+ expanded_location xloc;
++ char ssa_name_buf[100];
+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
+
+ def_stmt = get_def_stmt(arg);
@@ -87266,8 +88041,15 @@ index 0000000..cc96254
+ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
+ current_func = create_string_param(current_func);
+
-+ // void report_size_overflow(const char *file, unsigned int line, const char *func)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
++ if (min)
++ snprintf(ssa_name_buf, 100, "%s_%u (min)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
++ else
++ snprintf(ssa_name_buf, 100, "%s_%u (max)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
++ ssa_name = build_string(100, ssa_name_buf);
++ ssa_name = create_string_param(ssa_name);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
+
+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
+}
@@ -87279,14 +88061,15 @@ index 0000000..cc96254
+ inform(loc, "Integer size_overflow check applied here.");
+}
+
-+static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
+{
+ basic_block cond_bb, join_bb, bb_true;
+ edge e;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+ cond_bb = gimple_bb(stmt);
-+ gsi_prev(&gsi);
++ if (before)
++ gsi_prev(&gsi);
+ if (gsi_end_p(gsi))
+ e = split_block_after_labels(cond_bb);
+ else
@@ -87312,80 +88095,218 @@ index 0000000..cc96254
+ }
+
+ insert_cond(cond_bb, arg, cond_code, type_value);
-+ insert_cond_result(bb_true, stmt, arg);
++ insert_cond_result(bb_true, stmt, arg, min);
+
+// print_the_code_insertions(stmt);
+}
+
-+static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before)
+{
-+ gimple ucast_stmt;
-+ gimple_stmt_iterator gsi;
-+ location_t loc = gimple_location(stmt);
++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min, rhs_type = TREE_TYPE(rhs);
++ gcc_assert(rhs_type != NULL_TREE);
++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
+
-+ ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
-+ return ucast_stmt;
++ if (!*potentionally_overflowed)
++ return;
++
++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++ gcc_assert(!TREE_OVERFLOW(type_max));
++
++ cast_rhs_type = TREE_TYPE(cast_rhs);
++ type_max_type = TREE_TYPE(type_max);
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
++ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
++
++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
+}
+
-+static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
++static tree get_handle_const_assign_size_overflow_type(gimple def_stmt, tree var_rhs)
+{
-+ tree type_max, type_min, rhs_type = TREE_TYPE(rhs);
-+ gimple ucast_stmt;
++ gimple var_rhs_def_stmt;
++ tree lhs = gimple_get_lhs(def_stmt);
++ tree lhs_type = TREE_TYPE(lhs);
++ tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
++ tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
+
-+ if (!*potentionally_overflowed)
-+ return;
++ if (var_rhs == NULL_TREE)
++ return get_size_overflow_type(def_stmt, lhs);
+
-+ if (TYPE_UNSIGNED(rhs_type)) {
-+ ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
-+ type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
-+ } else {
-+ type_max = signed_cast(TYPE_MAX_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
++ var_rhs_def_stmt = get_def_stmt(var_rhs);
+
-+ type_min = signed_cast(TYPE_MIN_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
++ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
++ debug_gimple_stmt(def_stmt);
++ gcc_unreachable();
+ }
++
++ return get_size_overflow_type(def_stmt, lhs);
+}
+
-+static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
++static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var_rhs, tree new_rhs1, tree new_rhs2)
+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree origtype = TREE_TYPE(orig_rhs);
++ tree new_rhs, size_overflow_type, orig_rhs;
++ void (*gimple_assign_set_rhs)(gimple, tree);
++ tree rhs1 = gimple_assign_rhs1(def_stmt);
++ tree rhs2 = gimple_assign_rhs2(def_stmt);
++ tree lhs = gimple_get_lhs(def_stmt);
+
-+ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++ if (var_rhs == NULL_TREE)
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
-+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ update_stmt(assign);
-+ return gimple_get_lhs(assign);
-+}
++ if (new_rhs2 == NULL_TREE) {
++ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs1);
++ new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
++ orig_rhs = rhs1;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++ } else {
++ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs2);
++ new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
++ orig_rhs = rhs2;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++ }
+
-+static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree orig_rhs, tree var_rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
-+{
-+ tree new_rhs;
++ var_rhs = cast_to_new_size_overflow_type(def_stmt, var_rhs, size_overflow_type, BEFORE_STMT);
+
+ if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
+
-+ if (var_rhs == NULL_TREE)
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ check_size_overflow(def_stmt, size_overflow_type, var_rhs, orig_rhs, potentionally_overflowed, BEFORE_STMT);
+
+ new_rhs = change_assign_rhs(def_stmt, orig_rhs, var_rhs);
+ gimple_assign_set_rhs(def_stmt, new_rhs);
+ update_stmt(def_stmt);
+
-+ check_size_overflow(def_stmt, var_rhs, orig_rhs, potentionally_overflowed);
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+}
+
-+static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree get_cast_def_stmt_rhs(tree new_rhs)
+{
-+ tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(var);
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(new_rhs);
++ // get_size_overflow_type
++ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
++ gcc_assert(gimple_assign_cast_p(def_stmt));
++ return gimple_assign_rhs1(def_stmt);
++}
++
++static tree cast_to_int_TI_type_and_check(bool *potentionally_overflowed, gimple stmt, tree new_rhs)
++{
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt, def_stmt;
++ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
++
++ if (mode != TImode && mode != DImode) {
++ def_stmt = get_def_stmt(new_rhs);
++ gcc_assert(gimple_assign_cast_p(def_stmt));
++ new_rhs = gimple_assign_rhs1(def_stmt);
++ mode = TYPE_MODE(TREE_TYPE(new_rhs));
++ }
++
++ gcc_assert(mode == TImode || mode == DImode);
++
++ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
++ return new_rhs;
++
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ new_rhs = gimple_get_lhs(cast_stmt);
++
++ if (mode == DImode)
++ return new_rhs;
++
++ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, potentionally_overflowed, BEFORE_STMT);
++
++ return new_rhs;
++}
++
++static bool is_an_integer_trunction(gimple stmt)
++{
++ gimple rhs1_def_stmt, rhs2_def_stmt;
++ tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
++ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
++
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
++ return false;
++
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ rhs2_def_stmt = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ return false;
++
++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
++ return false;
++
++ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
++ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
++ return true;
++}
++
++static tree handle_integer_truncation(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
++{
++ tree new_rhs1, new_rhs2, size_overflow_type;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_an_integer_trunction(stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
++ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
++
++ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
++
++ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
++ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_get_lhs(assign);
++ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, potentionally_overflowed, AFTER_STMT);
++
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
++{
++ tree rhs1, rhs2, size_overflow_type, new_lhs;
++ gimple def_stmt = get_def_stmt(lhs);
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+
@@ -87406,32 +88327,41 @@ index 0000000..cc96254
+ case EXACT_DIV_EXPR:
+ case POINTER_PLUS_EXPR:
+ case BIT_AND_EXPR:
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+ default:
+ break;
+ }
+
+ *potentionally_overflowed = true;
+
++ new_lhs = handle_integer_truncation(visited, potentionally_overflowed, lhs);
++ if (new_lhs != NULL_TREE)
++ return new_lhs;
++
+ if (TREE_CODE(rhs1) == SSA_NAME)
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
+ if (TREE_CODE(rhs2) == SSA_NAME)
+ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
+
+ if (is_gimple_constant(rhs2))
-+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, new_rhs1, signed_cast(rhs2), &gimple_assign_set_rhs1);
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
+
+ if (is_gimple_constant(rhs1))
-+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, new_rhs2, signed_cast(rhs1), new_rhs2, &gimple_assign_set_rhs2);
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
++
++ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
++
++ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
+
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+#if BUILDING_GCC_VERSION >= 4007
-+static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
++static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree rhs)
+{
+ if (is_gimple_constant(rhs))
-+ return signed_cast(rhs);
++ return cast_a_tree(size_overflow_type, rhs);
+ if (TREE_CODE(rhs) != SSA_NAME)
+ return NULL_TREE;
+ return expand(visited, potentionally_overflowed, rhs);
@@ -87439,61 +88369,72 @@ index 0000000..cc96254
+
+static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
+{
-+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
+ gimple def_stmt = get_def_stmt(var);
+
+ *potentionally_overflowed = true;
+
++ size_overflow_type = get_size_overflow_type(def_stmt, var);
++
+ rhs1 = gimple_assign_rhs1(def_stmt);
+ rhs2 = gimple_assign_rhs2(def_stmt);
+ rhs3 = gimple_assign_rhs3(def_stmt);
-+ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
-+ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
-+ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
++ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs1);
++ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs2);
++ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs3);
++
++ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ new_rhs3 = cast_to_new_size_overflow_type(def_stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
+
-+ if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
-+ error("handle_ternary_ops: unknown rhs");
-+ gcc_unreachable();
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
+}
+#endif
+
-+static void set_size_overflow_type(tree node)
++static tree get_size_overflow_type(gimple stmt, tree node)
+{
-+ switch (TYPE_MODE(TREE_TYPE(node))) {
++ tree type;
++
++ gcc_assert(node != NULL_TREE);
++
++ type = TREE_TYPE(node);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return TREE_TYPE(node);
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
++ case HImode:
++ return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
+ case SImode:
-+ signed_size_overflow_type = intDI_type_node;
-+ unsigned_size_overflow_type = unsigned_intDI_type_node;
-+ break;
++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
+ case DImode:
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
-+ signed_size_overflow_type = intDI_type_node;
-+ unsigned_size_overflow_type = unsigned_intDI_type_node;
-+ } else {
-+ signed_size_overflow_type = intTI_type_node;
-+ unsigned_size_overflow_type = unsigned_intTI_type_node;
-+ }
-+ break;
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
++ return (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
+ default:
-+ error("set_size_overflow_type: unsupported gcc configuration.");
++ debug_tree(node);
++ error("get_size_overflow_type: unsupported gcc configuration.");
+ gcc_unreachable();
+ }
+}
+
+static tree expand_visited(gimple def_stmt)
+{
-+ gimple tmp;
++ gimple next_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
+
+ gsi_next(&gsi);
-+ tmp = gsi_stmt(gsi);
-+ switch (gimple_code(tmp)) {
++ next_stmt = gsi_stmt(gsi);
++
++ switch (gimple_code(next_stmt)) {
+ case GIMPLE_ASSIGN:
-+ return gimple_get_lhs(tmp);
++ return gimple_get_lhs(next_stmt);
+ case GIMPLE_PHI:
-+ return gimple_phi_result(tmp);
++ return gimple_phi_result(next_stmt);
+ case GIMPLE_CALL:
-+ return gimple_call_lhs(tmp);
++ return gimple_call_lhs(next_stmt);
+ default:
+ return NULL_TREE;
+ }
@@ -87511,19 +88452,18 @@ index 0000000..cc96254
+ return NULL_TREE;
+
+ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
-+ if (code != INTEGER_TYPE)
-+ return NULL_TREE;
+
-+ if (SSA_NAME_IS_DEFAULT_DEF(var)) {
++ if (TREE_CODE(SSA_NAME_VAR(var)) == PARM_DECL)
+ check_missing_attribute(var);
-+ return NULL_TREE;
-+ }
+
+ def_stmt = get_def_stmt(var);
+
+ if (!def_stmt)
+ return NULL_TREE;
+
++ if (gimple_plf(def_stmt, MY_STMT))
++ return var;
++
+ if (pointer_set_contains(visited, def_stmt))
+ return expand_visited(def_stmt);
+
@@ -87532,7 +88472,7 @@ index 0000000..cc96254
+ check_missing_attribute(var);
+ return NULL_TREE;
+ case GIMPLE_PHI:
-+ return build_new_phi(visited, potentionally_overflowed, def_stmt);
++ return build_new_phi(visited, potentionally_overflowed, var);
+ case GIMPLE_CALL:
+ case GIMPLE_ASM:
+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
@@ -87562,9 +88502,7 @@ index 0000000..cc96254
+
+ gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
+
-+ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ update_stmt(assign);
++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
+
+ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
+ update_stmt(stmt);
@@ -87614,8 +88552,6 @@ index 0000000..cc96254
+
+ check_arg_type(arg);
+
-+ set_size_overflow_type(arg);
-+
+ visited = pointer_set_create();
+ potentionally_overflowed = false;
+ newarg = expand(visited, &potentionally_overflowed, arg);
@@ -87626,7 +88562,7 @@ index 0000000..cc96254
+
+ change_function_arg(stmt, arg, argnum, newarg);
+
-+ check_size_overflow(stmt, newarg, arg, &potentionally_overflowed);
++ check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, &potentionally_overflowed, BEFORE_STMT);
+}
+
+static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
@@ -87654,14 +88590,29 @@ index 0000000..cc96254
+ handle_function_arg(stmt, fndecl, num - 1);
+}
+
++static void set_plf_false(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ }
++}
++
+static unsigned int handle_function(void)
+{
-+ basic_block bb = ENTRY_BLOCK_PTR->next_bb;
-+ int saved_last_basic_block = last_basic_block;
++ basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
++
++ set_plf_false();
+
+ do {
+ gimple_stmt_iterator gsi;
-+ basic_block next = bb->next_bb;
++ next = bb->next_bb;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ tree fndecl, attr;
@@ -87674,15 +88625,16 @@ index 0000000..cc96254
+ continue;
+ if (gimple_call_num_args(stmt) == 0)
+ continue;
-+ attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
+ if (!attr || !TREE_VALUE(attr))
+ handle_function_by_hash(stmt, fndecl);
+ else
+ handle_function_by_attribute(stmt, attr, fndecl);
+ gsi = gsi_for_stmt(stmt);
++ next = gimple_bb(stmt)->next_bb;
+ }
+ bb = next;
-+ } while (bb && bb->index <= saved_last_basic_block);
++ } while (bb);
+ return 0;
+}
+
@@ -87710,11 +88662,12 @@ index 0000000..cc96254
+
+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
+
-+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
+ fntype = build_function_type_list(void_type_node,
+ const_char_ptr_type_node,
+ unsigned_type_node,
+ const_char_ptr_type_node,
++ const_char_ptr_type_node,
+ NULL_TREE);
+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
+
@@ -87722,6 +88675,7 @@ index 0000000..cc96254
+ TREE_PUBLIC(report_size_overflow_decl) = 1;
+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
@@ -87754,7 +88708,7 @@ index 0000000..cc96254
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
+ if (enable) {
-+ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
diff --git a/3.2.26/4430_grsec-remove-localversion-grsec.patch b/3.2.27/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.26/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.27/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.26/4435_grsec-mute-warnings.patch b/3.2.27/4435_grsec-mute-warnings.patch
index e85abd6..e85abd6 100644
--- a/3.2.26/4435_grsec-mute-warnings.patch
+++ b/3.2.27/4435_grsec-mute-warnings.patch
diff --git a/3.2.26/4440_grsec-remove-protected-paths.patch b/3.2.27/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.2.26/4440_grsec-remove-protected-paths.patch
+++ b/3.2.27/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.26/4450_grsec-kconfig-default-gids.patch b/3.2.27/4450_grsec-kconfig-default-gids.patch
index 0ab1250..0ab1250 100644
--- a/3.2.26/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.27/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.26/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.27/4465_selinux-avc_audit-log-curr_ip.patch
index 48acad7..48acad7 100644
--- a/3.2.26/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.27/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.26/4470_disable-compat_vdso.patch b/3.2.27/4470_disable-compat_vdso.patch
index 4742d01..4742d01 100644
--- a/3.2.26/4470_disable-compat_vdso.patch
+++ b/3.2.27/4470_disable-compat_vdso.patch
diff --git a/3.5.1/0000_README b/3.5.1/0000_README
index f37be86..48a305c 100644
--- a/3.5.1/0000_README
+++ b/3.5.1/0000_README
@@ -6,7 +6,7 @@ Patch: 1000_linux-3.5.1.patch
From: http://www.kernel.org
Desc: Linux 3.5.1
-Patch: 4420_grsecurity-2.9.1-3.5.1-201208091728.patch
+Patch: 4420_grsecurity-2.9.1-3.5.1-201208112021.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.5.1/4420_grsecurity-2.9.1-3.5.1-201208091728.patch b/3.5.1/4420_grsecurity-2.9.1-3.5.1-201208112021.patch
index d9ee574..e9ffa80 100644
--- a/3.5.1/4420_grsecurity-2.9.1-3.5.1-201208091728.patch
+++ b/3.5.1/4420_grsecurity-2.9.1-3.5.1-201208112021.patch
@@ -798,7 +798,7 @@ index 5eecab1..609abc0 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index c79f61f..9269ea1 100644
+index c79f61f..9ac0642 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -17,17 +17,35 @@
@@ -959,7 +959,7 @@ index c79f61f..9269ea1 100644
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
-+" sub %0, %1, %4\n"
++" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1287,7 +1287,7 @@ index c79f61f..9269ea1 100644
-" sbc %H0, %H0, %H4\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, %4\n"
-+" sbc %H0, %H1, %H4\n"
++" sbcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1356,7 +1356,7 @@ index c79f61f..9269ea1 100644
-" sbc %H0, %H0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, #1\n"
-+" sbc %H0, %H1, #0\n"
++" sbcs %H0, %H1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -1389,7 +1389,8 @@ index c79f61f..9269ea1 100644
-" beq 2f\n"
+" beq 4f\n"
" adds %0, %0, %6\n"
- " adc %H0, %H0, %H6\n"
+-" adc %H0, %H0, %H6\n"
++" adcs %H0, %H0, %H6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
@@ -17685,15 +17686,15 @@ index d04d3ec..ea4b374 100644
if (regs->sp >= curbase + sizeof(struct thread_info) +
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
-index 1d5d31e..ab846ed 100644
+index 1d5d31e..72731d4 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
-@@ -28,6 +28,8 @@ struct setup_data_node {
+@@ -27,7 +27,7 @@ struct setup_data_node {
+ u32 len;
};
- static ssize_t setup_data_read(struct file *file, char __user *user_buf,
-+ size_t count, loff_t *ppos) __size_overflow(3);
-+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
+-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
++static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct setup_data_node *node = file->private_data;
@@ -22647,7 +22648,7 @@ index a63efd6..ccecad8 100644
ret
CFI_ENDPROC
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
-index 1781b2f..18e3040 100644
+index 1781b2f..90368dd 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -42,10 +42,12 @@ do { \
@@ -22738,7 +22739,7 @@ index 1781b2f..18e3040 100644
".section .fixup,\"ax\"\n"
"101: lea 0(%%eax,%0,4),%0\n"
" jmp 100b\n"
-@@ -201,46 +205,152 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+@@ -201,46 +205,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
}
static unsigned long
@@ -22845,9 +22846,7 @@ index 1781b2f..18e3040 100644
+ return size;
+}
+
-+static unsigned long
-+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long
++static unsigned long __size_overflow(3)
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
int d0, d1;
@@ -22909,7 +22908,7 @@ index 1781b2f..18e3040 100644
" movl %%eax, 56(%3)\n"
" movl %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -252,9 +362,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -252,9 +360,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -22921,12 +22920,12 @@ index 1781b2f..18e3040 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -298,47 +408,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+@@ -297,48 +405,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ * hyoshiok@miraclelinux.com
*/
- static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-+ const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
@@ -22989,7 +22988,7 @@ index 1781b2f..18e3040 100644
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -351,9 +463,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -351,9 +459,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -23001,12 +23000,12 @@ index 1781b2f..18e3040 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -392,47 +504,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+@@ -391,48 +499,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ return size;
}
- static unsigned long __copy_user_intel_nocache(void *to,
-+ const void __user *from, unsigned long size) __size_overflow(3);
-+static unsigned long __copy_user_intel_nocache(void *to,
+-static unsigned long __copy_user_intel_nocache(void *to,
++static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
int d0, d1;
@@ -23069,7 +23068,7 @@ index 1781b2f..18e3040 100644
" movnti %%eax, 56(%3)\n"
" movnti %%edx, 60(%3)\n"
" addl $-64, %0\n"
-@@ -445,9 +559,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -445,9 +553,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
@@ -23081,7 +23080,7 @@ index 1781b2f..18e3040 100644
"8:\n"
".section .fixup,\"ax\"\n"
"9: lea 0(%%eax,%0,4),%0\n"
-@@ -487,32 +601,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+@@ -487,32 +595,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
*/
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
unsigned long size);
@@ -23123,7 +23122,7 @@ index 1781b2f..18e3040 100644
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
" jmp 2b\n" \
-@@ -537,14 +655,14 @@ do { \
+@@ -537,14 +649,14 @@ do { \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
@@ -23141,7 +23140,7 @@ index 1781b2f..18e3040 100644
"2:\n" \
".section .fixup,\"ax\"\n" \
"5: addl %3,%0\n" \
-@@ -627,9 +745,9 @@ survive:
+@@ -627,9 +739,9 @@ survive:
}
#endif
if (movsl_is_ok(to, from, n))
@@ -23153,7 +23152,7 @@ index 1781b2f..18e3040 100644
return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
-@@ -649,10 +767,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+@@ -649,10 +761,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
unsigned long n)
{
if (movsl_is_ok(to, from, n))
@@ -23166,7 +23165,7 @@ index 1781b2f..18e3040 100644
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
-@@ -679,65 +796,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+@@ -679,65 +790,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
else
@@ -46978,7 +46977,7 @@ index 82c3533..34e929c 100644
lock_flocks();
diff --git a/fs/namei.c b/fs/namei.c
-index 7d69419..1487852 100644
+index 7d69419..10c6af6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -265,16 +265,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -47053,27 +47052,25 @@ index 7d69419..1487852 100644
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
-@@ -1386,6 +1402,9 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -1386,6 +1402,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (!res)
res = walk_component(nd, path, &nd->last,
nd->last_type, LOOKUP_FOLLOW);
-+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) {
++ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
+ res = -EACCES;
-+ }
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1779,6 +1798,9 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1779,6 +1797,8 @@ static int path_lookupat(int dfd, const char *name,
err = follow_link(&link, nd, &cookie);
if (!err)
err = lookup_last(nd, &path);
-+ if (!err && gr_handle_symlink_owner(&link, nd->inode)) {
++ if (!err && gr_handle_symlink_owner(&link, nd->inode))
+ err = -EACCES;
-+ }
put_link(nd, &link, cookie);
}
}
-@@ -1786,6 +1808,21 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1786,6 +1806,21 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -47095,7 +47092,7 @@ index 7d69419..1487852 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!nd->inode->i_op->lookup) {
path_put(&nd->path);
-@@ -1813,6 +1850,15 @@ static int do_path_lookup(int dfd, const char *name,
+@@ -1813,6 +1848,15 @@ static int do_path_lookup(int dfd, const char *name,
retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
if (likely(!retval)) {
@@ -47111,7 +47108,7 @@ index 7d69419..1487852 100644
if (unlikely(!audit_dummy_context())) {
if (nd->path.dentry && nd->inode)
audit_inode(name, nd->path.dentry);
-@@ -2155,6 +2201,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2155,6 +2199,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -47125,7 +47122,16 @@ index 7d69419..1487852 100644
return 0;
}
-@@ -2220,6 +2273,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+@@ -2190,7 +2241,7 @@ static inline int open_to_namei_flags(int flag)
+ /*
+ * Handle the last step of open()
+ */
+-static struct file *do_last(struct nameidata *nd, struct path *path,
++static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
+ const struct open_flags *op, const char *pathname)
+ {
+ struct dentry *dir = nd->path.dentry;
+@@ -2220,16 +2271,44 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -47142,7 +47148,14 @@ index 7d69419..1487852 100644
audit_inode(pathname, nd->path.dentry);
if (open_flag & O_CREAT) {
error = -EISDIR;
-@@ -2230,6 +2293,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ goto ok;
+ case LAST_BIND:
error = complete_walk(nd);
if (error)
return ERR_PTR(error);
@@ -47156,10 +47169,14 @@ index 7d69419..1487852 100644
+ error = -ENOENT;
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
audit_inode(pathname, dir);
goto ok;
}
-@@ -2285,6 +2358,12 @@ retry_lookup:
+@@ -2285,6 +2364,12 @@ retry_lookup:
/* Negative dentry, just create the file */
if (!dentry->d_inode) {
umode_t mode = op->mode;
@@ -47172,7 +47189,7 @@ index 7d69419..1487852 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2308,6 +2387,8 @@ retry_lookup:
+@@ -2308,6 +2393,8 @@ retry_lookup:
error = vfs_create(dir->d_inode, dentry, mode, nd);
if (error)
goto exit_mutex_unlock;
@@ -47181,7 +47198,7 @@ index 7d69419..1487852 100644
mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->path.dentry);
nd->path.dentry = dentry;
-@@ -2317,6 +2398,19 @@ retry_lookup:
+@@ -2317,6 +2404,23 @@ retry_lookup:
/*
* It already exists.
*/
@@ -47190,6 +47207,10 @@ index 7d69419..1487852 100644
+ error = -ENOENT;
+ goto exit_mutex_unlock;
+ }
++ if (link && gr_handle_symlink_owner(link, dentry->d_inode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
+
+ /* only check if O_CREAT is specified, all other checks need to go
+ into may_open */
@@ -47201,7 +47222,27 @@ index 7d69419..1487852 100644
mutex_unlock(&dir->d_inode->i_mutex);
audit_inode(pathname, path->dentry);
-@@ -2367,6 +2461,16 @@ finish_lookup:
+@@ -2349,6 +2453,11 @@ finish_lookup:
+ }
+ }
+ BUG_ON(inode != path->dentry->d_inode);
++ /* if we're resolving a symlink to another symlink */
++ if (link && gr_handle_symlink_owner(link, inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ return NULL;
+ }
+
+@@ -2358,7 +2467,6 @@ finish_lookup:
+ save_parent.dentry = nd->path.dentry;
+ save_parent.mnt = mntget(path->mnt);
+ nd->path.dentry = path->dentry;
+-
+ }
+ nd->inode = inode;
+ /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
+@@ -2367,6 +2475,21 @@ finish_lookup:
path_put(&save_parent);
return ERR_PTR(error);
}
@@ -47215,26 +47256,36 @@ index 7d69419..1487852 100644
+ error = -ENOENT;
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
++
error = -EISDIR;
if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
goto exit;
-@@ -2476,8 +2580,14 @@ static struct file *path_openat(int dfd, const char *pathname,
+@@ -2461,7 +2584,7 @@ static struct file *path_openat(int dfd, const char *pathname,
+ if (unlikely(error))
+ goto out_filp;
+
+- filp = do_last(nd, &path, op, pathname);
++ filp = do_last(nd, &path, NULL, op, pathname);
+ while (unlikely(!filp)) { /* trailing symlink */
+ struct path link = path;
+ void *cookie;
+@@ -2476,8 +2599,9 @@ static struct file *path_openat(int dfd, const char *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
filp = ERR_PTR(error);
- else
+- filp = do_last(nd, &path, op, pathname);
+ else {
- filp = do_last(nd, &path, op, pathname);
-+ if (!IS_ERR(filp) && gr_handle_symlink_owner(&link, nd->inode)) {
-+ if (filp)
-+ fput(filp);
-+ filp = ERR_PTR(-EACCES);
-+ }
++ filp = do_last(nd, &path, &link, op, pathname);
+ }
put_link(nd, &link, cookie);
}
out:
-@@ -2577,6 +2687,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
+@@ -2577,6 +2701,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
*path = nd.path;
return dentry;
eexist:
@@ -47246,7 +47297,7 @@ index 7d69419..1487852 100644
dput(dentry);
dentry = ERR_PTR(-EEXIST);
fail:
-@@ -2599,6 +2714,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
+@@ -2599,6 +2728,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
}
EXPORT_SYMBOL(user_path_create);
@@ -47267,7 +47318,7 @@ index 7d69419..1487852 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -2665,6 +2794,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
+@@ -2665,6 +2808,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -47285,7 +47336,7 @@ index 7d69419..1487852 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out_drop_write;
-@@ -2682,6 +2822,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
+@@ -2682,6 +2836,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
}
out_drop_write:
mnt_drop_write(path.mnt);
@@ -47295,7 +47346,7 @@ index 7d69419..1487852 100644
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2735,12 +2878,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
+@@ -2735,12 +2892,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -47317,7 +47368,7 @@ index 7d69419..1487852 100644
out_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2820,6 +2972,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2820,6 +2986,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
char * name;
struct dentry *dentry;
struct nameidata nd;
@@ -47326,7 +47377,7 @@ index 7d69419..1487852 100644
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
-@@ -2848,6 +3002,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2848,6 +3016,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
error = -ENOENT;
goto exit3;
}
@@ -47342,7 +47393,7 @@ index 7d69419..1487852 100644
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit3;
-@@ -2855,6 +3018,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -2855,6 +3032,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
if (error)
goto exit4;
error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
@@ -47351,7 +47402,7 @@ index 7d69419..1487852 100644
exit4:
mnt_drop_write(nd.path.mnt);
exit3:
-@@ -2917,6 +3082,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2917,6 +3096,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct dentry *dentry;
struct nameidata nd;
struct inode *inode = NULL;
@@ -47360,7 +47411,7 @@ index 7d69419..1487852 100644
error = user_path_parent(dfd, pathname, &nd, &name);
if (error)
-@@ -2939,6 +3106,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2939,6 +3120,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (!inode)
goto slashes;
ihold(inode);
@@ -47377,7 +47428,7 @@ index 7d69419..1487852 100644
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit2;
-@@ -2946,6 +3123,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -2946,6 +3137,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (error)
goto exit3;
error = vfs_unlink(nd.path.dentry->d_inode, dentry);
@@ -47386,7 +47437,7 @@ index 7d69419..1487852 100644
exit3:
mnt_drop_write(nd.path.mnt);
exit2:
-@@ -3021,10 +3200,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+@@ -3021,10 +3214,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
error = mnt_want_write(path.mnt);
if (error)
goto out_dput;
@@ -47405,7 +47456,7 @@ index 7d69419..1487852 100644
out_drop_write:
mnt_drop_write(path.mnt);
out_dput:
-@@ -3099,6 +3286,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3099,6 +3300,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
{
struct dentry *new_dentry;
struct path old_path, new_path;
@@ -47413,7 +47464,7 @@ index 7d69419..1487852 100644
int how = 0;
int error;
-@@ -3122,7 +3310,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3122,7 +3324,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
if (error)
return error;
@@ -47422,7 +47473,7 @@ index 7d69419..1487852 100644
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
-@@ -3133,13 +3321,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3133,13 +3335,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
error = mnt_want_write(new_path.mnt);
if (error)
goto out_dput;
@@ -47453,7 +47504,7 @@ index 7d69419..1487852 100644
dput(new_dentry);
mutex_unlock(&new_path.dentry->d_inode->i_mutex);
path_put(&new_path);
-@@ -3373,6 +3578,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+@@ -3373,6 +3592,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
if (new_dentry == trap)
goto exit5;
@@ -47466,7 +47517,7 @@ index 7d69419..1487852 100644
error = mnt_want_write(oldnd.path.mnt);
if (error)
goto exit5;
-@@ -3382,6 +3593,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+@@ -3382,6 +3607,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
goto exit6;
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
@@ -47476,7 +47527,7 @@ index 7d69419..1487852 100644
exit6:
mnt_drop_write(oldnd.path.mnt);
exit5:
-@@ -3407,6 +3621,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -3407,6 +3635,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -47485,7 +47536,7 @@ index 7d69419..1487852 100644
int len;
len = PTR_ERR(link);
-@@ -3416,7 +3632,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -3416,7 +3646,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -58049,7 +58100,7 @@ index 0000000..05a6015
+}
diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
new file mode 100644
-index 0000000..551a2e7
+index 0000000..a225b02
--- /dev/null
+++ b/grsecurity/grsec_link.c
@@ -0,0 +1,59 @@
@@ -58066,7 +58117,7 @@ index 0000000..551a2e7
+
+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
+ /* ignore root-owned links, e.g. /proc/self */
-+ !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) &&
++ !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
+ !uid_eq(link_inode->i_uid, target->i_uid)) {
+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
+ return 1;
@@ -63694,7 +63745,7 @@ index 642cb73..7ff7f9f 100644
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/slab.h b/include/linux/slab.h
-index 67d5d94..371d9a7 100644
+index 67d5d94..bbd740b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,12 +11,20 @@
@@ -63744,15 +63795,7 @@ index 67d5d94..371d9a7 100644
/*
* Allocator specific definitions. These are mainly used to establish optimized
-@@ -240,6 +253,7 @@ size_t ksize(const void *);
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
- */
-+static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
- static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
- {
- if (size != 0 && n > SIZE_MAX / size)
-@@ -298,7 +312,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+@@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
@@ -63761,7 +63804,7 @@ index 67d5d94..371d9a7 100644
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
#else
-@@ -317,7 +331,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+@@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
@@ -63845,7 +63888,7 @@ index fbd1117..0a3d314 100644
return kmem_cache_alloc_node_trace(size, cachep, flags, node);
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
-index 0ec00b3..39cb7fc 100644
+index 0ec00b3..22b4715 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
@@ -63857,16 +63900,17 @@ index 0ec00b3..39cb7fc 100644
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
-@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+@@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc_node(size, flags, -1);
}
-+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *__kmalloc(size_t size, gfp_t flags)
+-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags);
+ }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
-index c2f8c8b..be9e036 100644
+index c2f8c8b..d992a41 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -92,7 +92,7 @@ struct kmem_cache {
@@ -63878,15 +63922,16 @@ index c2f8c8b..be9e036 100644
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
-@@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+@@ -153,7 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
* Sorry that the following has to be that ugly but some versions of GCC
* have trouble with constant propagation and loops.
*/
-+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
- static __always_inline int kmalloc_index(size_t size)
+-static __always_inline int kmalloc_index(size_t size)
++static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
{
if (!size)
-@@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
+ return 0;
+@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
}
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -63895,15 +63940,16 @@ index c2f8c8b..be9e036 100644
static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-@@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+@@ -259,7 +259,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
}
#endif
-+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
- static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
++static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
-@@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
+ return kmalloc_order_trace(size, flags, order);
+@@ -284,7 +284,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
}
#ifdef CONFIG_NUMA
@@ -74669,7 +74715,7 @@ index 8c7265a..c96d884 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 2aad499..2e608e8 100644
+index 2aad499..a8a740e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -74793,18 +74839,16 @@ index 2aad499..2e608e8 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;
-@@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
- static struct vmap_area *alloc_vmap_area(unsigned long size,
+@@ -329,7 +369,7 @@ static void purge_vmap_area_lazy(void);
+ * Allocate a region of KVA of the specified size and alignment, within the
+ * vstart and vend.
+ */
+-static struct vmap_area *alloc_vmap_area(unsigned long size,
++static struct __size_overflow(1) vmap_area *alloc_vmap_area(unsigned long size,
unsigned long align,
unsigned long vstart, unsigned long vend,
-+ int node, gfp_t gfp_mask) __size_overflow(1);
-+static struct vmap_area *alloc_vmap_area(unsigned long size,
-+ unsigned long align,
-+ unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask)
- {
- struct vmap_area *va;
-@@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1320,6 +1360,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -74821,7 +74865,7 @@ index 2aad499..2e608e8 100644
if (flags & VM_IOREMAP) {
int bit = fls(size);
-@@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1552,6 +1602,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;
@@ -74833,7 +74877,7 @@ index 2aad499..2e608e8 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1653,6 +1708,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
@@ -74847,7 +74891,7 @@ index 2aad499..2e608e8 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
start, end, node, gfp_mask, caller);
if (!area)
-@@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1826,10 +1888,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -74859,7 +74903,7 @@ index 2aad499..2e608e8 100644
-1, __builtin_return_address(0));
}
-@@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -2124,6 +2185,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
@@ -77347,7 +77391,7 @@ index 06592d8..64860f6 100644
*uaddr_len = sizeof(struct sockaddr_ax25);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 0f66174..2f50376 100644
+index 0f66174..e7cb04c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -77368,7 +77412,26 @@ index 0f66174..2f50376 100644
spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
-@@ -3275,7 +3275,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -2641,6 +2641,7 @@ out:
+
+ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ {
++ struct sock_extended_err ee;
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+@@ -2662,8 +2663,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
++ ee = serr->ee;
+ put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+- sizeof(serr->ee), &serr->ee);
++ sizeof ee, &ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+@@ -3275,7 +3277,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
@@ -77377,7 +77440,7 @@ index 0f66174..2f50376 100644
return -EFAULT;
switch (val) {
case TPACKET_V1:
-@@ -3314,7 +3314,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3314,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
len = lv;
if (put_user(len, optlen))
return -EFAULT;
@@ -82506,10 +82569,10 @@ index 0000000..b8008f7
+}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
-index 0000000..daaa86c
+index 0000000..036c9c6
--- /dev/null
+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,2486 @@
+@@ -0,0 +1,3057 @@
+_000001_hash alloc_dr 2 65495 _000001_hash NULL
+_000002_hash __copy_from_user 3 10918 _000002_hash NULL
+_000003_hash copy_from_user 3 17559 _000003_hash NULL
@@ -82767,7 +82830,7 @@ index 0000000..daaa86c
+_000263_hash ide_settings_proc_write 3 35110 _000263_hash NULL
+_000264_hash idetape_chrdev_write 3 53976 _000264_hash NULL
+_000265_hash idmap_pipe_downcall 3 14591 _000265_hash NULL
-+_000266_hash ieee80211_build_probe_req 7 27660 _000266_hash NULL
++_000266_hash ieee80211_build_probe_req 7-5 27660 _000266_hash NULL
+_000267_hash ieee80211_if_write 3 34894 _000267_hash NULL
+_000268_hash if_write 3 51756 _000268_hash NULL
+_000269_hash ilo_write 3 64378 _000269_hash NULL
@@ -82832,7 +82895,7 @@ index 0000000..daaa86c
+_000331_hash lcd_write 3 14857 _000331_hash &_000014_hash
+_000332_hash ldm_frag_add 2 5611 _000332_hash NULL
+_000333_hash __lgread 4 31668 _000333_hash NULL
-+_000334_hash libipw_alloc_txb 1 27579 _000334_hash NULL
++_000334_hash libipw_alloc_txb 1-3-2 27579 _000334_hash NULL
+_000335_hash link_send_sections_long 4 46556 _000335_hash NULL
+_000336_hash listxattr 3 12769 _000336_hash NULL
+_000337_hash LoadBitmap 2 19658 _000337_hash NULL
@@ -82860,7 +82923,7 @@ index 0000000..daaa86c
+_000360_hash mpi_resize 2 44674 _000360_hash NULL
+_000361_hash mptctl_getiocinfo 2 28545 _000361_hash NULL
+_000362_hash mtdchar_readoob 4 31200 _000362_hash NULL
-+_000363_hash mtdchar_write 3 56831 _000363_hash NULL
++_000363_hash mtdchar_write 3 56831 _002688_hash NULL nohasharray
+_000364_hash mtdchar_writeoob 4 3393 _000364_hash NULL
+_000365_hash mtd_device_parse_register 5 5024 _000365_hash NULL
+_000366_hash mtf_test_write 3 18844 _000366_hash NULL
@@ -82967,7 +83030,7 @@ index 0000000..daaa86c
+_000472_hash rfcomm_sock_setsockopt 5 18254 _000472_hash NULL
+_000473_hash rndis_add_response 2 58544 _000473_hash NULL
+_000474_hash rndis_set_oid 4 6547 _000474_hash NULL
-+_000475_hash rngapi_reset 3 34366 _000475_hash NULL
++_000475_hash rngapi_reset 3 34366 _002911_hash NULL nohasharray
+_000476_hash roccat_common_receive 4 53407 _000476_hash NULL
+_000477_hash roccat_common_send 4 12284 _000477_hash NULL
+_000478_hash rpc_malloc 2 43573 _000478_hash NULL
@@ -83153,7 +83216,7 @@ index 0000000..daaa86c
+_000667_hash zd_usb_read_fw 4 22049 _000667_hash NULL
+_000668_hash zerocopy_sg_from_iovec 3 11828 _000668_hash NULL
+_000669_hash zoran_write 3 22404 _000669_hash NULL
-+_000671_hash acpi_ex_allocate_name_string 2 7685 _000671_hash NULL
++_000671_hash acpi_ex_allocate_name_string 2 7685 _002855_hash NULL nohasharray
+_000672_hash acpi_os_allocate_zeroed 1 37422 _000672_hash NULL
+_000673_hash acpi_ut_initialize_buffer 2 47143 _002314_hash NULL nohasharray
+_000674_hash ad7879_spi_xfer 3 36311 _000674_hash NULL
@@ -83212,7 +83275,7 @@ index 0000000..daaa86c
+_000733_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000733_hash NULL
+_000734_hash ath_descdma_setup 5 12257 _000734_hash NULL
+_000735_hash ath_rx_edma_init 2 65483 _000735_hash NULL
-+_000736_hash ati_create_gatt_pages 1 4722 _000736_hash NULL
++_000736_hash ati_create_gatt_pages 1 4722 _003185_hash NULL nohasharray
+_000737_hash au0828_init_isoc 2-3 61917 _000737_hash NULL
+_000739_hash audit_init_entry 1 38644 _000739_hash NULL
+_000740_hash ax25_sendmsg 4 62770 _000740_hash NULL
@@ -83248,7 +83311,7 @@ index 0000000..daaa86c
+_000774_hash cfg80211_roamed_bss 4-6 50198 _000774_hash NULL
+_000776_hash cifs_readdata_alloc 1 50318 _000776_hash NULL
+_000777_hash cifs_readv_from_socket 3 19109 _000777_hash NULL
-+_000778_hash cifs_writedata_alloc 1 32880 _000778_hash NULL
++_000778_hash cifs_writedata_alloc 1 32880 _003119_hash NULL nohasharray
+_000779_hash cnic_alloc_dma 3 34641 _000779_hash NULL
+_000780_hash configfs_write_file 3 61621 _000780_hash NULL
+_000781_hash construct_key 3 11329 _000781_hash NULL
@@ -83280,7 +83343,7 @@ index 0000000..daaa86c
+_000811_hash disconnect 4 32521 _000811_hash NULL
+_000812_hash dma_attach 6-7 50831 _000812_hash NULL
+_000814_hash dn_sendmsg 4 38390 _000814_hash NULL
-+_000815_hash do_dccp_setsockopt 5 54377 _000815_hash NULL
++_000815_hash do_dccp_setsockopt 5 54377 _003160_hash NULL nohasharray
+_000816_hash do_jffs2_setxattr 5 25910 _000816_hash NULL
+_000817_hash do_msgsnd 4 1387 _000817_hash NULL
+_000818_hash do_raw_setsockopt 5 55215 _000818_hash NULL
@@ -83338,7 +83401,7 @@ index 0000000..daaa86c
+_000873_hash ib_send_cm_rtu 3 63138 _000873_hash NULL
+_000874_hash ieee80211_key_alloc 3 19065 _000874_hash NULL
+_000875_hash ieee80211_mgmt_tx 9 46860 _000875_hash NULL
-+_000876_hash ieee80211_send_probe_req 6 6924 _000876_hash NULL
++_000876_hash ieee80211_send_probe_req 6-4 6924 _000876_hash NULL
+_000877_hash if_writecmd 2 815 _000877_hash NULL
+_000878_hash init_bch 1-2 64130 _000878_hash NULL
+_000880_hash init_ipath 1 48187 _000880_hash NULL
@@ -83397,7 +83460,7 @@ index 0000000..daaa86c
+_000937_hash kvm_read_guest_page_mmu 6 37611 _000937_hash NULL
+_000938_hash kvm_set_irq_routing 3 48704 _000938_hash NULL
+_000939_hash kvm_write_guest_cached 4 11106 _000939_hash NULL
-+_000940_hash kvm_write_guest_page 5 63555 _000940_hash NULL
++_000940_hash kvm_write_guest_page 5 63555 _002809_hash NULL nohasharray
+_000941_hash l2cap_skbuff_fromiovec 3-4 35003 _000941_hash NULL
+_000943_hash l2tp_ip_sendmsg 4 50411 _000943_hash NULL
+_000944_hash l2tp_session_create 1 25286 _000944_hash NULL
@@ -83625,7 +83688,7 @@ index 0000000..daaa86c
+_001186_hash timeout_write 3 50991 _001186_hash NULL
+_001187_hash tipc_link_send_sections_fast 4 37920 _001187_hash NULL
+_001188_hash tipc_subseq_alloc 1 5957 _001188_hash NULL
-+_001189_hash tm6000_read_write_usb 7 50774 _001189_hash NULL
++_001189_hash tm6000_read_write_usb 7 50774 _002917_hash NULL nohasharray
+_001190_hash tnode_alloc 1 49407 _001190_hash NULL
+_001191_hash tomoyo_commit_ok 2 20167 _001191_hash NULL
+_001192_hash tomoyo_scan_bprm 2-4 15642 _001192_hash NULL
@@ -83645,7 +83708,7 @@ index 0000000..daaa86c
+_001208_hash update_pmkid 4 2481 _001208_hash NULL
+_001209_hash usb_alloc_coherent 2 65444 _001209_hash NULL
+_001210_hash uvc_alloc_buffers 2 9656 _001210_hash NULL
-+_001211_hash uvc_alloc_entity 3 20836 _001211_hash NULL
++_001211_hash uvc_alloc_entity 3-4 20836 _001211_hash NULL
+_001212_hash v4l2_ctrl_new 7 38725 _001212_hash NULL
+_001213_hash v4l2_event_subscribe 3 19510 _001213_hash NULL
+_001214_hash vb2_read 3 42703 _001214_hash NULL
@@ -83672,7 +83735,7 @@ index 0000000..daaa86c
+_001237_hash _xfs_buf_get_pages 2 46811 _001237_hash NULL
+_001238_hash xfs_da_buf_make 1 55845 _001238_hash NULL
+_001239_hash xfs_da_grow_inode_int 3 21785 _001239_hash NULL
-+_001240_hash xfs_dir_cilookup_result 3 64288 _001240_hash NULL
++_001240_hash xfs_dir_cilookup_result 3 64288 _003139_hash NULL nohasharray
+_001241_hash xfs_iext_add_indirect_multi 3 32400 _001241_hash NULL
+_001242_hash xfs_iext_inline_to_direct 2 12384 _001242_hash NULL
+_001243_hash xfs_iroot_realloc 2 46826 _001243_hash NULL
@@ -83771,7 +83834,7 @@ index 0000000..daaa86c
+_001343_hash dump_midi 3 51040 _001343_hash NULL
+_001344_hash dvb_dmxdev_set_buffer_size 2 55643 _001344_hash NULL
+_001345_hash dvb_dvr_set_buffer_size 2 9840 _001345_hash NULL
-+_001346_hash dvb_ringbuffer_pkt_read_user 3-5 4303 _001346_hash NULL
++_001346_hash dvb_ringbuffer_pkt_read_user 3-5-2 4303 _001346_hash NULL
+_001348_hash dvb_ringbuffer_read_user 3 56702 _001348_hash NULL
+_001349_hash ecryptfs_filldir 3 6622 _001349_hash NULL
+_001350_hash ecryptfs_readlink 3 40775 _001350_hash NULL
@@ -83945,7 +84008,7 @@ index 0000000..daaa86c
+_001530_hash sys_getxattr 4 37418 _001530_hash NULL
+_001531_hash sys_kexec_load 2 14222 _001531_hash NULL
+_001532_hash sys_msgsnd 3 44537 _001532_hash &_000129_hash
-+_001533_hash sys_process_vm_readv 3-5 19090 _001533_hash NULL
++_001533_hash sys_process_vm_readv 3-5 19090 _003125_hash NULL nohasharray
+_001535_hash sys_process_vm_writev 3-5 4928 _001535_hash NULL
+_001537_hash sys_sched_getaffinity 2 60033 _001537_hash NULL
+_001538_hash sys_setsockopt 5 35320 _001538_hash NULL
@@ -84011,7 +84074,7 @@ index 0000000..daaa86c
+_001603_hash xfs_iext_realloc_indirect 2 59211 _001603_hash NULL
+_001604_hash xfs_inumbers_fmt 3 12817 _001604_hash NULL
+_001605_hash xlog_recover_add_to_cont_trans 4 44102 _001605_hash NULL
-+_001606_hash xz_dec_lzma2_create 2 36353 _001606_hash NULL
++_001606_hash xz_dec_lzma2_create 2 36353 _002745_hash NULL nohasharray
+_001607_hash _zd_iowrite32v_locked 3 44725 _001607_hash NULL
+_001608_hash aat2870_reg_read_file 3 12221 _001608_hash NULL
+_001609_hash add_sctp_bind_addr 3 12269 _001609_hash NULL
@@ -84024,7 +84087,7 @@ index 0000000..daaa86c
+_001616_hash afs_cell_lookup 2 8482 _001616_hash NULL
+_001617_hash agp_allocate_memory 2 58761 _001617_hash NULL
+_001618_hash __alloc_bootmem 1 31498 _001618_hash NULL
-+_001619_hash __alloc_bootmem_low 1 43423 _001619_hash NULL
++_001619_hash __alloc_bootmem_low 1 43423 _003150_hash NULL nohasharray
+_001620_hash __alloc_bootmem_node_nopanic 2 6432 _001620_hash NULL
+_001621_hash alloc_cc770dev 1 48186 _001621_hash NULL
+_001622_hash __alloc_ei_netdev 1 29338 _001622_hash NULL
@@ -84051,7 +84114,7 @@ index 0000000..daaa86c
+_001645_hash bfad_debugfs_read 3 13119 _001645_hash NULL
+_001646_hash bfad_debugfs_read_regrd 3 57830 _001646_hash NULL
+_001647_hash blk_init_tags 1 30592 _001647_hash NULL
-+_001648_hash blk_queue_init_tags 2 44355 _001648_hash NULL
++_001648_hash blk_queue_init_tags 2 44355 _002686_hash NULL nohasharray
+_001649_hash blk_rq_map_kern 4 47004 _001649_hash NULL
+_001650_hash bm_entry_read 3 10976 _001650_hash NULL
+_001651_hash bm_status_read 3 19583 _001651_hash NULL
@@ -84125,9 +84188,9 @@ index 0000000..daaa86c
+_001721_hash generic_readlink 3 32654 _001721_hash NULL
+_001722_hash gpio_power_read 3 36059 _001722_hash NULL
+_001723_hash hash_recvmsg 4 50924 _001723_hash NULL
-+_001724_hash ht40allow_map_read 3 55209 _001724_hash NULL
++_001724_hash ht40allow_map_read 3 55209 _002830_hash NULL nohasharray
+_001725_hash hwflags_read 3 52318 _001725_hash NULL
-+_001726_hash hysdn_conf_read 3 42324 _001726_hash NULL
++_001726_hash hysdn_conf_read 3 42324 _003205_hash NULL nohasharray
+_001727_hash i2400m_rx_stats_read 3 57706 _001727_hash NULL
+_001728_hash i2400m_tx_stats_read 3 28527 _001728_hash NULL
+_001729_hash idmouse_read 3 63374 _001729_hash NULL
@@ -84208,7 +84271,7 @@ index 0000000..daaa86c
+_001805_hash iwl_dbgfs_rxon_flags_read 3 20795 _001805_hash NULL
+_001806_hash iwl_dbgfs_rx_queue_read 3 19943 _001806_hash NULL
+_001807_hash iwl_dbgfs_rx_statistics_read 3 62687 _001807_hash &_000425_hash
-+_001808_hash iwl_dbgfs_sensitivity_read 3 63116 _001808_hash NULL
++_001808_hash iwl_dbgfs_sensitivity_read 3 63116 _003026_hash NULL nohasharray
+_001809_hash iwl_dbgfs_sleep_level_override_read 3 3038 _001809_hash NULL
+_001810_hash iwl_dbgfs_sram_read 3 44505 _001810_hash NULL
+_001811_hash iwl_dbgfs_stations_read 3 9309 _001811_hash NULL
@@ -84274,7 +84337,7 @@ index 0000000..daaa86c
+_001873_hash mwifiex_info_read 3 53447 _001873_hash NULL
+_001874_hash mwifiex_rdeeprom_read 3 51429 _001874_hash NULL
+_001875_hash mwifiex_regrdwr_read 3 34472 _001875_hash NULL
-+_001876_hash nfsd_vfs_read 6 62605 _001876_hash NULL
++_001876_hash nfsd_vfs_read 6 62605 _003003_hash NULL nohasharray
+_001877_hash nfsd_vfs_write 6 54577 _001877_hash NULL
+_001878_hash nfs_idmap_lookup_id 2 10660 _001878_hash NULL
+_001879_hash o2hb_debug_read 3 37851 _001879_hash NULL
@@ -84387,7 +84450,7 @@ index 0000000..daaa86c
+_001986_hash rx_out_of_mem_read 3 10157 _001986_hash NULL
+_001987_hash rx_path_reset_read 3 23801 _001987_hash NULL
+_001988_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _001988_hash NULL
-+_001989_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _001989_hash NULL
++_001989_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _003089_hash NULL nohasharray
+_001990_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _001990_hash NULL
+_001991_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _001991_hash NULL
+_001992_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _001992_hash NULL
@@ -84508,7 +84571,7 @@ index 0000000..daaa86c
+_002116_hash exofs_read_kern 6 39921 _002116_hash &_001885_hash
+_002117_hash fc_change_queue_depth 2 36841 _002117_hash NULL
+_002118_hash forced_ps_read 3 31685 _002118_hash NULL
-+_002119_hash frequency_read 3 64031 _002119_hash NULL
++_002119_hash frequency_read 3 64031 _003106_hash NULL nohasharray
+_002120_hash get_alua_req 3 4166 _002120_hash NULL
+_002121_hash get_rdac_req 3 45882 _002121_hash NULL
+_002122_hash hci_sock_recvmsg 4 7072 _002122_hash NULL
@@ -84546,7 +84609,7 @@ index 0000000..daaa86c
+_002154_hash ieee80211_if_read_flags 3 57470 _002389_hash NULL nohasharray
+_002155_hash ieee80211_if_read_fwded_frames 3 36520 _002155_hash NULL
+_002156_hash ieee80211_if_read_fwded_mcast 3 39571 _002156_hash &_000151_hash
-+_002157_hash ieee80211_if_read_fwded_unicast 3 59740 _002157_hash NULL
++_002157_hash ieee80211_if_read_fwded_unicast 3 59740 _002859_hash NULL nohasharray
+_002158_hash ieee80211_if_read_last_beacon 3 31257 _002158_hash NULL
+_002159_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002159_hash NULL
+_002160_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002160_hash NULL
@@ -84856,7 +84919,7 @@ index 0000000..daaa86c
+_002482_hash gru_alloc_gts 2-3 60056 _002482_hash NULL
+_002484_hash handle_eviocgbit 3 44193 _002484_hash NULL
+_002485_hash hid_parse_report 3 51737 _002485_hash NULL
-+_002486_hash ieee80211_alloc_txb 1 52477 _002486_hash NULL
++_002486_hash ieee80211_alloc_txb 1-2 52477 _002486_hash NULL
+_002487_hash ieee80211_wx_set_gen_ie 3 51399 _002487_hash NULL
+_002488_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _002488_hash NULL
+_002489_hash init_cdev 1 8274 _002489_hash NULL
@@ -84884,7 +84947,7 @@ index 0000000..daaa86c
+_002511_hash queue_reply 3 22416 _002511_hash NULL
+_002512_hash Realloc 2 34961 _002512_hash NULL
+_002513_hash rfc4106_set_key 3 54519 _002513_hash NULL
-+_002514_hash rtllib_alloc_txb 1 21687 _002514_hash NULL
++_002514_hash rtllib_alloc_txb 1-2 21687 _002514_hash NULL
+_002515_hash rtllib_wx_set_gen_ie 3 59808 _002515_hash NULL
+_002516_hash rts51x_transfer_data_partial 6 5735 _002516_hash NULL
+_002517_hash sparse_early_usemaps_alloc_node 4 9269 _002517_hash NULL
@@ -84902,7 +84965,7 @@ index 0000000..daaa86c
+_002529_hash xpc_kzalloc_cacheline_aligned 1 65433 _002529_hash NULL
+_002530_hash xsd_read 3 15653 _002530_hash NULL
+_002531_hash compat_do_readv_writev 4 49102 _002531_hash NULL
-+_002532_hash compat_keyctl_instantiate_key_iov 3 57431 _002532_hash NULL
++_002532_hash compat_keyctl_instantiate_key_iov 3 57431 _003110_hash NULL nohasharray
+_002533_hash compat_process_vm_rw 3-5 22254 _002533_hash NULL
+_002535_hash compat_sys_setsockopt 5 3326 _002535_hash NULL
+_002536_hash ipath_cdev_init 1 37752 _002536_hash NULL
@@ -84993,15 +85056,586 @@ index 0000000..daaa86c
+_002631_hash v9fs_fid_readn 4 60544 _002631_hash NULL
+_002632_hash v9fs_file_read 3 40858 _002632_hash NULL
+_002633_hash __devres_alloc 2 25598 _002633_hash NULL
-+_002634_hash acl_alloc 1 35979 _002634_hash NULL
-+_002635_hash acl_alloc_stack_init 1 60630 _002635_hash NULL
-+_002636_hash acl_alloc_num 1-2 60778 _002636_hash NULL
++_002634_hash alloc_dummy_extent_buffer 2 56374 _002634_hash NULL
++_002635_hash alloc_fdtable 1 17389 _002635_hash NULL
++_002636_hash alloc_large_system_hash 2 22391 _002636_hash NULL
++_002637_hash alloc_ldt 2 21972 _002637_hash NULL
++_002638_hash __alloc_skb 1 23940 _002638_hash NULL
++_002639_hash __ata_change_queue_depth 3 23484 _002639_hash NULL
++_002640_hash btrfs_alloc_free_block 3 8986 _002640_hash NULL
++_002641_hash btrfs_find_device_for_logical 2 44993 _002641_hash NULL
++_002642_hash ccid3_hc_rx_getsockopt 3 62331 _002642_hash NULL
++_002643_hash ccid3_hc_tx_getsockopt 3 16314 _002643_hash NULL
++_002644_hash cifs_readdata_alloc 1 26360 _002644_hash NULL
++_002645_hash cistpl_vers_1 4 15023 _002645_hash NULL
++_002646_hash cmm_read 3 57520 _002646_hash NULL
++_002647_hash cosa_read 3 25966 _002647_hash NULL
++_002648_hash dm_table_create 3 35687 _002648_hash NULL
++_002649_hash dpcm_state_read_file 3 65489 _002649_hash NULL
++_002651_hash edac_mc_alloc 4 3611 _002651_hash NULL
++_002652_hash ep0_read 3 38095 _002652_hash NULL
++_002653_hash event_buffer_read 3 48772 _002765_hash NULL nohasharray
++_002654_hash extend_netdev_table 2 21453 _002654_hash NULL
++_002655_hash extract_entropy_user 3 26952 _002655_hash NULL
++_002656_hash fcoe_ctlr_device_add 3 1793 _002656_hash NULL
++_002657_hash fd_do_readv 3 51297 _002657_hash NULL
++_002658_hash fd_do_writev 3 29329 _002658_hash NULL
++_002659_hash ffs_ep0_read 3 2672 _002659_hash NULL
++_002660_hash fill_readbuf 3 32464 _002660_hash NULL
++_002661_hash fw_iso_buffer_alloc 2 13704 _002661_hash NULL
++_002662_hash get_fd_set 1 3866 _002662_hash NULL
++_002663_hash hidraw_report_event 3 20503 _002663_hash NULL
++_002664_hash ieee80211_if_read_ht_opmode 3 29044 _002664_hash NULL
++_002665_hash ieee80211_if_read_num_mcast_sta 3 12419 _002665_hash NULL
++_002666_hash iwl_dbgfs_calib_disabled_read 3 22649 _002666_hash NULL
++_002667_hash iwl_dbgfs_rf_reset_read 3 26512 _002667_hash NULL
++_002668_hash ixgbe_alloc_q_vector 4-6 24439 _002668_hash NULL
++_002670_hash joydev_handle_JSIOCSAXMAP 3 48898 _002836_hash NULL nohasharray
++_002671_hash joydev_handle_JSIOCSBTNMAP 3 15643 _002671_hash NULL
++_002672_hash __kfifo_from_user_r 3 60345 _002672_hash NULL
++_002673_hash kstrtoint_from_user 2 8778 _002673_hash NULL
++_002674_hash kstrtol_from_user 2 10168 _002674_hash NULL
++_002675_hash kstrtoll_from_user 2 19500 _002675_hash NULL
++_002676_hash kstrtos16_from_user 2 28300 _002676_hash NULL
++_002677_hash kstrtos8_from_user 2 58268 _002677_hash NULL
++_002678_hash kstrtou16_from_user 2 54274 _002678_hash NULL
++_002679_hash kstrtou8_from_user 2 55599 _002679_hash NULL
++_002680_hash kstrtouint_from_user 2 10536 _002680_hash NULL
++_002681_hash kstrtoul_from_user 2 64569 _002681_hash NULL
++_002682_hash kstrtoull_from_user 2 63026 _002682_hash NULL
++_002683_hash l2cap_create_iframe_pdu 3 40055 _002683_hash NULL
++_002684_hash l2tp_ip6_recvmsg 4 62874 _002684_hash NULL
++_002685_hash mem_cgroup_read 5 22461 _002685_hash NULL
++_002686_hash nfs_fscache_get_super_cookie 3 44355 _002686_hash &_001648_hash
++_002687_hash nfs_pgarray_set 2 1085 _002687_hash NULL
++_002688_hash ntfs_rl_realloc 3 56831 _002688_hash &_000363_hash
++_002689_hash ntfs_rl_realloc_nofail 3 32173 _002689_hash NULL
++_002690_hash pn533_dep_link_up 5 22154 _002690_hash NULL
++_002691_hash port_fops_write 3 54627 _002691_hash NULL
++_002692_hash ptp_read 4 63251 _002692_hash NULL
++_002693_hash qla4xxx_change_queue_depth 2 1268 _002693_hash NULL
++_002694_hash reqsk_queue_alloc 2 40272 _002694_hash NULL
++_002695_hash resize_info_buffer 2 62889 _002695_hash NULL
++_002696_hash rfkill_fop_write 3 64808 _002696_hash NULL
++_002697_hash rt2x00debug_write_rfcsr 3 41473 _002697_hash NULL
++_002698_hash rvmalloc 1 46873 _002698_hash NULL
++_002699_hash rw_copy_check_uvector 3 45748 _002699_hash NULL
++_002700_hash sctp_getsockopt_active_key 2 45483 _002700_hash NULL
++_002701_hash sctp_getsockopt_adaptation_layer 2 45375 _002701_hash NULL
++_002702_hash sctp_getsockopt_assoc_ids 2 9043 _002702_hash NULL
++_002703_hash sctp_getsockopt_associnfo 2 58169 _002703_hash NULL
++_002704_hash sctp_getsockopt_assoc_number 2 6384 _002704_hash NULL
++_002705_hash sctp_getsockopt_auto_asconf 2 46584 _002705_hash NULL
++_002706_hash sctp_getsockopt_context 2 52490 _002706_hash NULL
++_002707_hash sctp_getsockopt_default_send_param 2 63056 _002707_hash NULL
++_002708_hash sctp_getsockopt_disable_fragments 2 12330 _002708_hash NULL
++_002709_hash sctp_getsockopt_fragment_interleave 2 51215 _002709_hash NULL
++_002710_hash sctp_getsockopt_initmsg 2 26042 _002710_hash NULL
++_002711_hash sctp_getsockopt_mappedv4 2 20044 _002711_hash NULL
++_002712_hash sctp_getsockopt_nodelay 2 9560 _002712_hash NULL
++_002713_hash sctp_getsockopt_partial_delivery_point 2 60952 _002713_hash NULL
++_002714_hash sctp_getsockopt_peeloff 2 59190 _002714_hash NULL
++_002715_hash sctp_getsockopt_peer_addr_info 2 6024 _002715_hash NULL
++_002716_hash sctp_getsockopt_peer_addr_params 2 53645 _002716_hash NULL
++_002717_hash sctp_getsockopt_primary_addr 2 24639 _002717_hash NULL
++_002718_hash sctp_getsockopt_rtoinfo 2 62027 _002718_hash NULL
++_002719_hash sctp_getsockopt_sctp_status 2 56540 _002719_hash NULL
++_002720_hash self_check_write 5 50856 _002720_hash NULL
++_002721_hash smk_read_mapped 3 7562 _002721_hash NULL
++_002722_hash smk_set_cipso 3 20379 _002722_hash NULL
++_002723_hash smk_user_access 3 24440 _002723_hash NULL
++_002724_hash smk_write_mapped 3 13519 _002724_hash NULL
++_002725_hash smk_write_rules_list 3 18565 _002725_hash NULL
++_002726_hash snd_mixart_BA0_read 5 45069 _002726_hash NULL
++_002727_hash snd_mixart_BA1_read 5 5082 _002727_hash NULL
++_002728_hash snd_pcm_oss_read2 3 54387 _002728_hash NULL
++_002729_hash syslog_print 2 307 _002729_hash NULL
++_002730_hash tcp_dma_try_early_copy 3 4457 _002730_hash NULL
++_002731_hash tcp_send_rcvq 3 11316 _002731_hash NULL
++_002732_hash tomoyo_init_log 2 61526 _002732_hash NULL
++_002733_hash ubi_dump_flash 4 46381 _002733_hash NULL
++_002734_hash ubi_eba_atomic_leb_change 5 60379 _002734_hash NULL
++_002735_hash ubi_eba_write_leb 5-6 36029 _002735_hash NULL
++_002737_hash ubi_eba_write_leb_st 5 44343 _002737_hash NULL
++_002738_hash ubi_self_check_all_ff 4 41959 _002738_hash NULL
++_002739_hash unix_bind 3 15668 _002739_hash NULL
++_002740_hash usbvision_rvmalloc 1 19655 _002740_hash NULL
++_002742_hash v4l2_ctrl_new 7 24927 _002742_hash NULL
++_002743_hash v4l2_event_subscribe 3 53687 _002743_hash NULL
++_002744_hash v9fs_direct_read 3 45546 _002744_hash NULL
++_002745_hash v9fs_file_readn 4 36353 _002745_hash &_001606_hash
++_002746_hash __videobuf_alloc_vb 1 5665 _002746_hash NULL
++_002747_hash wm8350_write 3 24480 _002747_hash NULL
++_002748_hash xfs_buf_read_uncached 3 42844 _002748_hash NULL
++_002749_hash yurex_write 3 8761 _002749_hash NULL
++_002750_hash alloc_skb 1 55439 _002750_hash NULL
++_002751_hash alloc_skb_fclone 1 3467 _002751_hash NULL
++_002752_hash ata_scsi_change_queue_depth 2 23126 _002752_hash NULL
++_002753_hash ath6kl_disconnect_timeout_write 3 794 _002753_hash NULL
++_002754_hash ath6kl_keepalive_write 3 45600 _002754_hash NULL
++_002755_hash ath6kl_lrssi_roam_write 3 8362 _002755_hash NULL
++_002756_hash ath6kl_regread_write 3 14220 _002756_hash NULL
++_002757_hash core_sys_select 1 47494 _002757_hash NULL
++_002758_hash do_syslog 3 56807 _002758_hash NULL
++_002759_hash expand_fdtable 2 39273 _002759_hash NULL
++_002760_hash fd_execute_cmd 3 1132 _002760_hash NULL
++_002761_hash get_chars 3 40373 _002761_hash NULL
++_002762_hash hid_report_raw_event 4 2762 _002762_hash NULL
++_002763_hash inet_csk_listen_start 2 38233 _002763_hash NULL
++_002764_hash kstrtou32_from_user 2 30361 _002764_hash NULL
++_002765_hash l2cap_segment_sdu 4 48772 _002765_hash &_002653_hash
++_002766_hash __netdev_alloc_skb 2 18595 _002766_hash NULL
++_002767_hash nfs_readdata_alloc 2 65015 _002767_hash NULL
++_002768_hash nfs_writedata_alloc 2 12133 _002768_hash NULL
++_002769_hash ntfs_rl_append 2-4 6037 _002769_hash NULL
++_002771_hash ntfs_rl_insert 2-4 4931 _002771_hash NULL
++_002773_hash ntfs_rl_replace 2-4 14136 _002773_hash NULL
++_002775_hash ntfs_rl_split 2-4 52328 _002775_hash NULL
++_002777_hash port_fops_read 3 49626 _002777_hash NULL
++_002778_hash random_read 3 13815 _002778_hash NULL
++_002779_hash sg_proc_write_adio 3 45704 _002779_hash NULL
++_002780_hash sg_proc_write_dressz 3 46316 _002780_hash NULL
++_002781_hash tcp_sendmsg 4 30296 _002781_hash NULL
++_002782_hash tomoyo_write_log2 2 34318 _002782_hash NULL
++_002783_hash ubi_leb_change 4 10289 _002783_hash NULL
++_002784_hash ubi_leb_write 4-5 5478 _002784_hash NULL
++_002786_hash urandom_read 3 30462 _002786_hash NULL
++_002787_hash v9fs_cached_file_read 3 2514 _002787_hash NULL
++_002788_hash __videobuf_alloc_cached 1 12740 _002788_hash NULL
++_002789_hash __videobuf_alloc_uncached 1 55711 _002789_hash NULL
++_002790_hash wm8350_block_write 3 19727 _002790_hash NULL
++_002791_hash alloc_tx 2 32143 _002791_hash NULL
++_002792_hash alloc_wr 1-2 24635 _002792_hash NULL
++_002794_hash ath6kl_endpoint_stats_write 3 59621 _002794_hash NULL
++_002795_hash ath6kl_fwlog_mask_write 3 24810 _002795_hash NULL
++_002796_hash ath9k_wmi_cmd 4 327 _002796_hash NULL
++_002797_hash atm_alloc_charge 2 19517 _002879_hash NULL nohasharray
++_002798_hash ax25_output 2 22736 _002798_hash NULL
++_002799_hash bcsp_prepare_pkt 3 12961 _002799_hash NULL
++_002800_hash bt_skb_alloc 1 6404 _002800_hash NULL
++_002801_hash capinc_tty_write 3 28539 _002801_hash NULL
++_002802_hash cfpkt_create_pfx 1-2 23594 _002802_hash NULL
++_002804_hash cmd_complete 6 51629 _002804_hash NULL
++_002805_hash cmtp_add_msgpart 4 9252 _002805_hash NULL
++_002806_hash cmtp_send_interopmsg 7 376 _002806_hash NULL
++_002807_hash cxgb3_get_cpl_reply_skb 2 10620 _002807_hash NULL
++_002808_hash dbg_leb_change 4 23555 _002808_hash NULL
++_002809_hash dbg_leb_write 4-5 63555 _002809_hash &_000940_hash
++_002811_hash dccp_listen_start 2 35918 _002811_hash NULL
++_002812_hash __dev_alloc_skb 1 28681 _002812_hash NULL
++_002813_hash diva_os_alloc_message_buffer 1 64568 _002813_hash NULL
++_002814_hash dn_alloc_skb 2 6631 _002814_hash NULL
++_002815_hash do_pselect 1 62061 _002815_hash NULL
++_002816_hash _fc_frame_alloc 1 43568 _002816_hash NULL
++_002817_hash find_skb 2 20431 _002817_hash NULL
++_002818_hash fm_send_cmd 5 39639 _002818_hash NULL
++_002819_hash gem_alloc_skb 2 51715 _002819_hash NULL
++_002820_hash get_packet 3 41914 _002820_hash NULL
++_002821_hash get_packet 3 5747 _002821_hash NULL
++_002822_hash get_packet_pg 4 28023 _002822_hash NULL
++_002823_hash get_skb 2 63008 _002823_hash NULL
++_002824_hash hidp_queue_report 3 1881 _002824_hash NULL
++_002825_hash __hidp_send_ctrl_message 4 28303 _002825_hash NULL
++_002826_hash hycapi_rx_capipkt 3 11602 _002826_hash NULL
++_002827_hash i2400m_net_rx 5 27170 _002827_hash NULL
++_002828_hash igmpv3_newpack 2 35912 _002828_hash NULL
++_002829_hash inet_listen 2 14723 _002829_hash NULL
++_002830_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _002830_hash &_001724_hash
++_002831_hash isdn_ppp_ccp_xmit_reset 6 63297 _002831_hash NULL
++_002832_hash kmsg_read 3 46514 _002832_hash NULL
++_002833_hash _l2_alloc_skb 1 11883 _002833_hash NULL
++_002834_hash l3_alloc_skb 1 32289 _002834_hash NULL
++_002835_hash llc_alloc_frame 4 64366 _002835_hash NULL
++_002836_hash mac_drv_rx_init 2 48898 _002836_hash &_002670_hash
++_002837_hash mgmt_event 4 12810 _002837_hash NULL
++_002838_hash mI_alloc_skb 1 24770 _002838_hash NULL
++_002839_hash nci_skb_alloc 2 49757 _002839_hash NULL
++_002840_hash netdev_alloc_skb 2 62437 _002840_hash NULL
++_002841_hash __netdev_alloc_skb_ip_align 2 55067 _002841_hash NULL
++_002842_hash new_skb 1 21148 _002842_hash NULL
++_002843_hash nfc_alloc_recv_skb 1 10244 _002843_hash NULL
++_002844_hash nfcwilink_skb_alloc 1 16167 _002844_hash NULL
++_002845_hash nfulnl_alloc_skb 2 65207 _002845_hash NULL
++_002846_hash ni65_alloc_mem 3 10664 _002846_hash NULL
++_002847_hash pep_alloc_skb 3 46303 _002847_hash NULL
++_002848_hash pn_raw_send 2 54330 _002848_hash NULL
++_002849_hash __pskb_copy 2 9038 _002849_hash NULL
++_002850_hash refill_pool 2 19477 _002850_hash NULL
++_002851_hash rfcomm_wmalloc 2 58090 _002851_hash NULL
++_002852_hash rx 4 57944 _002852_hash NULL
++_002853_hash sctp_ulpevent_new 1 33377 _002853_hash NULL
++_002854_hash send_command 4 10832 _002854_hash NULL
++_002855_hash skb_copy_expand 2-3 7685 _002855_hash &_000671_hash
++_002857_hash sk_stream_alloc_skb 2 57622 _002857_hash NULL
++_002858_hash sock_alloc_send_pskb 2 21246 _002858_hash NULL
++_002859_hash sock_rmalloc 2 59740 _002859_hash &_002157_hash
++_002860_hash sock_wmalloc 2 16472 _002860_hash NULL
++_002861_hash solos_param_store 4 34755 _002861_hash NULL
++_002862_hash sys_select 1 38827 _002862_hash NULL
++_002863_hash sys_syslog 3 10746 _002863_hash NULL
++_002864_hash t4vf_pktgl_to_skb 2 39005 _002864_hash NULL
++_002865_hash tcp_collapse 5-6 63294 _002865_hash NULL
++_002867_hash tipc_cfg_reply_alloc 1 27606 _002867_hash NULL
++_002868_hash ubifs_leb_change 4 17789 _002868_hash NULL
++_002869_hash ubifs_leb_write 4-5 22679 _002869_hash NULL
++_002871_hash ulog_alloc_skb 1 23427 _002871_hash NULL
++_002872_hash _alloc_mISDN_skb 3 52232 _002872_hash NULL
++_002873_hash ath9k_multi_regread 4 65056 _002873_hash NULL
++_002874_hash ath_rxbuf_alloc 2 24745 _002874_hash NULL
++_002875_hash ax25_send_frame 2 19964 _002875_hash NULL
++_002876_hash bchannel_get_rxbuf 2 37213 _002876_hash NULL
++_002877_hash cfpkt_create 1 18197 _002877_hash NULL
++_002878_hash console_store 4 36007 _002878_hash NULL
++_002879_hash dev_alloc_skb 1 19517 _002879_hash &_002797_hash
++_002880_hash dn_nsp_do_disc 2-6 49474 _002880_hash NULL
++_002882_hash do_write_orph_node 2 64343 _002882_hash NULL
++_002883_hash dsp_cmx_send_member 2 15625 _002883_hash NULL
++_002884_hash fc_frame_alloc 2 1596 _002884_hash NULL
++_002885_hash fc_frame_alloc_fill 2 59394 _002885_hash NULL
++_002886_hash fmc_send_cmd 5 20435 _002886_hash NULL
++_002887_hash hci_send_cmd 3 43810 _002887_hash NULL
++_002888_hash hci_si_event 3 1404 _002888_hash NULL
++_002889_hash hfcpci_empty_bfifo 4 62323 _002889_hash NULL
++_002890_hash hidp_send_ctrl_message 4 43702 _002890_hash NULL
++_002891_hash hysdn_sched_rx 3 60533 _002891_hash NULL
++_002892_hash inet_dccp_listen 2 28565 _002892_hash NULL
++_002893_hash ip6_append_data 4-5 36490 _002893_hash NULL
++_002894_hash __ip_append_data 7-8 36191 _002894_hash NULL
++_002895_hash l1oip_socket_recv 6 56537 _002895_hash NULL
++_002896_hash l2cap_build_cmd 4 48676 _002896_hash NULL
++_002897_hash l2down_create 4 21755 _002897_hash NULL
++_002898_hash l2up_create 3 6430 _002898_hash NULL
++_002899_hash ldisc_receive 4 41516 _002899_hash NULL
++_002902_hash lro_gen_skb 6 2644 _002902_hash NULL
++_002903_hash macvtap_alloc_skb 2-4-3 50629 _002903_hash NULL
++_002906_hash mgmt_device_found 10 14146 _002906_hash NULL
++_002907_hash nci_send_cmd 3 58206 _002907_hash NULL
++_002908_hash netdev_alloc_skb_ip_align 2 40811 _002908_hash NULL
++_002909_hash nfcwilink_send_bts_cmd 3 10802 _002909_hash NULL
++_002910_hash nfqnl_mangle 2 14583 _002910_hash NULL
++_002911_hash p54_alloc_skb 3 34366 _002911_hash &_000475_hash
++_002912_hash packet_alloc_skb 2-5-4 62602 _002912_hash NULL
++_002915_hash pep_indicate 5 38611 _002915_hash NULL
++_002916_hash pep_reply 5 50582 _002916_hash NULL
++_002917_hash pipe_handler_request 5 50774 _002917_hash &_001189_hash
++_002918_hash ql_process_mac_rx_page 4 15543 _002918_hash NULL
++_002919_hash ql_process_mac_rx_skb 4 6689 _002919_hash NULL
++_002920_hash rfcomm_tty_write 3 51603 _002920_hash NULL
++_002921_hash send_mpa_reject 3 7135 _002921_hash NULL
++_002922_hash send_mpa_reply 3 32372 _002922_hash NULL
++_002923_hash set_rxd_buffer_pointer 8 9950 _002923_hash NULL
++_002924_hash sge_rx 3 50594 _002924_hash NULL
++_002925_hash skb_cow_data 2 11565 _002925_hash NULL
++_002926_hash smp_build_cmd 3 45853 _002926_hash NULL
++_002927_hash sock_alloc_send_skb 2 23720 _002927_hash NULL
++_002928_hash sys_pselect6 1 57449 _002928_hash NULL
++_002929_hash tcp_fragment 3 20436 _002929_hash NULL
++_002930_hash teiup_create 3 43201 _002930_hash NULL
++_002931_hash tg3_run_loopback 2 30093 _002931_hash NULL
++_002932_hash tun_alloc_skb 2-4-3 41216 _002932_hash NULL
++_002935_hash ubifs_write_node 5 11258 _002935_hash NULL
++_002936_hash use_pool 2 64607 _002936_hash NULL
++_002937_hash vxge_rx_alloc 3 52024 _002937_hash NULL
++_002938_hash add_packet 3 54433 _002938_hash NULL
++_002939_hash add_rx_skb 3 8257 _002939_hash NULL
++_002940_hash ath6kl_buf_alloc 1 57304 _002940_hash NULL
++_002941_hash bat_iv_ogm_aggregate_new 2 2620 _002941_hash NULL
++_002942_hash bnx2fc_process_l2_frame_compl 3 65072 _002942_hash NULL
++_002943_hash brcmu_pkt_buf_get_skb 1 5556 _002943_hash NULL
++_002944_hash br_send_bpdu 3 29669 _002944_hash NULL
++_002945_hash bt_skb_send_alloc 2 6581 _002945_hash NULL
++_002946_hash c4iw_reject_cr 3 28174 _002946_hash NULL
++_002947_hash carl9170_rx_copy_data 2 21656 _002947_hash NULL
++_002948_hash cfpkt_add_body 3 44630 _002948_hash NULL
++_002949_hash cfpkt_append 3 61206 _002949_hash NULL
++_002950_hash cosa_net_setup_rx 2 38594 _002950_hash NULL
++_002951_hash cxgb4_pktgl_to_skb 2 61899 _002951_hash NULL
++_002952_hash dn_alloc_send_pskb 2 4465 _002952_hash NULL
++_002953_hash dn_nsp_return_disc 2 60296 _002953_hash NULL
++_002954_hash dn_nsp_send_disc 2 23469 _002954_hash NULL
++_002955_hash dsp_tone_hw_message 3 17678 _002955_hash NULL
++_002956_hash dvb_net_sec 3 37884 _002956_hash NULL
++_002957_hash e1000_check_copybreak 3 62448 _002957_hash NULL
++_002958_hash fast_rx_path 3 59214 _002958_hash NULL
++_002959_hash fc_fcp_frame_alloc 2 12624 _002959_hash NULL
++_002960_hash fcoe_ctlr_send_keep_alive 3 15308 _002960_hash NULL
++_002961_hash fwnet_incoming_packet 3 40380 _002961_hash NULL
++_002962_hash fwnet_pd_new 4 39947 _002962_hash NULL
++_002963_hash got_frame 2 16028 _002963_hash NULL
++_002964_hash gsm_mux_rx_netchar 3 33336 _002964_hash NULL
++_002965_hash hdlcdev_rx 3 997 _002965_hash NULL
++_002966_hash hdlc_empty_fifo 2 18397 _002966_hash NULL
++_002967_hash hfc_empty_fifo 2 57972 _002967_hash NULL
++_002968_hash hfcpci_empty_fifo 4 2427 _002968_hash NULL
++_002969_hash hfcsusb_rx_frame 3 52745 _002969_hash NULL
++_002970_hash hidp_output_raw_report 3 5629 _002970_hash NULL
++_002971_hash hscx_empty_fifo 2 13360 _002971_hash NULL
++_002972_hash hysdn_rx_netpkt 3 16136 _002972_hash NULL
++_002973_hash ieee80211_fragment 4 33112 _002973_hash NULL
++_002974_hash ieee80211_probereq_get 4-6 29069 _002974_hash NULL
++_002976_hash ieee80211_send_auth 5 24121 _002976_hash NULL
++_002977_hash ieee80211_set_probe_resp 3 10077 _002977_hash NULL
++_002978_hash ieee80211_tdls_mgmt 8 9581 _002978_hash NULL
++_002979_hash ip6_ufo_append_data 5-7-6 4780 _002979_hash NULL
++_002982_hash ip_ufo_append_data 6-8-7 12775 _002982_hash NULL
++_002985_hash ipw_packet_received_skb 2 1230 _002985_hash NULL
++_002986_hash iwch_reject_cr 3 23901 _002986_hash NULL
++_002987_hash iwm_rx_packet_alloc 3 9898 _002987_hash NULL
++_002988_hash ixgb_check_copybreak 3 5847 _002988_hash NULL
++_002989_hash l1oip_socket_parse 4 4507 _002989_hash NULL
++_002990_hash l2cap_send_cmd 4 14548 _002990_hash NULL
++_002991_hash l2tp_ip6_sendmsg 4 7461 _002991_hash NULL
++_002993_hash lowpan_fragment_xmit 3-4 22095 _002993_hash NULL
++_002996_hash mcs_unwrap_fir 3 25733 _002996_hash NULL
++_002997_hash mcs_unwrap_mir 3 9455 _002997_hash NULL
++_002998_hash mld_newpack 2 50950 _002998_hash NULL
++_002999_hash nfc_alloc_send_skb 4 3167 _002999_hash NULL
++_003000_hash p54_download_eeprom 4 43842 _003000_hash NULL
++_003002_hash ppp_tx_cp 5 62044 _003002_hash NULL
++_003003_hash prism2_send_mgmt 4 62605 _003003_hash &_001876_hash
++_003004_hash prism2_sta_send_mgmt 5 43916 _003004_hash NULL
++_003005_hash _queue_data 4 54983 _003005_hash NULL
++_003006_hash read_dma 3 55086 _003006_hash NULL
++_003007_hash read_fifo 3 826 _003007_hash NULL
++_003008_hash receive_copy 3 12216 _003008_hash NULL
++_003009_hash rtl8169_try_rx_copy 3 705 _003009_hash NULL
++_003010_hash _rtl92s_firmware_downloadcode 3 14021 _003010_hash NULL
++_003011_hash rx_data 4 60442 _003011_hash NULL
++_003012_hash sis190_try_rx_copy 3 57069 _003012_hash NULL
++_003013_hash skge_rx_get 3 40598 _003013_hash NULL
++_003014_hash tcp_mark_head_lost 2 35895 _003014_hash NULL
++_003015_hash tcp_match_skb_to_sack 3-4 23568 _003015_hash NULL
++_003017_hash tso_fragment 3 29050 _003017_hash NULL
++_003018_hash tt_response_fill_table 1 57902 _003018_hash NULL
++_003020_hash udpv6_sendmsg 4 22316 _003020_hash NULL
++_003021_hash velocity_rx_copy 2 34583 _003021_hash NULL
++_003022_hash W6692_empty_Bfifo 2 47804 _003022_hash NULL
++_003023_hash zd_mac_rx 3 38296 _003023_hash NULL
++_003024_hash ath6kl_wmi_get_new_buf 1 52304 _003024_hash NULL
++_003025_hash bat_iv_ogm_queue_add 3 30870 _003025_hash NULL
++_003026_hash brcmf_alloc_pkt_and_read 2 63116 _003026_hash &_001808_hash
++_003027_hash brcmf_sdcard_recv_buf 6 38179 _003027_hash NULL
++_003028_hash brcmf_sdcard_rwdata 5 65041 _003028_hash NULL
++_003029_hash brcmf_sdcard_send_buf 6 7713 _003029_hash NULL
++_003030_hash carl9170_handle_mpdu 3 11056 _003030_hash NULL
++_003031_hash cfpkt_add_trail 3 27260 _003031_hash NULL
++_003032_hash cfpkt_pad_trail 2 55511 _003032_hash NULL
++_003033_hash dvb_net_sec_callback 2 28786 _003033_hash NULL
++_003034_hash fwnet_receive_packet 9 50537 _003034_hash NULL
++_003035_hash handle_rx_packet 3 58993 _003035_hash NULL
++_003036_hash HDLC_irq 2 8709 _003036_hash NULL
++_003037_hash hdlc_rpr_irq 2 10240 _003037_hash NULL
++_003043_hash ipwireless_network_packet_received 4 51277 _003043_hash NULL
++_003044_hash l2cap_bredr_sig_cmd 3 49065 _003044_hash NULL
++_003045_hash l2cap_sock_alloc_skb_cb 2 33532 _003045_hash NULL
++_003046_hash llcp_allocate_pdu 3 19866 _003046_hash NULL
++_003047_hash ppp_cp_event 6 2965 _003047_hash NULL
++_003048_hash receive_client_update_packet 3 49104 _003048_hash NULL
++_003049_hash receive_server_sync_packet 3 59021 _003049_hash NULL
++_003050_hash sky2_receive 2 13407 _003050_hash NULL
++_003051_hash tcp_sacktag_walk 5-6 49703 _003051_hash NULL
++_003053_hash tcp_write_xmit 2 64602 _003053_hash NULL
++_003054_hash ath6kl_wmi_add_wow_pattern_cmd 4 12842 _003054_hash NULL
++_003055_hash ath6kl_wmi_beginscan_cmd 8 25462 _003055_hash NULL
++_003056_hash ath6kl_wmi_send_probe_response_cmd 6 31728 _003056_hash NULL
++_003057_hash ath6kl_wmi_set_appie_cmd 5 39266 _003057_hash NULL
++_003058_hash ath6kl_wmi_set_ie_cmd 6 37260 _003058_hash NULL
++_003059_hash ath6kl_wmi_startscan_cmd 8 33674 _003059_hash NULL
++_003060_hash ath6kl_wmi_test_cmd 3 27312 _003060_hash NULL
++_003061_hash brcmf_sdbrcm_membytes 3-5 37324 _003061_hash NULL
++_003063_hash brcmf_sdbrcm_read_control 3 22721 _003063_hash NULL
++_003064_hash brcmf_tx_frame 3 20978 _003064_hash NULL
++_003065_hash __carl9170_rx 3 56784 _003065_hash NULL
++_003066_hash cfpkt_setlen 2 49343 _003066_hash NULL
++_003067_hash hdlc_irq_one 2 3944 _003067_hash NULL
++_003069_hash tcp_push_one 2 48816 _003069_hash NULL
++_003070_hash __tcp_push_pending_frames 2 48148 _003070_hash NULL
++_003071_hash brcmf_sdbrcm_bus_txctl 3 42492 _003071_hash NULL
++_003072_hash carl9170_rx 3 13272 _003072_hash NULL
++_003073_hash carl9170_rx_stream 3 1334 _003073_hash NULL
++_003074_hash tcp_push 3 10680 _003074_hash NULL
++_003075_hash create_log 2 8225 _003075_hash NULL
++_003076_hash expand_files 2 17080 _003076_hash NULL
++_003077_hash iio_device_alloc 1 41440 _003077_hash NULL
++_003078_hash OS_mem_token_alloc 1 14276 _003078_hash NULL
++_003079_hash packet_came 3 18072 _003079_hash NULL
++_003080_hash softsynth_write 3 3455 _003080_hash NULL
++_003081_hash alloc_fd 1 37637 _003081_hash NULL
++_003082_hash sys_dup3 2 33421 _003082_hash NULL
++_003083_hash do_fcntl 3 31468 _003083_hash NULL
++_003084_hash sys_dup2 2 25284 _003084_hash NULL
++_003085_hash sys_fcntl 3 19267 _003085_hash NULL
++_003086_hash sys_fcntl64 3 29031 _003086_hash NULL
++_003087_hash cmpk_message_handle_tx 4 54024 _003087_hash NULL
++_003088_hash comedi_buf_alloc 3 24822 _003088_hash NULL
++_003089_hash compat_rw_copy_check_uvector 3 22001 _003089_hash &_001989_hash
++_003090_hash compat_sys_fcntl64 3 60256 _003090_hash NULL
++_003091_hash evtchn_write 3 43278 _003091_hash NULL
++_003092_hash fw_download_code 3 13249 _003092_hash NULL
++_003093_hash fwSendNullPacket 2 54618 _003093_hash NULL
++_003095_hash ieee80211_authentication_req 3 63973 _003095_hash NULL
++_003097_hash rtllib_authentication_req 3 26713 _003097_hash NULL
++_003098_hash SendTxCommandPacket 3 42901 _003098_hash NULL
++_003099_hash snd_nm256_capture_copy 5 28622 _003099_hash NULL
++_003100_hash snd_nm256_playback_copy 5 38567 _003100_hash NULL
++_003101_hash tomoyo_init_log 2 14806 _003101_hash NULL
++_003102_hash usbdux_attach_common 4 51764 _003102_hash NULL
++_003103_hash compat_sys_fcntl 3 15654 _003103_hash NULL
++_003104_hash ieee80211_auth_challenge 3 18810 _003104_hash NULL
++_003105_hash ieee80211_rtl_auth_challenge 3 61897 _003105_hash NULL
++_003106_hash resize_async_buffer 4 64031 _003106_hash &_002119_hash
++_003107_hash rtllib_auth_challenge 3 12493 _003107_hash NULL
++_003108_hash tomoyo_write_log2 2 11732 _003108_hash NULL
++_003109_hash allocate_probes 1 40204 _003109_hash NULL
++_003110_hash alloc_ftrace_hash 1 57431 _003110_hash &_002532_hash
++_003111_hash __alloc_preds 2 9492 _003111_hash NULL
++_003112_hash __alloc_pred_stack 2 26687 _003112_hash NULL
++_003113_hash alloc_sched_domains 1 47756 _003113_hash NULL
++_003114_hash alloc_trace_probe 6 38720 _003114_hash NULL
++_003115_hash alloc_trace_uprobe 3 13870 _003115_hash NULL
++_003116_hash arcfb_write 3 8702 _003116_hash NULL
++_003117_hash ath6kl_sdio_alloc_prep_scat_req 2 51986 _003117_hash NULL
++_003118_hash ath6kl_usb_post_recv_transfers 2 32892 _003118_hash NULL
++_003119_hash ath6kl_usb_submit_ctrl_in 6 32880 _003119_hash &_000778_hash
++_003120_hash ath6kl_usb_submit_ctrl_out 6 9978 _003120_hash NULL
++_003121_hash auok190xfb_write 3 37001 _003121_hash NULL
++_003122_hash beacon_interval_write 3 17952 _003122_hash NULL
++_003123_hash blk_dropped_read 3 4168 _003123_hash NULL
++_003124_hash blk_msg_write 3 13655 _003124_hash NULL
++_003125_hash brcmf_usbdev_qinit 2 19090 _003125_hash &_001533_hash
++_003126_hash brcmf_usb_dl_cmd 4 53130 _003126_hash NULL
++_003127_hash broadsheetfb_write 3 39976 _003127_hash NULL
++_003128_hash broadsheet_spiflash_rewrite_sector 2 54864 _003128_hash NULL
++_003129_hash cyttsp_probe 4 1940 _003129_hash NULL
++_003130_hash da9052_group_write 3 4534 _003130_hash NULL
++_003131_hash dccpprobe_read 3 52549 _003131_hash NULL
++_003132_hash drm_property_create_bitmask 5 30195 _003132_hash NULL
++_003133_hash dtim_interval_write 3 30489 _003133_hash NULL
++_003134_hash dynamic_ps_timeout_write 3 37713 _003134_hash NULL
++_003135_hash event_enable_read 3 7074 _003135_hash NULL
++_003136_hash event_enable_write 3 45238 _003136_hash NULL
++_003137_hash event_filter_read 3 23494 _003137_hash NULL
++_003138_hash event_filter_write 3 56609 _003138_hash NULL
++_003139_hash event_id_read 3 64288 _003139_hash &_001240_hash
++_003140_hash f_audio_buffer_alloc 1 41110 _003140_hash NULL
++_003141_hash fb_sys_read 3 13778 _003141_hash NULL
++_003142_hash fb_sys_write 3 33130 _003142_hash NULL
++_003143_hash forced_ps_write 3 37209 _003143_hash NULL
++_003144_hash __fprog_create 2 41263 _003144_hash NULL
++_003145_hash fq_codel_zalloc 1 15378 _003145_hash NULL
++_003146_hash ftrace_pid_write 3 39710 _003146_hash NULL
++_003147_hash ftrace_profile_read 3 21327 _003147_hash NULL
++_003148_hash ftrace_profile_write 3 53327 _003148_hash NULL
++_003149_hash ftrace_write 3 29551 _003149_hash NULL
++_003150_hash gdm_wimax_netif_rx 3 43423 _003150_hash &_001619_hash
++_003151_hash gpio_power_write 3 1991 _003151_hash NULL
++_003152_hash hecubafb_write 3 26942 _003152_hash NULL
++_003153_hash hsc_msg_alloc 1 60990 _003153_hash NULL
++_003154_hash hsc_write 3 55875 _003154_hash NULL
++_003155_hash hsi_alloc_controller 1 41802 _003155_hash NULL
++_003156_hash hsi_register_board_info 2 13820 _003156_hash NULL
++_003157_hash i915_ring_stop_read 3 42549 _003157_hash NULL
++_003158_hash i915_ring_stop_write 3 59010 _003158_hash NULL
++_003159_hash ieee802154_alloc_device 1 13767 _003159_hash NULL
++_003160_hash intel_sdvo_write_cmd 4 54377 _003160_hash &_000815_hash
++_003161_hash ivtvfb_write 3 40023 _003161_hash NULL
++_003162_hash metronomefb_write 3 8823 _003162_hash NULL
++_003163_hash mwifiex_usb_submit_rx_urb 2 54558 _003163_hash NULL
++_003164_hash nfc_hci_hcp_message_tx 6 14534 _003164_hash NULL
++_003165_hash nfc_hci_set_param 5 40697 _003165_hash NULL
++_003166_hash nfc_shdlc_alloc_skb 2 12741 _003166_hash NULL
++_003167_hash odev_update 2 50169 _003167_hash NULL
++_003168_hash oz_add_farewell 5 20652 _003168_hash NULL
++_003169_hash oz_cdev_read 3 20659 _003169_hash NULL
++_003170_hash oz_cdev_write 3 33852 _003170_hash NULL
++_003171_hash oz_ep_alloc 2 5587 _003171_hash NULL
++_003172_hash oz_events_read 3 47535 _003172_hash NULL
++_003173_hash pmcraid_copy_sglist 3 38431 _003173_hash NULL
++_003174_hash prctl_set_mm 3 64538 _003174_hash NULL
++_003175_hash ptp_filter_init 2 36780 _003175_hash NULL
++_003176_hash rb_simple_read 3 45972 _003176_hash NULL
++_003177_hash rb_simple_write 3 20890 _003177_hash NULL
++_003178_hash read_file_dfs 3 43145 _003178_hash NULL
++_003179_hash rx_streaming_always_write 3 32357 _003179_hash NULL
++_003180_hash rx_streaming_interval_write 3 50120 _003180_hash NULL
++_003181_hash shmem_pread_fast 3 34147 _003181_hash NULL
++_003182_hash shmem_pread_slow 3 3198 _003182_hash NULL
++_003183_hash shmem_pwrite_fast 3 46842 _003183_hash NULL
++_003184_hash shmem_pwrite_slow 3 31741 _003184_hash NULL
++_003185_hash show_header 3 4722 _003185_hash &_000736_hash
++_003186_hash split_scan_timeout_write 3 52128 _003186_hash NULL
++_003187_hash stack_max_size_read 3 1445 _003187_hash NULL
++_003188_hash stack_max_size_write 3 36068 _003188_hash NULL
++_003189_hash subsystem_filter_read 3 62310 _003189_hash NULL
++_003190_hash subsystem_filter_write 3 13022 _003190_hash NULL
++_003191_hash suspend_dtim_interval_write 3 48854 _003191_hash NULL
++_003192_hash system_enable_read 3 25815 _003192_hash NULL
++_003193_hash system_enable_write 3 61396 _003193_hash NULL
++_003194_hash trace_options_core_read 3 47390 _003194_hash NULL
++_003195_hash trace_options_core_write 3 61551 _003195_hash NULL
++_003196_hash trace_options_read 3 11419 _003196_hash NULL
++_003197_hash trace_options_write 3 48275 _003197_hash NULL
++_003198_hash trace_parser_get_init 2 31379 _003198_hash NULL
++_003199_hash traceprobe_probes_write 3 64969 _003199_hash NULL
++_003200_hash trace_seq_to_user 3 65398 _003200_hash NULL
++_003201_hash tracing_buffers_read 3 11124 _003201_hash NULL
++_003202_hash tracing_clock_write 3 27961 _003202_hash NULL
++_003203_hash tracing_cpumask_read 3 7010 _003203_hash NULL
++_003204_hash tracing_ctrl_read 3 46922 _003204_hash NULL
++_003205_hash tracing_ctrl_write 3 42324 _003205_hash &_001726_hash
++_003206_hash tracing_entries_read 3 8345 _003206_hash NULL
++_003207_hash tracing_entries_write 3 60563 _003207_hash NULL
++_003208_hash tracing_max_lat_read 3 8890 _003208_hash NULL
++_003209_hash tracing_max_lat_write 3 8728 _003209_hash NULL
++_003210_hash tracing_read_dyn_info 3 45468 _003210_hash NULL
++_003211_hash tracing_readme_read 3 16493 _003211_hash NULL
++_003212_hash tracing_saved_cmdlines_read 3 21434 _003212_hash NULL
++_003213_hash tracing_set_trace_read 3 44122 _003213_hash NULL
++_003214_hash tracing_set_trace_write 3 57096 _003214_hash NULL
++_003215_hash tracing_stats_read 3 34537 _003215_hash NULL
++_003216_hash tracing_total_entries_read 3 62817 _003216_hash NULL
++_003217_hash tracing_trace_options_write 3 153 _003217_hash NULL
++_003218_hash ttm_put_pages 2 9179 _003218_hash NULL
++_003219_hash udl_prime_create 2 57159 _003219_hash NULL
++_003220_hash ufx_alloc_urb_list 3 10349 _003220_hash NULL
++_003221_hash u_memcpya 2-3 30139 _003221_hash NULL
++_003223_hash viafb_dfph_proc_write 3 49288 _003223_hash NULL
++_003224_hash viafb_dfpl_proc_write 3 627 _003224_hash NULL
++_003225_hash viafb_dvp0_proc_write 3 23023 _003225_hash NULL
++_003226_hash viafb_dvp1_proc_write 3 48864 _003226_hash NULL
++_003227_hash viafb_vt1636_proc_write 3 16018 _003227_hash NULL
++_003228_hash vivi_read 3 23073 _003228_hash NULL
++_003229_hash wl1271_rx_filter_alloc_field 5 46721 _003229_hash NULL
++_003230_hash wl12xx_cmd_build_probe_req 6-8 3098 _003230_hash NULL
++_003232_hash wlcore_alloc_hw 1 7785 _003232_hash NULL
++_003233_hash alloc_and_copy_ftrace_hash 1 29368 _003233_hash NULL
++_003234_hash create_trace_probe 1 20175 _003234_hash NULL
++_003235_hash create_trace_uprobe 1 13184 _003235_hash NULL
++_003236_hash intel_sdvo_set_value 4 2311 _003236_hash NULL
++_003237_hash mmio_read 4 40348 _003237_hash NULL
++_003238_hash nfc_hci_execute_cmd 5 43882 _003238_hash NULL
++_003239_hash nfc_hci_send_event 5 21452 _003239_hash NULL
++_003240_hash nfc_hci_send_response 5 56462 _003240_hash NULL
++_003241_hash picolcd_fb_write 3 2318 _003241_hash NULL
++_003242_hash probes_write 3 29711 _003242_hash NULL
++_003243_hash sys_prctl 4 8766 _003243_hash NULL
++_003244_hash tracing_read_pipe 3 35312 _003244_hash NULL
++_003245_hash brcmf_usb_attach 1-2 44656 _003245_hash NULL
++_003247_hash dlfb_ops_write 3 64150 _003247_hash NULL
++_003248_hash nfc_hci_send_cmd 5 55714 _003248_hash NULL
++_003249_hash ufx_ops_write 3 54848 _003249_hash NULL
++_003250_hash viafb_iga1_odev_proc_write 3 36241 _003250_hash NULL
++_003251_hash viafb_iga2_odev_proc_write 3 2363 _003251_hash NULL
++_003252_hash xenfb_write 3 43412 _003252_hash NULL
++_003253_hash acl_alloc 1 35979 _003253_hash NULL
++_003254_hash acl_alloc_stack_init 1 60630 _003254_hash NULL
++_003255_hash acl_alloc_num 1-2 60778 _003255_hash NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..cc96254
+index 0000000..5af42b5
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,1204 @@
+@@ -0,0 +1,1558 @@
+/*
+ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -85052,6 +85686,8 @@ index 0000000..cc96254
+#define CREATE_NEW_VAR NULL_TREE
+#define CODES_LIMIT 32
+#define MAX_PARAM 10
++#define MY_STMT GF_PLF_1
++#define NO_CAST_CHECK GF_PLF_2
+
+#if BUILDING_GCC_VERSION == 4005
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
@@ -85061,20 +85697,30 @@ index 0000000..cc96254
+void debug_gimple_stmt(gimple gs);
+
+static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
-+static tree signed_size_overflow_type;
-+static tree unsigned_size_overflow_type;
+static tree report_size_overflow_decl;
+static tree const_char_ptr_type_node;
+static unsigned int handle_function(void);
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before);
++static tree get_size_overflow_type(gimple stmt, tree node);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20120618beta",
++ .version = "20120811beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
-+ unsigned int arg_count = type_num_arguments(*node);
++ unsigned int arg_count;
++
++ if (TREE_CODE(*node) == FUNCTION_DECL)
++ arg_count = type_num_arguments(TREE_TYPE(*node));
++ else if (TREE_CODE(*node) == FUNCTION_TYPE || TREE_CODE(*node) == METHOD_TYPE)
++ arg_count = type_num_arguments(*node);
++ else {
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ return NULL_TREE;
++ }
+
+ for (; args; args = TREE_CHAIN(args)) {
+ tree position = TREE_VALUE(args);
@@ -85086,13 +85732,13 @@ index 0000000..cc96254
+ return NULL_TREE;
+}
+
-+static struct attribute_spec no_size_overflow_attr = {
++static struct attribute_spec size_overflow_attr = {
+ .name = "size_overflow",
+ .min_length = 1,
+ .max_length = -1,
-+ .decl_required = false,
-+ .type_required = true,
-+ .function_type_required = true,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
+ .handler = handle_size_overflow_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = false
@@ -85101,7 +85747,7 @@ index 0000000..cc96254
+
+static void register_attributes(void __unused *event_data, void __unused *data)
+{
-+ register_attribute(&no_size_overflow_attr);
++ register_attribute(&size_overflow_attr);
+}
+
+// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
@@ -85152,6 +85798,7 @@ index 0000000..cc96254
+
+static inline gimple get_def_stmt(tree node)
+{
++ gcc_assert(node != NULL_TREE);
+ gcc_assert(TREE_CODE(node) == SSA_NAME);
+ return SSA_NAME_DEF_STMT(node);
+}
@@ -85314,11 +85961,11 @@ index 0000000..cc96254
+ gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
+
+ type = TREE_TYPE(arg);
-+ // skip function pointers
-+ if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
++
++ if (TREE_CODE(type) == POINTER_TYPE)
+ return;
+
-+ if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
+ return;
+
+ argnum = find_arg_number(arg, func);
@@ -85339,6 +85986,22 @@ index 0000000..cc96254
+ return new_var;
+}
+
++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree type = TREE_TYPE(rhs1);
++ tree lhs = create_new_var(type);
++
++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ gimple_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++ return assign;
++}
++
+static bool is_bool(tree node)
+{
+ tree type;
@@ -85358,34 +86021,63 @@ index 0000000..cc96254
+
+static tree cast_a_tree(tree type, tree var)
+{
-+ gcc_assert(type != NULL_TREE && var != NULL_TREE);
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(var != NULL_TREE);
+ gcc_assert(fold_convertible_p(type, var));
+
+ return fold_convert(type, var);
+}
+
-+static tree signed_cast(tree var)
-+{
-+ return cast_a_tree(signed_size_overflow_type, var);
-+}
-+
-+static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
++static gimple build_cast_stmt(tree type, tree var, tree new_var, gimple_stmt_iterator *gsi, bool before)
+{
+ gimple assign;
++ location_t loc;
++
++ gcc_assert(type != NULL_TREE && var != NULL_TREE);
++ if (gsi_end_p(*gsi) && before == BEFORE_STMT)
++ gcc_unreachable();
+
+ if (new_var == CREATE_NEW_VAR)
+ new_var = create_new_var(type);
+
+ assign = gimple_build_assign(new_var, cast_a_tree(type, var));
-+ gimple_set_location(assign, loc);
++
++ if (!gsi_end_p(*gsi)) {
++ loc = gimple_location(gsi_stmt(*gsi));
++ gimple_set_location(assign, loc);
++ }
++
+ gimple_set_lhs(assign, make_ssa_name(new_var, assign));
+
++ if (before)
++ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++ else
++ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++
+ return assign;
+}
+
++static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++
++ if (new_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
++ return gimple_get_lhs(assign);
++ }
++ return new_rhs1;
++}
++
+static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
+{
-+ tree oldstmt_rhs1;
++ tree oldstmt_rhs1, size_overflow_type, lhs;
+ enum tree_code code;
+ gimple stmt;
+ gimple_stmt_iterator gsi;
@@ -85399,13 +86091,18 @@ index 0000000..cc96254
+ gcc_unreachable();
+ }
+
++ if (gimple_code(oldstmt) == GIMPLE_ASM)
++ lhs = rhs1;
++ else
++ lhs = gimple_get_lhs(oldstmt);
++
+ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
+ code = TREE_CODE(oldstmt_rhs1);
+ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
+ check_missing_attribute(oldstmt_rhs1);
+
-+ stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
+ gsi = gsi_for_stmt(oldstmt);
++ pointer_set_insert(visited, oldstmt);
+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
+ basic_block next_bb, cur_bb;
+ edge e;
@@ -85423,18 +86120,20 @@ index 0000000..cc96254
+
+ gsi = gsi_after_labels(next_bb);
+ gcc_assert(!gsi_end_p(gsi));
++
+ before = true;
++ oldstmt = gsi_stmt(gsi);
++ pointer_set_insert(visited, oldstmt);
+ }
-+ if (before)
-+ gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
-+ else
-+ gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
-+ update_stmt(stmt);
-+ pointer_set_insert(visited, oldstmt);
++
++ size_overflow_type = get_size_overflow_type(oldstmt, lhs);
++
++ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
++ gimple_set_plf(stmt, MY_STMT, true);
+ return gimple_get_lhs(stmt);
+}
+
-+static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
++static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+ tree new_var, lhs = gimple_get_lhs(oldstmt);
+ gimple stmt;
@@ -85443,6 +86142,9 @@ index 0000000..cc96254
+ if (!*potentionally_overflowed)
+ return NULL_TREE;
+
++ if (gimple_plf(oldstmt, MY_STMT))
++ return lhs;
++
+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
+ rhs1 = gimple_assign_rhs1(oldstmt);
+ rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
@@ -85454,6 +86156,7 @@ index 0000000..cc96254
+
+ stmt = gimple_copy(oldstmt);
+ gimple_set_location(stmt, gimple_location(oldstmt));
++ gimple_set_plf(stmt, MY_STMT, true);
+
+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
@@ -85461,13 +86164,13 @@ index 0000000..cc96254
+ if (is_bool(lhs))
+ new_var = SSA_NAME_VAR(lhs);
+ else
-+ new_var = create_new_var(signed_size_overflow_type);
++ new_var = create_new_var(size_overflow_type);
+ new_var = make_ssa_name(new_var, stmt);
+ gimple_set_lhs(stmt, new_var);
+
+ if (rhs1 != NULL_TREE) {
+ if (!gimple_assign_cast_p(oldstmt))
-+ rhs1 = signed_cast(rhs1);
++ rhs1 = cast_a_tree(size_overflow_type, rhs1);
+ gimple_assign_set_rhs1(stmt, rhs1);
+ }
+
@@ -85502,6 +86205,7 @@ index 0000000..cc96254
+ gsi = gsi_for_stmt(oldstmt);
+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
+ gimple_set_bb(phi, bb);
++ gimple_set_plf(phi, MY_STMT, true);
+ return phi;
+}
+
@@ -85515,28 +86219,29 @@ index 0000000..cc96254
+ return first_bb;
+}
+
-+static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
++static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
+{
+ basic_block bb;
-+ gimple newstmt, def_stmt;
++ gimple newstmt;
+ gimple_stmt_iterator gsi;
++ bool before = BEFORE_STMT;
+
-+ newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
-+ if (TREE_CODE(arg) == SSA_NAME) {
-+ def_stmt = get_def_stmt(arg);
-+ if (gimple_code(def_stmt) != GIMPLE_NOP) {
-+ gsi = gsi_for_stmt(def_stmt);
-+ gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
-+ return newstmt;
-+ }
++ if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
++ gsi = gsi_for_stmt(get_def_stmt(arg));
++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
++ return gimple_get_lhs(newstmt);
+ }
+
+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
-+ if (bb->index == 0)
-+ bb = create_a_first_bb();
+ gsi = gsi_after_labels(bb);
-+ gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
-+ return newstmt;
++ if (bb->index == 0) {
++ bb = create_a_first_bb();
++ gsi = gsi_start_bb(bb);
++ }
++ if (gsi_end_p(gsi))
++ before = AFTER_STMT;
++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
++ return gimple_get_lhs(newstmt);
+}
+
+static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
@@ -85569,30 +86274,36 @@ index 0000000..cc96254
+
+ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
+ gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
++ gimple_set_plf(newstmt, MY_STMT, true);
+ update_stmt(newstmt);
+ return newstmt;
+}
+
-+static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
++static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree arg, tree new_var)
+{
+ gimple newstmt;
+ tree new_rhs;
+
+ new_rhs = expand(visited, potentionally_overflowed, arg);
-+
+ if (new_rhs == NULL_TREE)
+ return NULL_TREE;
+
++ new_rhs = cast_to_new_size_overflow_type(get_def_stmt(new_rhs), new_rhs, size_overflow_type, AFTER_STMT);
++
+ newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
+ return gimple_get_lhs(newstmt);
+}
+
-+static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
++static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
+{
-+ gimple phi;
-+ tree new_var = create_new_var(signed_size_overflow_type);
++ gimple phi, oldstmt = get_def_stmt(var);
++ tree new_var, size_overflow_type;
+ unsigned int i, n = gimple_phi_num_args(oldstmt);
+
++ size_overflow_type = get_size_overflow_type(oldstmt, var);
++
++ new_var = create_new_var(size_overflow_type);
++
+ pointer_set_insert(visited, oldstmt);
+ phi = overflow_create_phi_node(oldstmt, new_var);
+ for (i = 0; i < n; i++) {
@@ -85600,10 +86311,10 @@ index 0000000..cc96254
+
+ arg = gimple_phi_arg_def(oldstmt, i);
+ if (is_gimple_constant(arg))
-+ arg = signed_cast(arg);
-+ lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
++ arg = cast_a_tree(size_overflow_type, arg);
++ lhs = build_new_phi_arg(visited, potentionally_overflowed, size_overflow_type, arg, new_var);
+ if (lhs == NULL_TREE)
-+ lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
++ lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_var, i);
+ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
+ }
+
@@ -85611,35 +86322,132 @@ index 0000000..cc96254
+ return gimple_phi_result(phi);
+}
+
-+static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
+{
-+ gimple def_stmt = get_def_stmt(var);
-+ tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++
++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ return gimple_get_lhs(assign);
++}
++
++static void change_rhs1(gimple stmt, tree new_rhs1)
++{
++ tree assign_rhs;
++ tree rhs = gimple_assign_rhs1(stmt);
++
++ assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
++ gimple_assign_set_rhs1(stmt, assign_rhs);
++ update_stmt(stmt);
++}
++
++static bool check_mode_type(gimple stmt)
++{
++ tree lhs = gimple_get_lhs(stmt);
++ tree lhs_type = TREE_TYPE(lhs);
++ tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++
++ if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
++ return false;
++
++ if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
++ return false;
++
++ return true;
++}
++
++static bool check_undefined_integer_operation(gimple stmt)
++{
++ gimple def_stmt;
++ tree lhs = gimple_get_lhs(stmt);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs1_type = TREE_TYPE(rhs1);
++ tree lhs_type = TREE_TYPE(lhs);
++
++ if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++ return false;
++
++ def_stmt = get_def_stmt(rhs1);
++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
++ return false;
++ return true;
++}
++
++static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt)
++{
++ tree size_overflow_type, lhs = gimple_get_lhs(stmt);
++ tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs1_type = TREE_TYPE(rhs1);
++ tree lhs_type = TREE_TYPE(lhs);
+
+ *potentionally_overflowed = true;
++
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
-+ if (new_rhs1 == NULL_TREE) {
-+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
-+ else
-+ return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
++
++ if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return lhs;
++
++ if (gimple_plf(stmt, NO_CAST_CHECK)) {
++ size_overflow_type = get_size_overflow_type(stmt, rhs1);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
++ }
++
++ if (!gimple_assign_cast_p(stmt)) {
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
++ }
++
++ if (check_undefined_integer_operation(stmt)) {
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
+ }
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
++
++ size_overflow_type = get_size_overflow_type(stmt, rhs1);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++
++ change_rhs1(stmt, new_rhs1);
++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, potentionally_overflowed, BEFORE_STMT);
++
++ if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ if (!check_mode_type(stmt))
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
++
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++
++ check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, potentionally_overflowed, BEFORE_STMT);
++
++ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
+}
+
-+static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
+{
-+ gimple def_stmt = get_def_stmt(var);
++ gimple def_stmt = get_def_stmt(lhs);
+ tree rhs1 = gimple_assign_rhs1(def_stmt);
+
+ if (is_gimple_constant(rhs1))
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast(rhs1), NULL_TREE, NULL_TREE);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
+ gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
+ switch (TREE_CODE(rhs1)) {
+ case SSA_NAME:
-+ return handle_unary_rhs(visited, potentionally_overflowed, var);
-+
++ return handle_unary_rhs(visited, potentionally_overflowed, def_stmt);
+ case ARRAY_REF:
+ case BIT_FIELD_REF:
+ case ADDR_EXPR:
@@ -85651,7 +86459,7 @@ index 0000000..cc96254
+ case PARM_DECL:
+ case TARGET_MEM_REF:
+ case VAR_DECL:
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
+ default:
+ debug_gimple_stmt(def_stmt);
@@ -85687,11 +86495,12 @@ index 0000000..cc96254
+ return build1(ADDR_EXPR, ptr_type_node, string);
+}
+
-+static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
++static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg, bool min)
+{
+ gimple func_stmt, def_stmt;
-+ tree current_func, loc_file, loc_line;
++ tree current_func, loc_file, loc_line, ssa_name;
+ expanded_location xloc;
++ char ssa_name_buf[100];
+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
+
+ def_stmt = get_def_stmt(arg);
@@ -85711,8 +86520,15 @@ index 0000000..cc96254
+ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
+ current_func = create_string_param(current_func);
+
-+ // void report_size_overflow(const char *file, unsigned int line, const char *func)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
++ if (min)
++ snprintf(ssa_name_buf, 100, "%s_%u (min)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
++ else
++ snprintf(ssa_name_buf, 100, "%s_%u (max)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
++ ssa_name = build_string(100, ssa_name_buf);
++ ssa_name = create_string_param(ssa_name);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
+
+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
+}
@@ -85724,14 +86540,15 @@ index 0000000..cc96254
+ inform(loc, "Integer size_overflow check applied here.");
+}
+
-+static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
+{
+ basic_block cond_bb, join_bb, bb_true;
+ edge e;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+ cond_bb = gimple_bb(stmt);
-+ gsi_prev(&gsi);
++ if (before)
++ gsi_prev(&gsi);
+ if (gsi_end_p(gsi))
+ e = split_block_after_labels(cond_bb);
+ else
@@ -85757,80 +86574,218 @@ index 0000000..cc96254
+ }
+
+ insert_cond(cond_bb, arg, cond_code, type_value);
-+ insert_cond_result(bb_true, stmt, arg);
++ insert_cond_result(bb_true, stmt, arg, min);
+
+// print_the_code_insertions(stmt);
+}
+
-+static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before)
+{
-+ gimple ucast_stmt;
-+ gimple_stmt_iterator gsi;
-+ location_t loc = gimple_location(stmt);
++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min, rhs_type = TREE_TYPE(rhs);
++ gcc_assert(rhs_type != NULL_TREE);
++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
+
-+ ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
-+ return ucast_stmt;
++ if (!*potentionally_overflowed)
++ return;
++
++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++ gcc_assert(!TREE_OVERFLOW(type_max));
++
++ cast_rhs_type = TREE_TYPE(cast_rhs);
++ type_max_type = TREE_TYPE(type_max);
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
++ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
++
++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
+}
+
-+static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
++static tree get_handle_const_assign_size_overflow_type(gimple def_stmt, tree var_rhs)
+{
-+ tree type_max, type_min, rhs_type = TREE_TYPE(rhs);
-+ gimple ucast_stmt;
++ gimple var_rhs_def_stmt;
++ tree lhs = gimple_get_lhs(def_stmt);
++ tree lhs_type = TREE_TYPE(lhs);
++ tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
++ tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
+
-+ if (!*potentionally_overflowed)
-+ return;
++ if (var_rhs == NULL_TREE)
++ return get_size_overflow_type(def_stmt, lhs);
+
-+ if (TYPE_UNSIGNED(rhs_type)) {
-+ ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
-+ type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
-+ } else {
-+ type_max = signed_cast(TYPE_MAX_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
++ var_rhs_def_stmt = get_def_stmt(var_rhs);
+
-+ type_min = signed_cast(TYPE_MIN_VALUE(rhs_type));
-+ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
++ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
++ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
++
++ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
++ debug_gimple_stmt(def_stmt);
++ gcc_unreachable();
+ }
++
++ return get_size_overflow_type(def_stmt, lhs);
+}
+
-+static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
++static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var_rhs, tree new_rhs1, tree new_rhs2)
+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree origtype = TREE_TYPE(orig_rhs);
++ tree new_rhs, size_overflow_type, orig_rhs;
++ void (*gimple_assign_set_rhs)(gimple, tree);
++ tree rhs1 = gimple_assign_rhs1(def_stmt);
++ tree rhs2 = gimple_assign_rhs2(def_stmt);
++ tree lhs = gimple_get_lhs(def_stmt);
+
-+ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
++ if (var_rhs == NULL_TREE)
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+
-+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ update_stmt(assign);
-+ return gimple_get_lhs(assign);
-+}
++ if (new_rhs2 == NULL_TREE) {
++ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs1);
++ new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
++ orig_rhs = rhs1;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++ } else {
++ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs2);
++ new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
++ orig_rhs = rhs2;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++ }
+
-+static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree orig_rhs, tree var_rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
-+{
-+ tree new_rhs;
++ var_rhs = cast_to_new_size_overflow_type(def_stmt, var_rhs, size_overflow_type, BEFORE_STMT);
+
+ if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
+
-+ if (var_rhs == NULL_TREE)
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ check_size_overflow(def_stmt, size_overflow_type, var_rhs, orig_rhs, potentionally_overflowed, BEFORE_STMT);
+
+ new_rhs = change_assign_rhs(def_stmt, orig_rhs, var_rhs);
+ gimple_assign_set_rhs(def_stmt, new_rhs);
+ update_stmt(def_stmt);
+
-+ check_size_overflow(def_stmt, var_rhs, orig_rhs, potentionally_overflowed);
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+}
+
-+static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
++static tree get_cast_def_stmt_rhs(tree new_rhs)
+{
-+ tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(var);
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(new_rhs);
++ // get_size_overflow_type
++ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
++ gcc_assert(gimple_assign_cast_p(def_stmt));
++ return gimple_assign_rhs1(def_stmt);
++}
++
++static tree cast_to_int_TI_type_and_check(bool *potentionally_overflowed, gimple stmt, tree new_rhs)
++{
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt, def_stmt;
++ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
++
++ if (mode != TImode && mode != DImode) {
++ def_stmt = get_def_stmt(new_rhs);
++ gcc_assert(gimple_assign_cast_p(def_stmt));
++ new_rhs = gimple_assign_rhs1(def_stmt);
++ mode = TYPE_MODE(TREE_TYPE(new_rhs));
++ }
++
++ gcc_assert(mode == TImode || mode == DImode);
++
++ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
++ return new_rhs;
++
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
++ new_rhs = gimple_get_lhs(cast_stmt);
++
++ if (mode == DImode)
++ return new_rhs;
++
++ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, potentionally_overflowed, BEFORE_STMT);
++
++ return new_rhs;
++}
++
++static bool is_an_integer_trunction(gimple stmt)
++{
++ gimple rhs1_def_stmt, rhs2_def_stmt;
++ tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
++ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
++
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
++ return false;
++
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ rhs2_def_stmt = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ return false;
++
++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
++ return false;
++
++ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
++ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
++ return true;
++}
++
++static tree handle_integer_truncation(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
++{
++ tree new_rhs1, new_rhs2, size_overflow_type;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_an_integer_trunction(stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
++ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
++
++ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
++
++ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
++ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_get_lhs(assign);
++ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, potentionally_overflowed, AFTER_STMT);
++
++ size_overflow_type = get_size_overflow_type(stmt, lhs);
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
++{
++ tree rhs1, rhs2, size_overflow_type, new_lhs;
++ gimple def_stmt = get_def_stmt(lhs);
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+
@@ -85851,32 +86806,41 @@ index 0000000..cc96254
+ case EXACT_DIV_EXPR:
+ case POINTER_PLUS_EXPR:
+ case BIT_AND_EXPR:
-+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
++ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
+ default:
+ break;
+ }
+
+ *potentionally_overflowed = true;
+
++ new_lhs = handle_integer_truncation(visited, potentionally_overflowed, lhs);
++ if (new_lhs != NULL_TREE)
++ return new_lhs;
++
+ if (TREE_CODE(rhs1) == SSA_NAME)
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
+ if (TREE_CODE(rhs2) == SSA_NAME)
+ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
+
+ if (is_gimple_constant(rhs2))
-+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, new_rhs1, signed_cast(rhs2), &gimple_assign_set_rhs1);
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
+
+ if (is_gimple_constant(rhs1))
-+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, new_rhs2, signed_cast(rhs1), new_rhs2, &gimple_assign_set_rhs2);
++ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
++
++ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
+
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
++ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+#if BUILDING_GCC_VERSION >= 4007
-+static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
++static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree rhs)
+{
+ if (is_gimple_constant(rhs))
-+ return signed_cast(rhs);
++ return cast_a_tree(size_overflow_type, rhs);
+ if (TREE_CODE(rhs) != SSA_NAME)
+ return NULL_TREE;
+ return expand(visited, potentionally_overflowed, rhs);
@@ -85884,61 +86848,72 @@ index 0000000..cc96254
+
+static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
+{
-+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
+ gimple def_stmt = get_def_stmt(var);
+
+ *potentionally_overflowed = true;
+
++ size_overflow_type = get_size_overflow_type(def_stmt, var);
++
+ rhs1 = gimple_assign_rhs1(def_stmt);
+ rhs2 = gimple_assign_rhs2(def_stmt);
+ rhs3 = gimple_assign_rhs3(def_stmt);
-+ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
-+ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
-+ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
++ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs1);
++ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs2);
++ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs3);
+
-+ if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
-+ return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
-+ error("handle_ternary_ops: unknown rhs");
-+ gcc_unreachable();
++ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
++ new_rhs3 = cast_to_new_size_overflow_type(def_stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
++
++ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
+}
+#endif
+
-+static void set_size_overflow_type(tree node)
++static tree get_size_overflow_type(gimple stmt, tree node)
+{
-+ switch (TYPE_MODE(TREE_TYPE(node))) {
++ tree type;
++
++ gcc_assert(node != NULL_TREE);
++
++ type = TREE_TYPE(node);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return TREE_TYPE(node);
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
++ case HImode:
++ return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
+ case SImode:
-+ signed_size_overflow_type = intDI_type_node;
-+ unsigned_size_overflow_type = unsigned_intDI_type_node;
-+ break;
++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
+ case DImode:
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
-+ signed_size_overflow_type = intDI_type_node;
-+ unsigned_size_overflow_type = unsigned_intDI_type_node;
-+ } else {
-+ signed_size_overflow_type = intTI_type_node;
-+ unsigned_size_overflow_type = unsigned_intTI_type_node;
-+ }
-+ break;
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
++ return (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
+ default:
-+ error("set_size_overflow_type: unsupported gcc configuration.");
++ debug_tree(node);
++ error("get_size_overflow_type: unsupported gcc configuration.");
+ gcc_unreachable();
+ }
+}
+
+static tree expand_visited(gimple def_stmt)
+{
-+ gimple tmp;
++ gimple next_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
+
+ gsi_next(&gsi);
-+ tmp = gsi_stmt(gsi);
-+ switch (gimple_code(tmp)) {
++ next_stmt = gsi_stmt(gsi);
++
++ switch (gimple_code(next_stmt)) {
+ case GIMPLE_ASSIGN:
-+ return gimple_get_lhs(tmp);
++ return gimple_get_lhs(next_stmt);
+ case GIMPLE_PHI:
-+ return gimple_phi_result(tmp);
++ return gimple_phi_result(next_stmt);
+ case GIMPLE_CALL:
-+ return gimple_call_lhs(tmp);
++ return gimple_call_lhs(next_stmt);
+ default:
+ return NULL_TREE;
+ }
@@ -85956,19 +86931,18 @@ index 0000000..cc96254
+ return NULL_TREE;
+
+ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
-+ if (code != INTEGER_TYPE)
-+ return NULL_TREE;
+
-+ if (SSA_NAME_IS_DEFAULT_DEF(var)) {
++ if (TREE_CODE(SSA_NAME_VAR(var)) == PARM_DECL)
+ check_missing_attribute(var);
-+ return NULL_TREE;
-+ }
+
+ def_stmt = get_def_stmt(var);
+
+ if (!def_stmt)
+ return NULL_TREE;
+
++ if (gimple_plf(def_stmt, MY_STMT))
++ return var;
++
+ if (pointer_set_contains(visited, def_stmt))
+ return expand_visited(def_stmt);
+
@@ -85977,7 +86951,7 @@ index 0000000..cc96254
+ check_missing_attribute(var);
+ return NULL_TREE;
+ case GIMPLE_PHI:
-+ return build_new_phi(visited, potentionally_overflowed, def_stmt);
++ return build_new_phi(visited, potentionally_overflowed, var);
+ case GIMPLE_CALL:
+ case GIMPLE_ASM:
+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
@@ -86007,9 +86981,7 @@ index 0000000..cc96254
+
+ gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
+
-+ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ update_stmt(assign);
++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
+
+ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
+ update_stmt(stmt);
@@ -86059,8 +87031,6 @@ index 0000000..cc96254
+
+ check_arg_type(arg);
+
-+ set_size_overflow_type(arg);
-+
+ visited = pointer_set_create();
+ potentionally_overflowed = false;
+ newarg = expand(visited, &potentionally_overflowed, arg);
@@ -86071,7 +87041,7 @@ index 0000000..cc96254
+
+ change_function_arg(stmt, arg, argnum, newarg);
+
-+ check_size_overflow(stmt, newarg, arg, &potentionally_overflowed);
++ check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, &potentionally_overflowed, BEFORE_STMT);
+}
+
+static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
@@ -86099,14 +87069,29 @@ index 0000000..cc96254
+ handle_function_arg(stmt, fndecl, num - 1);
+}
+
++static void set_plf_false(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ }
++}
++
+static unsigned int handle_function(void)
+{
-+ basic_block bb = ENTRY_BLOCK_PTR->next_bb;
-+ int saved_last_basic_block = last_basic_block;
++ basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
++
++ set_plf_false();
+
+ do {
+ gimple_stmt_iterator gsi;
-+ basic_block next = bb->next_bb;
++ next = bb->next_bb;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ tree fndecl, attr;
@@ -86119,15 +87104,16 @@ index 0000000..cc96254
+ continue;
+ if (gimple_call_num_args(stmt) == 0)
+ continue;
-+ attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
+ if (!attr || !TREE_VALUE(attr))
+ handle_function_by_hash(stmt, fndecl);
+ else
+ handle_function_by_attribute(stmt, attr, fndecl);
+ gsi = gsi_for_stmt(stmt);
++ next = gimple_bb(stmt)->next_bb;
+ }
+ bb = next;
-+ } while (bb && bb->index <= saved_last_basic_block);
++ } while (bb);
+ return 0;
+}
+
@@ -86155,11 +87141,12 @@ index 0000000..cc96254
+
+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
+
-+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
+ fntype = build_function_type_list(void_type_node,
+ const_char_ptr_type_node,
+ unsigned_type_node,
+ const_char_ptr_type_node,
++ const_char_ptr_type_node,
+ NULL_TREE);
+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
+
@@ -86167,6 +87154,7 @@ index 0000000..cc96254
+ TREE_PUBLIC(report_size_overflow_decl) = 1;
+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
@@ -86199,7 +87187,7 @@ index 0000000..cc96254
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
+ if (enable) {
-+ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);