author    Anthony G. Basile <blueness@gentoo.org>  2014-12-18 10:32:59 -0500
committer Anthony G. Basile <blueness@gentoo.org>  2014-12-18 10:32:59 -0500
commit    e98898b3c3f68974fcf3c48348114cdfbf28cf88 (patch)
tree      0658201091173978b2fb2c9c344770f30165d23e
parent    Grsec/PaX: 3.0-{3.2.65,3.14.26,3.17.6}-201412142045 (diff)
Grsec/PaX: 3.0-{3.2.65,3.14.27,3.17.7}-201412170700 (tag: 20141217)
-rw-r--r--  3.14.27/0000_README (renamed from 3.14.26/0000_README)  2
-rw-r--r--  3.14.27/4420_grsecurity-3.0-3.14.27-201412170659.patch (renamed from 3.14.26/4420_grsecurity-3.0-3.14.26-201412142109.patch)  374
-rw-r--r--  3.14.27/4425_grsec_remove_EI_PAX.patch (renamed from 3.14.26/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.14.27/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.14.26/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.14.27/4430_grsec-remove-localversion-grsec.patch (renamed from 3.14.26/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.14.27/4435_grsec-mute-warnings.patch (renamed from 3.14.26/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.14.27/4440_grsec-remove-protected-paths.patch (renamed from 3.14.26/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.14.27/4450_grsec-kconfig-default-gids.patch (renamed from 3.14.26/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.14.27/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.14.26/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.14.27/4470_disable-compat_vdso.patch (renamed from 3.14.26/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.14.27/4475_emutramp_default_on.patch (renamed from 3.14.26/4475_emutramp_default_on.patch)  0
-rw-r--r--  3.17.7/0000_README (renamed from 3.17.6/0000_README)  2
-rw-r--r--  3.17.7/4420_grsecurity-3.0-3.17.7-201412170700.patch (renamed from 3.17.6/4420_grsecurity-3.0-3.17.6-201412142110.patch)  484
-rw-r--r--  3.17.7/4425_grsec_remove_EI_PAX.patch (renamed from 3.17.6/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.17.7/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.17.6/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.17.7/4430_grsec-remove-localversion-grsec.patch (renamed from 3.17.6/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.17.7/4435_grsec-mute-warnings.patch (renamed from 3.17.6/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.17.7/4440_grsec-remove-protected-paths.patch (renamed from 3.17.6/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.17.7/4450_grsec-kconfig-default-gids.patch (renamed from 3.17.6/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.17.7/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.17.6/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.17.7/4470_disable-compat_vdso.patch (renamed from 3.17.6/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.17.7/4475_emutramp_default_on.patch (renamed from 3.17.6/4475_emutramp_default_on.patch)  0
-rw-r--r--  3.2.65/0000_README  2
-rw-r--r--  3.2.65/4420_grsecurity-3.0-3.2.65-201412170654.patch (renamed from 3.2.65/4420_grsecurity-3.0-3.2.65-201412142045.patch)  69
24 files changed, 406 insertions, 527 deletions
diff --git a/3.14.26/0000_README b/3.14.27/0000_README
index e231525..373bdca 100644
--- a/3.14.26/0000_README
+++ b/3.14.27/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.26-201412142109.patch
+Patch: 4420_grsecurity-3.0-3.14.27-201412170659.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
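
As context for the README entry above: each versioned directory carries the numbered 4xxx patches, applied in numeric order on top of the matching vanilla kernel tree. A minimal shell sketch of that flow follows; the source path and the assumption that a plain patch -p1 run suffices are illustrative, not taken from this commit.

    # Assumption: vanilla linux-3.14.27 sources unpacked next to the patchset directory.
    cd linux-3.14.27
    for p in ../3.14.27/4*.patch; do
        # The 4xxx numbering gives the intended order; the git-style a/ b/ paths need -p1.
        patch -p1 < "$p"
    done
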
diff --git a/3.14.26/4420_grsecurity-3.0-3.14.26-201412142109.patch b/3.14.27/4420_grsecurity-3.0-3.14.27-201412170659.patch
index a5539ed..f606d8d 100644
--- a/3.14.26/4420_grsecurity-3.0-3.14.26-201412142109.patch
+++ b/3.14.27/4420_grsecurity-3.0-3.14.27-201412170659.patch
@@ -292,7 +292,7 @@ index 7116fda..2f71588 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 63a5ee8..d99d2d9 100644
+index 944db23..f799f3e 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -12594,7 +12594,7 @@ index 50f8c5e..4f84fff 100644
return diff;
}
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index 14fe7cb..829b962 100644
+index b5bb498..74110e8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
@@ -21662,10 +21662,10 @@ index df5e41f..816c719 100644
extern int generic_get_free_region(unsigned long base, unsigned long size,
int replace_reg);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 79f9f84..38ace52 100644
+index fb345c4..445b2d0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void)
+@@ -1354,7 +1354,7 @@ static void __init pmu_check_apic(void)
pr_info("no hardware sampling interrupt available.\n");
}
@@ -21674,7 +21674,7 @@ index 79f9f84..38ace52 100644
.name = "format",
.attrs = NULL,
};
-@@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] = {
+@@ -1453,7 +1453,7 @@ static struct attribute *events_attr[] = {
NULL,
};
@@ -21683,7 +21683,7 @@ index 79f9f84..38ace52 100644
.name = "events",
.attrs = events_attr,
};
-@@ -1971,7 +1971,7 @@ static unsigned long get_segment_base(unsigned int segment)
+@@ -1974,7 +1974,7 @@ static unsigned long get_segment_base(unsigned int segment)
if (idx > GDT_ENTRIES)
return 0;
@@ -21692,7 +21692,7 @@ index 79f9f84..38ace52 100644
}
return get_desc_base(desc + idx);
-@@ -2061,7 +2061,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+@@ -2064,7 +2064,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
break;
perf_callchain_store(entry, frame.return_address);
@@ -21715,10 +21715,10 @@ index 639d128..e92d7e5 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index 5ee8064..4d32df9 100644
+index d4c0a0e..4057f84 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -2318,10 +2318,10 @@ __init int intel_pmu_init(void)
+@@ -2354,10 +2354,10 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
if (boot_cpu_has(X86_FEATURE_PDCM)) {
@@ -41119,10 +41119,10 @@ index 4050450..f67c5c1 100644
iir = I915_READ(IIR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index b6fb3eb..e0fa1e1 100644
+index c514690..84df88f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -10798,13 +10798,13 @@ struct intel_quirk {
+@@ -10796,13 +10796,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -41138,7 +41138,7 @@ index b6fb3eb..e0fa1e1 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -10812,18 +10812,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -10810,18 +10810,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -48289,10 +48289,10 @@ index 841b608..198a8b7 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index 5441b49..d8030d2 100644
+index 5988910..be561a2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -2855,7 +2855,7 @@ nla_put_failure:
+@@ -2851,7 +2851,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -48301,7 +48301,7 @@ index 5441b49..d8030d2 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -2902,7 +2902,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -2898,7 +2898,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -49177,29 +49177,6 @@ index a912dc0..a8225ba 100644
u16 int_num;
ZD_ASSERT(in_interrupt());
-diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
-index e30d800..19db057 100644
---- a/drivers/net/xen-netfront.c
-+++ b/drivers/net/xen-netfront.c
-@@ -469,9 +469,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
- len = skb_frag_size(frag);
- offset = frag->page_offset;
-
-- /* Data must not cross a page boundary. */
-- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
--
- /* Skip unused frames from start of page */
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
-@@ -479,8 +476,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
- while (len > 0) {
- unsigned long bytes;
-
-- BUG_ON(offset >= PAGE_SIZE);
--
- bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 683671a..4519fc2 100644
--- a/drivers/nfc/nfcwilink.c
@@ -61288,6 +61265,33 @@ index 9f9992b..8b59411 100644
return 0;
}
return 1;
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index 20d6697..f77da76 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -264,10 +264,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT2_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+@@ -841,8 +839,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT2_FS_XATTR
+- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (def_mount_opts & EXT2_DEFM_ACL)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 9142614..97484fa 100644
--- a/fs/ext2/xattr.c
@@ -61330,6 +61334,33 @@ index 22548f5..41521d8 100644
return 0;
}
return 1;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 0498390..df00300 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -649,10 +649,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT3_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (test_opt(sb, POSIX_ACL))
+@@ -1749,8 +1747,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT3_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT3_FS_XATTR
+- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (def_mount_opts & EXT3_DEFM_ACL)
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index c6874be..f8a6ae8 100644
--- a/fs/ext3/xattr.c
@@ -67185,6 +67216,21 @@ index 8d06adf..7e1c9f8 100644
#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
#define __fs_changed(gen,s) (gen != get_generation (s))
#define fs_changed(gen,s) \
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 2c80335..04d987d 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1783,6 +1783,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+ sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+ sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
++#ifdef CONFIG_REISERFS_FS_XATTR
++ /* turn on user xattrs by default */
++ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
++#endif
+ /* no preallocation minimum, be smart in
+ reiserfs_file_write instead */
+ sbi->s_alloc_options.preallocmin = 0;
diff --git a/fs/select.c b/fs/select.c
index 467bb1c..cf9d65a 100644
--- a/fs/select.c
@@ -95407,7 +95453,7 @@ index a98c7fc..393f8f1 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 492e36f..732f880 100644
+index 48d7365..732f880 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -95457,39 +95503,6 @@ index 492e36f..732f880 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-@@ -808,20 +814,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- if (!pte_file(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
-- if (swap_duplicate(entry) < 0)
-- return entry.val;
-+ if (likely(!non_swap_entry(entry))) {
-+ if (swap_duplicate(entry) < 0)
-+ return entry.val;
-
-- /* make sure dst_mm is on swapoff's mmlist. */
-- if (unlikely(list_empty(&dst_mm->mmlist))) {
-- spin_lock(&mmlist_lock);
-- if (list_empty(&dst_mm->mmlist))
-- list_add(&dst_mm->mmlist,
-- &src_mm->mmlist);
-- spin_unlock(&mmlist_lock);
-- }
-- if (likely(!non_swap_entry(entry)))
-+ /* make sure dst_mm is on swapoff's mmlist. */
-+ if (unlikely(list_empty(&dst_mm->mmlist))) {
-+ spin_lock(&mmlist_lock);
-+ if (list_empty(&dst_mm->mmlist))
-+ list_add(&dst_mm->mmlist,
-+ &src_mm->mmlist);
-+ spin_unlock(&mmlist_lock);
-+ }
- rss[MM_SWAPENTS]++;
-- else if (is_migration_entry(entry)) {
-+ } else if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
-
- if (PageAnon(page))
@@ -1137,8 +1143,10 @@ again:
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
@@ -96325,7 +96338,7 @@ index b1eb536..091d154 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index dfe90657..390920e 100644
+index b91ac80..390920e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -37,6 +37,7 @@
@@ -96411,21 +96424,7 @@ index dfe90657..390920e 100644
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -745,8 +773,11 @@ again: remove_next = 1 + (end > next->vm_end);
- * shrinking vma had, to cover any anon pages imported.
- */
- if (exporter && exporter->anon_vma && !importer->anon_vma) {
-- if (anon_vma_clone(importer, exporter))
-- return -ENOMEM;
-+ int error;
-+
-+ error = anon_vma_clone(importer, exporter);
-+ if (error)
-+ return error;
- importer->anon_vma = exporter->anon_vma;
- }
- }
-@@ -942,6 +973,12 @@ static int
+@@ -945,6 +973,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -96438,7 +96437,7 @@ index dfe90657..390920e 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -961,6 +998,12 @@ static int
+@@ -964,6 +998,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -96451,7 +96450,7 @@ index dfe90657..390920e 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
-@@ -1003,13 +1046,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -1006,13 +1046,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -96473,7 +96472,7 @@ index dfe90657..390920e 100644
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -1025,6 +1075,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1028,6 +1075,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -96489,7 +96488,7 @@ index dfe90657..390920e 100644
/*
* Can it merge with the predecessor?
*/
-@@ -1044,9 +1103,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1047,9 +1103,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
/* cases 1, 6 */
err = vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -96515,7 +96514,7 @@ index dfe90657..390920e 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(prev);
-@@ -1060,12 +1134,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1063,12 +1134,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -96545,7 +96544,7 @@ index dfe90657..390920e 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(area);
-@@ -1174,8 +1263,10 @@ none:
+@@ -1177,8 +1263,10 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -96558,7 +96557,7 @@ index dfe90657..390920e 100644
mm->total_vm += pages;
-@@ -1183,7 +1274,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+@@ -1186,7 +1274,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
mm->exec_vm += pages;
@@ -96567,7 +96566,7 @@ index dfe90657..390920e 100644
mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -1213,6 +1304,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
+@@ -1216,6 +1304,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -96575,7 +96574,7 @@ index dfe90657..390920e 100644
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1239,7 +1331,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1242,7 +1331,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -96584,7 +96583,7 @@ index dfe90657..390920e 100644
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -1265,7 +1357,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1268,7 +1357,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -96593,7 +96592,7 @@ index dfe90657..390920e 100644
if (addr & ~PAGE_MASK)
return addr;
-@@ -1276,6 +1368,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1279,6 +1368,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -96637,7 +96636,7 @@ index dfe90657..390920e 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1363,6 +1492,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1366,6 +1492,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
@@ -96647,7 +96646,7 @@ index dfe90657..390920e 100644
addr = mmap_region(file, addr, len, vm_flags, pgoff);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
-@@ -1456,7 +1588,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1459,7 +1588,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
vm_flags_t vm_flags = vma->vm_flags;
/* If it was private or non-writable, the write bit is already clear */
@@ -96656,7 +96655,7 @@ index dfe90657..390920e 100644
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1502,7 +1634,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1505,7 +1634,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
@@ -96679,7 +96678,7 @@ index dfe90657..390920e 100644
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
-@@ -1521,11 +1668,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1524,11 +1668,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
@@ -96692,7 +96691,7 @@ index dfe90657..390920e 100644
}
/*
-@@ -1556,6 +1702,16 @@ munmap_back:
+@@ -1559,6 +1702,16 @@ munmap_back:
goto unacct_error;
}
@@ -96709,7 +96708,7 @@ index dfe90657..390920e 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1575,6 +1731,13 @@ munmap_back:
+@@ -1578,6 +1731,13 @@ munmap_back:
if (error)
goto unmap_and_free_vma;
@@ -96723,7 +96722,7 @@ index dfe90657..390920e 100644
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
-@@ -1608,6 +1771,12 @@ munmap_back:
+@@ -1611,6 +1771,12 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -96736,7 +96735,7 @@ index dfe90657..390920e 100644
/* Once vma denies write, undo our temporary denial count */
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
-@@ -1616,6 +1785,7 @@ out:
+@@ -1619,6 +1785,7 @@ out:
perf_event_mmap(vma);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -96744,7 +96743,7 @@ index dfe90657..390920e 100644
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
-@@ -1648,6 +1818,12 @@ unmap_and_free_vma:
+@@ -1651,6 +1818,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -96757,7 +96756,7 @@ index dfe90657..390920e 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1655,7 +1831,63 @@ unacct_error:
+@@ -1658,7 +1831,63 @@ unacct_error:
return error;
}
@@ -96822,7 +96821,7 @@ index dfe90657..390920e 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1703,11 +1935,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1706,11 +1935,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
@@ -96853,7 +96852,7 @@ index dfe90657..390920e 100644
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
-@@ -1757,7 +2007,7 @@ found:
+@@ -1760,7 +2007,7 @@ found:
return gap_start;
}
@@ -96862,7 +96861,7 @@ index dfe90657..390920e 100644
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
-@@ -1811,6 +2061,24 @@ check_current:
+@@ -1814,6 +2061,24 @@ check_current:
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
@@ -96887,7 +96886,7 @@ index dfe90657..390920e 100644
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
-@@ -1874,6 +2142,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1877,6 +2142,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
@@ -96895,7 +96894,7 @@ index dfe90657..390920e 100644
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
-@@ -1881,11 +2150,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1884,11 +2150,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -96912,7 +96911,7 @@ index dfe90657..390920e 100644
return addr;
}
-@@ -1894,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1897,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
@@ -96920,7 +96919,7 @@ index dfe90657..390920e 100644
return vm_unmapped_area(&info);
}
#endif
-@@ -1912,6 +2186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1915,6 +2186,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -96928,7 +96927,7 @@ index dfe90657..390920e 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
-@@ -1920,12 +2195,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1923,12 +2195,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -96946,7 +96945,7 @@ index dfe90657..390920e 100644
return addr;
}
-@@ -1934,6 +2213,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1937,6 +2213,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
@@ -96954,7 +96953,7 @@ index dfe90657..390920e 100644
addr = vm_unmapped_area(&info);
/*
-@@ -1946,6 +2226,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1949,6 +2226,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -96967,7 +96966,7 @@ index dfe90657..390920e 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -2046,6 +2332,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -2049,6 +2332,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -96996,7 +96995,7 @@ index dfe90657..390920e 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -2062,6 +2370,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2065,6 +2370,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -97004,7 +97003,7 @@ index dfe90657..390920e 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -2072,6 +2381,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2075,6 +2381,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -97012,7 +97011,7 @@ index dfe90657..390920e 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -2101,37 +2411,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2104,37 +2411,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -97070,7 +97069,7 @@ index dfe90657..390920e 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -2166,6 +2487,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2169,6 +2487,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -97079,7 +97078,7 @@ index dfe90657..390920e 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
-@@ -2180,6 +2503,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2183,6 +2503,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -97088,7 +97087,7 @@ index dfe90657..390920e 100644
/*
* We must make sure the anon_vma is allocated
-@@ -2193,6 +2518,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2196,6 +2518,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -97104,7 +97103,7 @@ index dfe90657..390920e 100644
vma_lock_anon_vma(vma);
/*
-@@ -2202,9 +2536,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2205,9 +2536,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -97123,7 +97122,7 @@ index dfe90657..390920e 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -2229,13 +2571,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2232,13 +2571,27 @@ int expand_downwards(struct vm_area_struct *vma,
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
@@ -97151,7 +97150,7 @@ index dfe90657..390920e 100644
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
return error;
-@@ -2333,6 +2689,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2336,6 +2689,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -97165,7 +97164,7 @@ index dfe90657..390920e 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2377,6 +2740,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2380,6 +2740,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -97182,7 +97181,7 @@ index dfe90657..390920e 100644
vma_rb_erase(vma, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -2404,14 +2777,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2407,14 +2777,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -97216,7 +97215,7 @@ index dfe90657..390920e 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -2424,11 +2816,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2427,6 +2816,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -97239,14 +97238,7 @@ index dfe90657..390920e 100644
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
-
-- if (anon_vma_clone(new, vma))
-+ err = anon_vma_clone(new, vma);
-+ if (err)
- goto out_free_mpol;
-
- if (new->vm_file)
-@@ -2443,6 +2852,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2447,6 +2852,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -97285,7 +97277,7 @@ index dfe90657..390920e 100644
/* Success. */
if (!err)
return 0;
-@@ -2452,10 +2893,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2456,10 +2893,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
@@ -97305,7 +97297,7 @@ index dfe90657..390920e 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2468,6 +2917,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2472,6 +2917,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -97321,7 +97313,7 @@ index dfe90657..390920e 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2479,11 +2937,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2483,11 +2937,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -97352,7 +97344,7 @@ index dfe90657..390920e 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2558,6 +3035,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2562,6 +3035,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -97361,7 +97353,7 @@ index dfe90657..390920e 100644
return 0;
}
-@@ -2566,6 +3045,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2570,6 +3045,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -97375,7 +97367,7 @@ index dfe90657..390920e 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2579,16 +3065,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2583,16 +3065,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -97392,7 +97384,7 @@ index dfe90657..390920e 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2602,6 +3078,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2606,6 +3078,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -97400,7 +97392,7 @@ index dfe90657..390920e 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2609,10 +3086,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2613,10 +3086,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -97425,7 +97417,7 @@ index dfe90657..390920e 100644
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;
-@@ -2626,21 +3117,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2630,21 +3117,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -97450,7 +97442,7 @@ index dfe90657..390920e 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2654,7 +3144,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2658,7 +3144,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -97459,7 +97451,7 @@ index dfe90657..390920e 100644
return -ENOMEM;
}
-@@ -2668,10 +3158,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2672,10 +3158,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -97473,7 +97465,7 @@ index dfe90657..390920e 100644
return addr;
}
-@@ -2733,6 +3224,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2737,6 +3224,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -97481,7 +97473,7 @@ index dfe90657..390920e 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2750,6 +3242,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2754,6 +3242,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
@@ -97495,7 +97487,7 @@ index dfe90657..390920e 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2773,7 +3272,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2777,7 +3272,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -97517,7 +97509,7 @@ index dfe90657..390920e 100644
return 0;
}
-@@ -2792,6 +3305,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2796,6 +3305,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
@@ -97526,7 +97518,7 @@ index dfe90657..390920e 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2856,6 +3371,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2860,6 +3371,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -97566,7 +97558,7 @@ index dfe90657..390920e 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2867,6 +3415,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2871,6 +3415,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -97574,7 +97566,7 @@ index dfe90657..390920e 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2937,6 +3486,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2941,6 +3486,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -98237,7 +98229,7 @@ index fd26d04..0cea1b0 100644
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff --git a/mm/rmap.c b/mm/rmap.c
-index cdbd312..cb05259 100644
+index cab9820..cb05259 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
@@ -98317,7 +98309,7 @@ index cdbd312..cb05259 100644
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
-@@ -270,10 +304,11 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
@@ -98326,23 +98318,7 @@ index cdbd312..cb05259 100644
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
-+ int error;
-
- /* Don't bother if the parent process has no anon_vma here. */
- if (!pvma->anon_vma)
-@@ -283,8 +318,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
- * First, attach the new VMA to the parent VMA's anon_vmas,
- * so rmap can find non-COWed pages in child processes.
- */
-- if (anon_vma_clone(vma, pvma))
-- return -ENOMEM;
-+ error = anon_vma_clone(vma, pvma);
-+ if (error)
-+ return error;
-
- /* Then add our own anon_vma. */
- anon_vma = anon_vma_alloc();
-@@ -374,8 +410,10 @@ static void anon_vma_ctor(void *data)
+@@ -376,8 +410,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
@@ -99910,34 +99886,6 @@ index 0fdf968..991ff6a 100644
if (v->nr_pages)
seq_printf(m, " pages=%d", v->nr_pages);
-diff --git a/mm/vmpressure.c b/mm/vmpressure.c
-index d4042e7..c5afd57 100644
---- a/mm/vmpressure.c
-+++ b/mm/vmpressure.c
-@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
- unsigned long scanned;
- unsigned long reclaimed;
-
-+ spin_lock(&vmpr->sr_lock);
- /*
- * Several contexts might be calling vmpressure(), so it is
- * possible that the work was rescheduled again before the old
-@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
- * here. No need for any locks here since we don't care if
- * vmpr->reclaimed is in sync.
- */
-- if (!vmpr->scanned)
-+ scanned = vmpr->scanned;
-+ if (!scanned) {
-+ spin_unlock(&vmpr->sr_lock);
- return;
-+ }
-
-- spin_lock(&vmpr->sr_lock);
-- scanned = vmpr->scanned;
- reclaimed = vmpr->reclaimed;
- vmpr->scanned = 0;
- vmpr->reclaimed = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index def5dd2..4ce55cec 100644
--- a/mm/vmstat.c
@@ -101313,7 +101261,7 @@ index fdac61c..e5e5b46 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index b0db904..dc1f9f2 100644
+index 4617586..d6ea668 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -101351,7 +101299,7 @@ index b0db904..dc1f9f2 100644
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
-@@ -2684,6 +2687,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2685,6 +2688,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -101361,7 +101309,7 @@ index b0db904..dc1f9f2 100644
have_flags = true;
flags = nla_get_u16(attr);
break;
-@@ -2754,6 +2760,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2755,6 +2761,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -101968,7 +101916,7 @@ index 017fa5e..d61ebac 100644
return nh->nh_saddr;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index 8c8493e..d5214a4 100644
+index 278836f..482db7b 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -56,13 +56,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
@@ -102175,7 +102123,7 @@ index 580dd96..9fcef7e 100644
msg.msg_flags = flags;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
-index e4a8f76..dd8ad72 100644
+index b0a9cb4..8e8f8d2 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -44,7 +44,7 @@
@@ -103227,7 +103175,7 @@ index 7b32652..0bc348b 100644
table = kmemdup(ipv6_icmp_table_template,
sizeof(ipv6_icmp_table_template),
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
-index b27f6d3..1a2977b 100644
+index 4a230b1..a1d47b8 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,7 +71,7 @@ struct ip6gre_net {
@@ -103301,7 +103249,7 @@ index 657639d..8b609c5 100644
.maxtype = IFLA_IPTUN_MAX,
.policy = ip6_tnl_policy,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
-index 9a5339f..8fc3c37 100644
+index 28456c9..13a4115 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -103313,7 +103261,7 @@ index 9a5339f..8fc3c37 100644
static int vti6_net_id __read_mostly;
struct vti6_net {
-@@ -892,7 +892,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+@@ -901,7 +901,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
};
diff --git a/3.14.26/4425_grsec_remove_EI_PAX.patch b/3.14.27/4425_grsec_remove_EI_PAX.patch
index 86e242a..86e242a 100644
--- a/3.14.26/4425_grsec_remove_EI_PAX.patch
+++ b/3.14.27/4425_grsec_remove_EI_PAX.patch
diff --git a/3.14.26/4427_force_XATTR_PAX_tmpfs.patch b/3.14.27/4427_force_XATTR_PAX_tmpfs.patch
index aa540ad..aa540ad 100644
--- a/3.14.26/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.14.27/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.14.26/4430_grsec-remove-localversion-grsec.patch b/3.14.27/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.14.26/4430_grsec-remove-localversion-grsec.patch
+++ b/3.14.27/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.14.26/4435_grsec-mute-warnings.patch b/3.14.27/4435_grsec-mute-warnings.patch
index 392cefb..392cefb 100644
--- a/3.14.26/4435_grsec-mute-warnings.patch
+++ b/3.14.27/4435_grsec-mute-warnings.patch
diff --git a/3.14.26/4440_grsec-remove-protected-paths.patch b/3.14.27/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.14.26/4440_grsec-remove-protected-paths.patch
+++ b/3.14.27/4440_grsec-remove-protected-paths.patch
diff --git a/3.14.26/4450_grsec-kconfig-default-gids.patch b/3.14.27/4450_grsec-kconfig-default-gids.patch
index 722821b..722821b 100644
--- a/3.14.26/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.27/4450_grsec-kconfig-default-gids.patch
diff --git a/3.14.26/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.27/4465_selinux-avc_audit-log-curr_ip.patch
index f92c155..f92c155 100644
--- a/3.14.26/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.14.27/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.14.26/4470_disable-compat_vdso.patch b/3.14.27/4470_disable-compat_vdso.patch
index d5eed75..d5eed75 100644
--- a/3.14.26/4470_disable-compat_vdso.patch
+++ b/3.14.27/4470_disable-compat_vdso.patch
diff --git a/3.14.26/4475_emutramp_default_on.patch b/3.14.27/4475_emutramp_default_on.patch
index ad4967a..ad4967a 100644
--- a/3.14.26/4475_emutramp_default_on.patch
+++ b/3.14.27/4475_emutramp_default_on.patch
diff --git a/3.17.6/0000_README b/3.17.7/0000_README
index 502f413..202e6df 100644
--- a/3.17.6/0000_README
+++ b/3.17.7/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.17.6-201412142110.patch
+Patch: 4420_grsecurity-3.0-3.17.7-201412170700.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.17.6/4420_grsecurity-3.0-3.17.6-201412142110.patch b/3.17.7/4420_grsecurity-3.0-3.17.7-201412170700.patch
index 44d9bab..f3e2d34 100644
--- a/3.17.6/4420_grsecurity-3.0-3.17.6-201412142110.patch
+++ b/3.17.7/4420_grsecurity-3.0-3.17.7-201412170700.patch
@@ -370,7 +370,7 @@ index 1edd5fd..107ff46 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index bb43e9e..9dfc034 100644
+index 267f893..78c2d4b 100644
--- a/Makefile
+++ b/Makefile
@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -7467,7 +7467,7 @@ index f1baadd..5472dca 100644
{
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
-index 9f7ecbd..6e370fc 100644
+index 1588716..42cfa97 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1428,5 +1428,6 @@ void bpf_jit_free(struct bpf_prog *fp)
@@ -13534,7 +13534,7 @@ index bd49ec6..94c7f58 100644
}
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index 14fe7cb..829b962 100644
+index b5bb498..74110e8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
@@ -41459,10 +41459,10 @@ index 2e0613e..a8b94d9 100644
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 7bd17b3..ffa0a11 100644
+index 8663c21..46f2cbf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12441,13 +12441,13 @@ struct intel_quirk {
+@@ -12439,13 +12439,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -41478,7 +41478,7 @@ index 7bd17b3..ffa0a11 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -12455,18 +12455,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -12453,18 +12453,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -44144,19 +44144,6 @@ index cdc7df4..a2fdfdb 100644
.kind = "ipoib",
.maxtype = IFLA_IPOIB_MAX,
.policy = ipoib_policy,
-diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
-index de05545..b535322 100644
---- a/drivers/input/evdev.c
-+++ b/drivers/input/evdev.c
-@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
-
- err_free_client:
- evdev_detach_client(evdev, client);
-- kfree(client);
-+ kvfree(client);
- return error;
- }
-
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 24c41ba..102d71f 100644
--- a/drivers/input/gameport/gameport.c
@@ -47787,10 +47774,10 @@ index cf49c22..971b133 100644
struct sm_sysfs_attribute *vendor_attribute;
char *vendor;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
-index d163e11..f517018 100644
+index fc061c3..53853ef 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
-@@ -548,7 +548,7 @@ nla_put_failure:
+@@ -553,7 +553,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -49348,10 +49335,10 @@ index 59caa06..de191b3 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index 81a8a29..ae60a58 100644
+index 2aa3a8d..d15e3c4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -2762,7 +2762,7 @@ nla_put_failure:
+@@ -2758,7 +2758,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -49360,7 +49347,7 @@ index 81a8a29..ae60a58 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -2809,7 +2809,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -2805,7 +2805,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -50248,29 +50235,6 @@ index a912dc0..a8225ba 100644
u16 int_num;
ZD_ASSERT(in_interrupt());
-diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
-index ca82f54..3767771 100644
---- a/drivers/net/xen-netfront.c
-+++ b/drivers/net/xen-netfront.c
-@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
- len = skb_frag_size(frag);
- offset = frag->page_offset;
-
-- /* Data must not cross a page boundary. */
-- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
--
- /* Skip unused frames from start of page */
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
-@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
- while (len > 0) {
- unsigned long bytes;
-
-- BUG_ON(offset >= PAGE_SIZE);
--
- bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 683671a..4519fc2 100644
--- a/drivers/nfc/nfcwilink.c
@@ -62170,6 +62134,33 @@ index 9f9992b..8b59411 100644
return 0;
}
return 1;
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index b88edc0..c4088ab 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -264,10 +264,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT2_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+@@ -841,8 +839,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT2_FS_XATTR
+- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (def_mount_opts & EXT2_DEFM_ACL)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 9142614..97484fa 100644
--- a/fs/ext2/xattr.c
@@ -62212,6 +62203,33 @@ index 158b5d4..2432610 100644
return 0;
}
return 1;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 2c42e73..cf5b892 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -649,10 +649,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
+ #ifdef CONFIG_EXT3_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (test_opt(sb, POSIX_ACL))
+@@ -1749,8 +1747,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT3_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT3_FS_XATTR
+- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (def_mount_opts & EXT3_DEFM_ACL)
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index c6874be..f8a6ae8 100644
--- a/fs/ext3/xattr.c
@@ -62463,72 +62481,6 @@ index 2d1e5803..1b082d415 100644
}
static int
-diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
-index 6df8d3d..b8b92c2 100644
---- a/fs/fat/namei_vfat.c
-+++ b/fs/fat/namei_vfat.c
-@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
- }
-
- alias = d_find_alias(inode);
-- if (alias && !vfat_d_anon_disconn(alias)) {
-+ /*
-+ * Checking "alias->d_parent == dentry->d_parent" to make sure
-+ * FS is not corrupted (especially double linked dir).
-+ */
-+ if (alias && alias->d_parent == dentry->d_parent &&
-+ !vfat_d_anon_disconn(alias)) {
- /*
- * This inode has non anonymous-DCACHE_DISCONNECTED
- * dentry. This means, the user did ->lookup() by an
-@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
-
- out:
- mutex_unlock(&MSDOS_SB(sb)->s_lock);
-- dentry->d_time = dentry->d_parent->d_inode->i_version;
-- dentry = d_splice_alias(inode, dentry);
-- if (dentry)
-- dentry->d_time = dentry->d_parent->d_inode->i_version;
-- return dentry;
--
-+ if (!inode)
-+ dentry->d_time = dir->i_version;
-+ return d_splice_alias(inode, dentry);
- error:
- mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return ERR_PTR(err);
-@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
-
-- dentry->d_time = dentry->d_parent->d_inode->i_version;
- d_instantiate(dentry, inode);
- out:
- mutex_unlock(&MSDOS_SB(sb)->s_lock);
-@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
- clear_nlink(inode);
- inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
- fat_detach(inode);
-+ dentry->d_time = dir->i_version;
- out:
- mutex_unlock(&MSDOS_SB(sb)->s_lock);
-
-@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
- clear_nlink(inode);
- inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
- fat_detach(inode);
-+ dentry->d_time = dir->i_version;
- out:
- mutex_unlock(&MSDOS_SB(sb)->s_lock);
-
-@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
- /* timestamp is already written, so mark_inode_dirty() is unneeded. */
-
-- dentry->d_time = dentry->d_parent->d_inode->i_version;
- d_instantiate(dentry, inode);
-
- mutex_unlock(&MSDOS_SB(sb)->s_lock);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 22d1c3d..600cf7e 100644
--- a/fs/fcntl.c
@@ -67983,6 +67935,21 @@ index 735c2c2..81b91af 100644
#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
#define __fs_changed(gen,s) (gen != get_generation (s))
#define fs_changed(gen,s) \
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index d46e88a..1a06a94 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1857,6 +1857,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+ sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+ sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
++#ifdef CONFIG_REISERFS_FS_XATTR
++ /* turn on user xattrs by default */
++ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
++#endif
+ /* no preallocation minimum, be smart in reiserfs_file_write instead */
+ sbi->s_alloc_options.preallocmin = 0;
+ /* Preallocate by 16 blocks (17-1) at once */
diff --git a/fs/select.c b/fs/select.c
index 467bb1c..cf9d65a 100644
--- a/fs/select.c
@@ -87008,7 +86975,7 @@ index 6c10762..3e5de0c 100644
/**
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
-index 29d6a94..235d3d8 100644
+index 29d6a94..235d3d84 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
@@ -96426,7 +96393,7 @@ index 44c6bd2..60369dc3 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 37b80fc..9cdef79 100644
+index e497def..9cdef79 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -96476,39 +96443,6 @@ index 37b80fc..9cdef79 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-@@ -815,20 +821,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- if (!pte_file(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
-- if (swap_duplicate(entry) < 0)
-- return entry.val;
-+ if (likely(!non_swap_entry(entry))) {
-+ if (swap_duplicate(entry) < 0)
-+ return entry.val;
-
-- /* make sure dst_mm is on swapoff's mmlist. */
-- if (unlikely(list_empty(&dst_mm->mmlist))) {
-- spin_lock(&mmlist_lock);
-- if (list_empty(&dst_mm->mmlist))
-- list_add(&dst_mm->mmlist,
-- &src_mm->mmlist);
-- spin_unlock(&mmlist_lock);
-- }
-- if (likely(!non_swap_entry(entry)))
-+ /* make sure dst_mm is on swapoff's mmlist. */
-+ if (unlikely(list_empty(&dst_mm->mmlist))) {
-+ spin_lock(&mmlist_lock);
-+ if (list_empty(&dst_mm->mmlist))
-+ list_add(&dst_mm->mmlist,
-+ &src_mm->mmlist);
-+ spin_unlock(&mmlist_lock);
-+ }
- rss[MM_SWAPENTS]++;
-- else if (is_migration_entry(entry)) {
-+ } else if (is_migration_entry(entry)) {
- page = migration_entry_to_page(entry);
-
- if (PageAnon(page))
@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -97282,7 +97216,7 @@ index ce84cb0..6d5a9aa 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index ebc25fa..9135e65 100644
+index a226d31..9135e65 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -41,6 +41,7 @@
@@ -97368,21 +97302,7 @@ index ebc25fa..9135e65 100644
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -752,8 +780,11 @@ again: remove_next = 1 + (end > next->vm_end);
- * shrinking vma had, to cover any anon pages imported.
- */
- if (exporter && exporter->anon_vma && !importer->anon_vma) {
-- if (anon_vma_clone(importer, exporter))
-- return -ENOMEM;
-+ int error;
-+
-+ error = anon_vma_clone(importer, exporter);
-+ if (error)
-+ return error;
- importer->anon_vma = exporter->anon_vma;
- }
- }
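The hunk removed above (and the matching ones in __split_vma and mm/rmap.c further down) returns whatever error anon_vma_clone() actually produced instead of hard-coding -ENOMEM; it drops out of the hardened patch, presumably because the updated base kernel already carries it. A minimal sketch of the propagate-the-callee's-error pattern follows, with a hypothetical clone_thing() standing in for anon_vma_clone().

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical callee: may fail for more than one reason. */
static int clone_thing(int simulate)
{
    if (simulate == 1)
        return -ENOMEM;   /* allocation failure */
    if (simulate == 2)
        return -EINTR;    /* some other failure mode */
    return 0;
}

/* Before: "if (clone_thing(x)) return -ENOMEM;" discarded the real cause.
 * After: capture and propagate the callee's own error code. */
static int caller(int simulate)
{
    int error;

    error = clone_thing(simulate);
    if (error)
        return error;
    return 0;
}

int main(void)
{
    for (int s = 0; s <= 2; s++) {
        int rc = caller(s);
        printf("caller(%d) = %d (%s)\n", s, rc, rc ? strerror(-rc) : "ok");
    }
    return 0;
}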
-@@ -949,6 +980,12 @@ static int
+@@ -952,6 +980,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -97395,7 +97315,7 @@ index ebc25fa..9135e65 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -968,6 +1005,12 @@ static int
+@@ -971,6 +1005,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -97408,7 +97328,7 @@ index ebc25fa..9135e65 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
-@@ -1010,13 +1053,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -1013,13 +1053,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -97430,7 +97350,7 @@ index ebc25fa..9135e65 100644
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -1032,6 +1082,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1035,6 +1082,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -97446,7 +97366,7 @@ index ebc25fa..9135e65 100644
/*
* Can it merge with the predecessor?
*/
-@@ -1051,9 +1110,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1054,9 +1110,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
/* cases 1, 6 */
err = vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -97472,7 +97392,7 @@ index ebc25fa..9135e65 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(prev, vm_flags);
-@@ -1067,12 +1141,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1070,12 +1141,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -97502,7 +97422,7 @@ index ebc25fa..9135e65 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(area, vm_flags);
-@@ -1181,8 +1270,10 @@ none:
+@@ -1184,8 +1270,10 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -97515,7 +97435,7 @@ index ebc25fa..9135e65 100644
mm->total_vm += pages;
-@@ -1190,7 +1281,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+@@ -1193,7 +1281,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
mm->exec_vm += pages;
@@ -97524,7 +97444,7 @@ index ebc25fa..9135e65 100644
mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -1220,6 +1311,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
+@@ -1223,6 +1311,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -97532,7 +97452,7 @@ index ebc25fa..9135e65 100644
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1246,7 +1338,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1249,7 +1338,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -97541,7 +97461,7 @@ index ebc25fa..9135e65 100644
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -1272,7 +1364,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1275,7 +1364,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -97550,7 +97470,7 @@ index ebc25fa..9135e65 100644
if (addr & ~PAGE_MASK)
return addr;
-@@ -1283,6 +1375,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1286,6 +1375,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -97594,7 +97514,7 @@ index ebc25fa..9135e65 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1370,6 +1499,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1373,6 +1499,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
@@ -97604,7 +97524,7 @@ index ebc25fa..9135e65 100644
addr = mmap_region(file, addr, len, vm_flags, pgoff);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
-@@ -1463,7 +1595,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1466,7 +1595,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
vm_flags_t vm_flags = vma->vm_flags;
/* If it was private or non-writable, the write bit is already clear */
@@ -97613,7 +97533,7 @@ index ebc25fa..9135e65 100644
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1509,7 +1641,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1512,7 +1641,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
@@ -97636,7 +97556,7 @@ index ebc25fa..9135e65 100644
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
-@@ -1528,11 +1675,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1531,11 +1675,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
@@ -97649,7 +97569,7 @@ index ebc25fa..9135e65 100644
}
/*
-@@ -1563,6 +1709,16 @@ munmap_back:
+@@ -1566,6 +1709,16 @@ munmap_back:
goto unacct_error;
}
@@ -97666,7 +97586,7 @@ index ebc25fa..9135e65 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1593,6 +1749,13 @@ munmap_back:
+@@ -1596,6 +1749,13 @@ munmap_back:
if (error)
goto unmap_and_free_vma;
@@ -97680,7 +97600,7 @@ index ebc25fa..9135e65 100644
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
-@@ -1626,6 +1789,12 @@ munmap_back:
+@@ -1629,6 +1789,12 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -97693,7 +97613,7 @@ index ebc25fa..9135e65 100644
/* Once vma denies write, undo our temporary denial count */
if (file) {
if (vm_flags & VM_SHARED)
-@@ -1638,6 +1807,7 @@ out:
+@@ -1641,6 +1807,7 @@ out:
perf_event_mmap(vma);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -97701,7 +97621,7 @@ index ebc25fa..9135e65 100644
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
-@@ -1673,6 +1843,12 @@ allow_write_and_free_vma:
+@@ -1676,6 +1843,12 @@ allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
@@ -97714,7 +97634,7 @@ index ebc25fa..9135e65 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1680,7 +1856,63 @@ unacct_error:
+@@ -1683,7 +1856,63 @@ unacct_error:
return error;
}
@@ -97779,7 +97699,7 @@ index ebc25fa..9135e65 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1728,11 +1960,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1731,11 +1960,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
@@ -97810,7 +97730,7 @@ index ebc25fa..9135e65 100644
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
-@@ -1782,7 +2032,7 @@ found:
+@@ -1785,7 +2032,7 @@ found:
return gap_start;
}
@@ -97819,7 +97739,7 @@ index ebc25fa..9135e65 100644
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
-@@ -1836,6 +2086,24 @@ check_current:
+@@ -1839,6 +2086,24 @@ check_current:
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
@@ -97844,7 +97764,7 @@ index ebc25fa..9135e65 100644
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
-@@ -1899,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1902,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
@@ -97852,7 +97772,7 @@ index ebc25fa..9135e65 100644
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
-@@ -1906,11 +2175,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1909,11 +2175,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -97869,7 +97789,7 @@ index ebc25fa..9135e65 100644
return addr;
}
-@@ -1919,6 +2192,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1922,6 +2192,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
@@ -97877,7 +97797,7 @@ index ebc25fa..9135e65 100644
return vm_unmapped_area(&info);
}
#endif
-@@ -1937,6 +2211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1940,6 +2211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -97885,7 +97805,7 @@ index ebc25fa..9135e65 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
-@@ -1945,12 +2220,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1948,12 +2220,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -97903,7 +97823,7 @@ index ebc25fa..9135e65 100644
return addr;
}
-@@ -1959,6 +2238,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1962,6 +2238,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
@@ -97911,7 +97831,7 @@ index ebc25fa..9135e65 100644
addr = vm_unmapped_area(&info);
/*
-@@ -1971,6 +2251,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1974,6 +2251,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -97924,7 +97844,7 @@ index ebc25fa..9135e65 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -2071,6 +2357,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -2074,6 +2357,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -97953,7 +97873,7 @@ index ebc25fa..9135e65 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -2087,6 +2395,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2090,6 +2395,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -97961,7 +97881,7 @@ index ebc25fa..9135e65 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -2097,6 +2406,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2100,6 +2406,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -97969,7 +97889,7 @@ index ebc25fa..9135e65 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -2126,37 +2436,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2129,37 +2436,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -98027,7 +97947,7 @@ index ebc25fa..9135e65 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -2191,6 +2512,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2194,6 +2512,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -98036,7 +97956,7 @@ index ebc25fa..9135e65 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(vma->vm_mm);
-@@ -2205,6 +2528,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2208,6 +2528,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -98045,7 +97965,7 @@ index ebc25fa..9135e65 100644
/*
* We must make sure the anon_vma is allocated
-@@ -2218,6 +2543,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2221,6 +2543,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -98061,7 +97981,7 @@ index ebc25fa..9135e65 100644
vma_lock_anon_vma(vma);
/*
-@@ -2227,9 +2561,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2230,9 +2561,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -98080,7 +98000,7 @@ index ebc25fa..9135e65 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -2254,13 +2596,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2257,13 +2596,27 @@ int expand_downwards(struct vm_area_struct *vma,
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
@@ -98108,7 +98028,7 @@ index ebc25fa..9135e65 100644
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(vma->vm_mm);
return error;
-@@ -2358,6 +2714,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2361,6 +2714,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -98122,7 +98042,7 @@ index ebc25fa..9135e65 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2402,6 +2765,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2405,6 +2765,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -98139,7 +98059,7 @@ index ebc25fa..9135e65 100644
vma_rb_erase(vma, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -2429,14 +2802,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2432,14 +2802,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -98173,7 +98093,7 @@ index ebc25fa..9135e65 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -2449,11 +2841,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2452,6 +2841,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -98196,14 +98116,7 @@ index ebc25fa..9135e65 100644
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
-
-- if (anon_vma_clone(new, vma))
-+ err = anon_vma_clone(new, vma);
-+ if (err)
- goto out_free_mpol;
-
- if (new->vm_file)
-@@ -2468,6 +2877,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2472,6 +2877,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -98242,7 +98155,7 @@ index ebc25fa..9135e65 100644
/* Success. */
if (!err)
return 0;
-@@ -2477,10 +2918,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2481,10 +2918,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
@@ -98262,7 +98175,7 @@ index ebc25fa..9135e65 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2493,6 +2942,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2497,6 +2942,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -98278,7 +98191,7 @@ index ebc25fa..9135e65 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2504,11 +2962,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2508,11 +2962,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -98309,7 +98222,7 @@ index ebc25fa..9135e65 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2583,6 +3060,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2587,6 +3060,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -98318,7 +98231,7 @@ index ebc25fa..9135e65 100644
return 0;
}
-@@ -2591,6 +3070,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2595,6 +3070,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -98332,7 +98245,7 @@ index ebc25fa..9135e65 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2604,16 +3090,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2608,16 +3090,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -98349,7 +98262,7 @@ index ebc25fa..9135e65 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2627,6 +3103,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2631,6 +3103,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -98357,7 +98270,7 @@ index ebc25fa..9135e65 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2634,10 +3111,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2638,10 +3111,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -98382,7 +98295,7 @@ index ebc25fa..9135e65 100644
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
return error;
-@@ -2651,21 +3142,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2655,21 +3142,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -98407,7 +98320,7 @@ index ebc25fa..9135e65 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2679,7 +3169,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2683,7 +3169,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -98416,7 +98329,7 @@ index ebc25fa..9135e65 100644
return -ENOMEM;
}
-@@ -2693,10 +3183,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2697,10 +3183,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -98430,7 +98343,7 @@ index ebc25fa..9135e65 100644
return addr;
}
-@@ -2758,6 +3249,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2762,6 +3249,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -98438,7 +98351,7 @@ index ebc25fa..9135e65 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2775,6 +3267,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2779,6 +3267,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
@@ -98452,7 +98365,7 @@ index ebc25fa..9135e65 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2798,7 +3297,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2802,7 +3297,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -98474,7 +98387,7 @@ index ebc25fa..9135e65 100644
return 0;
}
-@@ -2817,6 +3330,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2821,6 +3330,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
@@ -98483,7 +98396,7 @@ index ebc25fa..9135e65 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2881,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2885,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -98523,7 +98436,7 @@ index ebc25fa..9135e65 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2892,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2896,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -98531,7 +98444,7 @@ index ebc25fa..9135e65 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2974,6 +3523,22 @@ static struct vm_area_struct *__install_special_mapping(
+@@ -2978,6 +3523,22 @@ static struct vm_area_struct *__install_special_mapping(
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -99162,7 +99075,7 @@ index 5077afc..846c9ef 100644
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff --git a/mm/rmap.c b/mm/rmap.c
-index e01318d..7a532bd 100644
+index 1807ca3..7a532bd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
@@ -99242,7 +99155,7 @@ index e01318d..7a532bd 100644
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
-@@ -270,10 +304,11 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
@@ -99251,23 +99164,7 @@ index e01318d..7a532bd 100644
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
-+ int error;
-
- /* Don't bother if the parent process has no anon_vma here. */
- if (!pvma->anon_vma)
-@@ -283,8 +318,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
- * First, attach the new VMA to the parent VMA's anon_vmas,
- * so rmap can find non-COWed pages in child processes.
- */
-- if (anon_vma_clone(vma, pvma))
-- return -ENOMEM;
-+ error = anon_vma_clone(vma, pvma);
-+ if (error)
-+ return error;
-
- /* Then add our own anon_vma. */
- anon_vma = anon_vma_alloc();
-@@ -374,8 +410,10 @@ static void anon_vma_ctor(void *data)
+@@ -376,8 +410,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
@@ -99341,7 +99238,7 @@ index 469f90d..34a09ee 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 7c52b38..3ccc17e 100644
+index cc91c1e..0676592 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -316,10 +316,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -100830,34 +100727,6 @@ index 2b0aa54..b451f74 100644
if (v->nr_pages)
seq_printf(m, " pages=%d", v->nr_pages);
-diff --git a/mm/vmpressure.c b/mm/vmpressure.c
-index d4042e7..c5afd57 100644
---- a/mm/vmpressure.c
-+++ b/mm/vmpressure.c
-@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
- unsigned long scanned;
- unsigned long reclaimed;
-
-+ spin_lock(&vmpr->sr_lock);
- /*
- * Several contexts might be calling vmpressure(), so it is
- * possible that the work was rescheduled again before the old
-@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
- * here. No need for any locks here since we don't care if
- * vmpr->reclaimed is in sync.
- */
-- if (!vmpr->scanned)
-+ scanned = vmpr->scanned;
-+ if (!scanned) {
-+ spin_unlock(&vmpr->sr_lock);
- return;
-+ }
-
-- spin_lock(&vmpr->sr_lock);
-- scanned = vmpr->scanned;
- reclaimed = vmpr->reclaimed;
- vmpr->scanned = 0;
- vmpr->reclaimed = 0;
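The mm/vmpressure.c hunk removed above is the race fix that takes vmpr->sr_lock before sampling vmpr->scanned, rather than testing the shared counter unlocked and only locking afterwards; again it appears to be dropped because the base kernel now has it. A minimal pthread sketch of the same check-under-lock, early-unlock pattern is below; work_fn, producer and stats are illustrative names only.

#include <pthread.h>
#include <stdio.h>

/* Shared state updated by producers, drained by a worker. */
static struct {
    pthread_mutex_t lock;
    unsigned long scanned;
    unsigned long reclaimed;
} stats = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void producer(unsigned long s, unsigned long r)
{
    pthread_mutex_lock(&stats.lock);
    stats.scanned += s;
    stats.reclaimed += r;
    pthread_mutex_unlock(&stats.lock);
}

/* Take the lock *before* reading scanned; bail out early if there is
 * nothing to do, otherwise snapshot and reset under the same lock. */
static void work_fn(void)
{
    unsigned long scanned, reclaimed;

    pthread_mutex_lock(&stats.lock);
    scanned = stats.scanned;
    if (!scanned) {
        pthread_mutex_unlock(&stats.lock);
        return;
    }
    reclaimed = stats.reclaimed;
    stats.scanned = 0;
    stats.reclaimed = 0;
    pthread_mutex_unlock(&stats.lock);

    printf("processed: scanned=%lu reclaimed=%lu\n", scanned, reclaimed);
}

int main(void)
{
    work_fn();            /* nothing pending: returns quietly */
    producer(128, 32);
    work_fn();            /* drains the snapshot */
    return 0;
}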
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e9ab104..de275bd 100644
--- a/mm/vmstat.c
@@ -102235,7 +102104,7 @@ index 8b849dd..cd88bfc 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index f0493e3..c3ffd7f 100644
+index 4921b65..7cb175e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -102282,7 +102151,7 @@ index f0493e3..c3ffd7f 100644
goto nla_put_failure;
if (1) {
-@@ -2780,6 +2783,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2781,6 +2784,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -102292,7 +102161,7 @@ index f0493e3..c3ffd7f 100644
have_flags = true;
flags = nla_get_u16(attr);
break;
-@@ -2850,6 +2856,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2851,6 +2857,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -102791,6 +102660,19 @@ index 32755cb..236d827 100644
err_alloc:
return -ENOMEM;
}
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index bf78bc3..f6dbf03 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1427,7 +1427,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ return ip_recv_error(sk, msg, len, addr_len);
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
++ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
+ #endif
+ return -EINVAL;
+ }
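The net/ipv4/af_inet.c hunk above adjusts a callsite to the hardened patch's treatment of pingv6_ops as a pointer (the "." access becomes "->"), which is the usual shape of a constified ops table: the table itself can live in read-only memory and callers reach it through a pointer. A minimal userspace sketch of that pattern follows; the struct and function names here are invented for illustration.

#include <stdio.h>

/* An operations table of function pointers. */
struct ping_ops {
    int (*recv_error)(int sock, void *msg, int len);
};

static int v6_recv_error(int sock, void *msg, int len)
{
    (void)sock; (void)msg;
    printf("v6 recv_error, len=%d\n", len);
    return 0;
}

/* The table is const, so it can be placed in read-only memory;
 * callers go through a pointer and dereference with '->'. */
static const struct ping_ops pingv6_ops_table = {
    .recv_error = v6_recv_error,
};
static const struct ping_ops *ops = &pingv6_ops_table;

int main(void)
{
    /* struct access "ops.recv_error" becomes "ops->recv_error" */
    return ops->recv_error(3, NULL, 64);
}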
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 214882e..ec032f6 100644
--- a/net/ipv4/devinet.c
@@ -102924,7 +102806,7 @@ index 4a74ea8..32335a7 100644
return nh->nh_saddr;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index dd73bea..a2eec02 100644
+index 657d80c..0ad744a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -59,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
@@ -103131,7 +103013,7 @@ index 2407e5d..edc2f1a 100644
msg.msg_flags = flags;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
-index e453cb7..3c8d952 100644
+index 6073952..8449cf5 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -45,7 +45,7 @@
@@ -103317,7 +103199,7 @@ index 2510c02..cfb34fa 100644
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index 3524762..2e88bfd 100644
+index b503a30..c43e3e1 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -59,7 +59,7 @@ struct ping_table {
@@ -103356,16 +103238,7 @@ index 3524762..2e88bfd 100644
info, (u8 *)icmph);
#endif
}
-@@ -860,7 +860,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- return ip_recv_error(sk, msg, len, addr_len);
- #if IS_ENABLED(CONFIG_IPV6)
- } else if (family == AF_INET6) {
-- return pingv6_ops.ipv6_recv_error(sk, msg, len,
-+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
- addr_len);
- #endif
- }
-@@ -918,10 +918,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -910,10 +910,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
if (inet6_sk(sk)->rxopt.all)
@@ -103378,7 +103251,7 @@ index 3524762..2e88bfd 100644
else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
-@@ -1113,7 +1113,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -1105,7 +1105,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -104134,7 +104007,7 @@ index 06ba3e5..5c08d38 100644
table = kmemdup(ipv6_icmp_table_template,
sizeof(ipv6_icmp_table_template),
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
-index cacb493..3cae894 100644
+index 33d08ab..585b825 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,8 +71,8 @@ struct ip6gre_net {
@@ -104210,7 +104083,7 @@ index d2eeb3b..c186e9a 100644
.maxtype = IFLA_IPTUN_MAX,
.policy = ip6_tnl_policy,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
-index 99c9487..63f4d92 100644
+index 3abcd4a..cefa215 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -104222,7 +104095,7 @@ index 99c9487..63f4d92 100644
static int vti6_net_id __read_mostly;
struct vti6_net {
-@@ -972,7 +972,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+@@ -981,7 +981,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
[IFLA_VTI_OKEY] = { .type = NLA_U32 },
};
@@ -105835,7 +105708,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 0007b81..cb08369 100644
+index b6bf8e8..7884ddf 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -273,7 +273,7 @@ static void netlink_overrun(struct sock *sk)
@@ -118517,10 +118390,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..f527934
+index 0000000..9addbd7
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5911 @@
+@@ -0,0 +1,5912 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
@@ -119089,6 +118962,7 @@ index 0000000..f527934
+check_clk_sync_6717 check_clk_sync 2 6717 NULL
+video_proc_write_6724 video_proc_write 3 6724 NULL
+posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
++inet_recv_error_6744 inet_recv_error 3 6744 NULL
+kobject_add_varg_6781 kobject_add_varg 0 6781 NULL
+iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
+ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
diff --git a/3.17.6/4425_grsec_remove_EI_PAX.patch b/3.17.7/4425_grsec_remove_EI_PAX.patch
index 86e242a..86e242a 100644
--- a/3.17.6/4425_grsec_remove_EI_PAX.patch
+++ b/3.17.7/4425_grsec_remove_EI_PAX.patch
diff --git a/3.17.6/4427_force_XATTR_PAX_tmpfs.patch b/3.17.7/4427_force_XATTR_PAX_tmpfs.patch
index 21c0171..21c0171 100644
--- a/3.17.6/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.17.7/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.17.6/4430_grsec-remove-localversion-grsec.patch b/3.17.7/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.17.6/4430_grsec-remove-localversion-grsec.patch
+++ b/3.17.7/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.17.6/4435_grsec-mute-warnings.patch b/3.17.7/4435_grsec-mute-warnings.patch
index 4a959cc..4a959cc 100644
--- a/3.17.6/4435_grsec-mute-warnings.patch
+++ b/3.17.7/4435_grsec-mute-warnings.patch
diff --git a/3.17.6/4440_grsec-remove-protected-paths.patch b/3.17.7/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.17.6/4440_grsec-remove-protected-paths.patch
+++ b/3.17.7/4440_grsec-remove-protected-paths.patch
diff --git a/3.17.6/4450_grsec-kconfig-default-gids.patch b/3.17.7/4450_grsec-kconfig-default-gids.patch
index 039bad1..039bad1 100644
--- a/3.17.6/4450_grsec-kconfig-default-gids.patch
+++ b/3.17.7/4450_grsec-kconfig-default-gids.patch
diff --git a/3.17.6/4465_selinux-avc_audit-log-curr_ip.patch b/3.17.7/4465_selinux-avc_audit-log-curr_ip.patch
index 747ac53..747ac53 100644
--- a/3.17.6/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.17.7/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.17.6/4470_disable-compat_vdso.patch b/3.17.7/4470_disable-compat_vdso.patch
index dec59f7..dec59f7 100644
--- a/3.17.6/4470_disable-compat_vdso.patch
+++ b/3.17.7/4470_disable-compat_vdso.patch
diff --git a/3.17.6/4475_emutramp_default_on.patch b/3.17.7/4475_emutramp_default_on.patch
index ad4967a..ad4967a 100644
--- a/3.17.6/4475_emutramp_default_on.patch
+++ b/3.17.7/4475_emutramp_default_on.patch
diff --git a/3.2.65/0000_README b/3.2.65/0000_README
index 70f7d8b..a5e4133 100644
--- a/3.2.65/0000_README
+++ b/3.2.65/0000_README
@@ -178,7 +178,7 @@ Patch: 1064_linux-3.2.65.patch
From: http://www.kernel.org
Desc: Linux 3.2.65
-Patch: 4420_grsecurity-3.0-3.2.65-201412142045.patch
+Patch: 4420_grsecurity-3.0-3.2.65-201412170654.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.65/4420_grsecurity-3.0-3.2.65-201412142045.patch b/3.2.65/4420_grsecurity-3.0-3.2.65-201412170654.patch
index 209df09..6097404 100644
--- a/3.2.65/4420_grsecurity-3.0-3.2.65-201412142045.patch
+++ b/3.2.65/4420_grsecurity-3.0-3.2.65-201412170654.patch
@@ -59204,10 +59204,33 @@ index a8cbe1b..fed04cb 100644
(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
return 0;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
-index 94b9e32..4b85c15 100644
+index 94b9e32..3476e8c 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
-@@ -1495,6 +1495,7 @@ static struct file_system_type ext2_fs_type = {
+@@ -259,10 +259,8 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
+ #ifdef CONFIG_EXT2_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+@@ -827,8 +825,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT2_FS_XATTR
+- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (def_mount_opts & EXT2_DEFM_ACL)
+@@ -1495,6 +1493,7 @@ static struct file_system_type ext2_fs_type = {
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
@@ -59258,10 +59281,33 @@ index a203892..4e64db5 100644
}
return 1;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
-index 562ede3..5e56315 100644
+index 562ede3..62fff74 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
-@@ -3058,6 +3058,7 @@ static struct file_system_type ext3_fs_type = {
+@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
+ #ifdef CONFIG_EXT3_FS_XATTR
+ if (test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+- if (!test_opt(sb, XATTR_USER) &&
+- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
++ if (!test_opt(sb, XATTR_USER))
+ seq_puts(seq, ",nouser_xattr");
+- }
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (test_opt(sb, POSIX_ACL))
+@@ -1699,8 +1697,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
+ if (def_mount_opts & EXT3_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+ #ifdef CONFIG_EXT3_FS_XATTR
+- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
+- set_opt(sbi->s_mount_opt, XATTR_USER);
++ /* always enable user xattrs */
++ set_opt(sbi->s_mount_opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EXT3_FS_POSIX_ACL
+ if (def_mount_opts & EXT3_DEFM_ACL)
+@@ -3058,6 +3056,7 @@ static struct file_system_type ext3_fs_type = {
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
@@ -65331,10 +65377,21 @@ index 7a99811..a7c96c4 100644
SF(s_do_balance), SF(s_unneeded_left_neighbor),
SF(s_good_search_by_key_reada), SF(s_bmaps),
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
-index 569498a..0886e50f 100644
+index 569498a..636bb35 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
-@@ -2295,6 +2295,7 @@ struct file_system_type reiserfs_fs_type = {
+@@ -1664,6 +1664,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
+ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
+ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
++#ifdef CONFIG_REISERFS_FS_XATTR
++ /* turn on user xattrs by default */
++ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
++#endif
+ /* no preallocation minimum, be smart in
+ reiserfs_file_write instead */
+ REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+@@ -2295,6 +2299,7 @@ struct file_system_type reiserfs_fs_type = {
.kill_sb = reiserfs_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};