-rw-r--r--  4.6.5/0000_README                                                                                            |   2
-rw-r--r--  4.6.5/4420_grsecurity-3.1-4.6.5-201607312210.patch (renamed from 4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch) | 164
2 files changed, 125 insertions, 41 deletions
diff --git a/4.6.5/0000_README b/4.6.5/0000_README
index 016e706..a3be0b4 100644
--- a/4.6.5/0000_README
+++ b/4.6.5/0000_README
@@ -6,7 +6,7 @@ Patch: 1004_linux-4.6.5.patch
From: http://www.kernel.org
Desc: Linux 4.6.5
-Patch: 4420_grsecurity-3.1-4.6.5-201607272152.patch
+Patch: 4420_grsecurity-3.1-4.6.5-201607312210.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch b/4.6.5/4420_grsecurity-3.1-4.6.5-201607312210.patch
index 927b9ba..5a9676a 100644
--- a/4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch
+++ b/4.6.5/4420_grsecurity-3.1-4.6.5-201607312210.patch
@@ -956,7 +956,7 @@ index d50430c..01cc53b 100644
# but it is being used too early to link to meaningful stack_chk logic.
nossp_flags := $(call cc-option, -fno-stack-protector)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 9e10c45..24a14ce 100644
+index 9e10c45..5a423a2 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
@@ -1078,7 +1078,7 @@ index 9e10c45..24a14ce 100644
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
-+#define atomic_add_return_unchecked atomic_add_return_unchecked_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
@@ -1190,7 +1190,7 @@ index 9e10c45..24a14ce 100644
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
-@@ -201,16 +300,38 @@ ATOMIC_OP(xor, ^=, eor)
+@@ -201,16 +300,32 @@ ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -1216,20 +1216,14 @@ index 9e10c45..24a14ce 100644
+}
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v) == 0;
-+}
++#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
-+static inline int atomic_inc_return_unchecked_relaxed(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked_relaxed(1, v);
-+}
++#define atomic_inc_return_unchecked_relaxed(v) (atomic_add_return_unchecked_relaxed(1, v))
#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-@@ -221,6 +342,14 @@ typedef struct {
+@@ -221,6 +336,14 @@ typedef struct {
long long counter;
} atomic64_t;
@@ -1244,7 +1238,7 @@ index 9e10c45..24a14ce 100644
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-@@ -237,6 +366,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -237,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1264,7 +1258,7 @@ index 9e10c45..24a14ce 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
-@@ -245,6 +387,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -245,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
);
}
@@ -1280,7 +1274,7 @@ index 9e10c45..24a14ce 100644
#else
static inline long long atomic64_read(const atomic64_t *v)
{
-@@ -259,6 +410,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -259,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1300,7 +1294,7 @@ index 9e10c45..24a14ce 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
-@@ -273,43 +437,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -273,43 +431,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
: "cc");
}
@@ -1382,7 +1376,7 @@ index 9e10c45..24a14ce 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
-@@ -317,6 +511,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
+@@ -317,6 +505,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
return result; \
}
@@ -1392,15 +1386,15 @@ index 9e10c45..24a14ce 100644
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_OP_RETURN(op, op1, op2)
-@@ -325,6 +522,7 @@ ATOMIC64_OPS(add, adds, adc)
+@@ -325,6 +516,7 @@ ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-+#define atomic64_add_return_unchecked atomic64_add_return_unchecked_relaxed
++#define atomic64_add_return_unchecked_relaxed atomic64_add_return_unchecked_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_andnot atomic64_andnot
-@@ -336,7 +534,12 @@ ATOMIC64_OP(xor, eor, eor)
+@@ -336,7 +528,12 @@ ATOMIC64_OP(xor, eor, eor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -1413,11 +1407,11 @@ index 9e10c45..24a14ce 100644
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
-@@ -361,6 +564,33 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -361,6 +558,31 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
return oldval;
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-+#define atomic64_cmpxchg_unchecked atomic64_cmpxchg_unchecked_relaxed
++#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed
+
+static inline long long
+atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old,
@@ -1426,7 +1420,7 @@ index 9e10c45..24a14ce 100644
+ long long oldval;
+ unsigned long res;
+
-+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
@@ -1440,14 +1434,37 @@ index 9e10c45..24a14ce 100644
+ : "cc");
+ } while (res);
+
-+ smp_mb();
-+
+ return oldval;
+}
static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
-@@ -385,21 +615,35 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+@@ -380,26 +602,60 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+
+ return result;
+ }
++
++static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
++{
++ long long result;
++ unsigned long tmp;
++
++ prefetchw(&ptr->counter);
++
++ __asm__ __volatile__("@ atomic64_xchg_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" strexd %1, %4, %H4, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (new)
++ : "cc");
++
++ return result;
++}
+ #define atomic64_xchg_relaxed atomic64_xchg_relaxed
++#define atomic64_xchg_unchecked_relaxed atomic64_xchg_unchecked_relaxed
+
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long result;
@@ -1489,7 +1506,7 @@ index 9e10c45..24a14ce 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
-@@ -423,13 +667,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -423,13 +679,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
@@ -1518,7 +1535,7 @@ index 9e10c45..24a14ce 100644
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -442,10 +698,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -442,10 +710,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
@@ -1615,7 +1632,7 @@ index 3848259..bee9d84 100644
struct of_cpuidle_method {
const char *method;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
-index 99d9f63..e3e4da6 100644
+index 99d9f63..ec44cb5 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -42,7 +42,6 @@
@@ -1626,11 +1643,12 @@ index 99d9f63..e3e4da6 100644
/*
* Domain types
-@@ -51,9 +50,27 @@
+@@ -51,9 +50,28 @@
#define DOMAIN_CLIENT 1
#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS 3
++#define DOMAIN_USERCLIENT DOMAIN_CLIENT
#else
+
+#ifdef CONFIG_PAX_KERNEXEC
@@ -1654,7 +1672,7 @@ index 99d9f63..e3e4da6 100644
#define domain_mask(dom) ((3) << (2 * (dom)))
#define domain_val(dom,type) ((type) << (2 * (dom)))
-@@ -62,13 +79,19 @@
+@@ -62,13 +80,19 @@
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
@@ -1677,7 +1695,7 @@ index 99d9f63..e3e4da6 100644
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#endif
-@@ -124,6 +147,17 @@ static inline void set_domain(unsigned val)
+@@ -124,6 +148,17 @@ static inline void set_domain(unsigned val)
set_domain(domain); \
} while (0)
@@ -102160,6 +102178,18 @@ index 69b8b52..9b58c2d 100644
*p = res;
put_cpu_var(last_ino);
return res;
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 116a333..0f56deb 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+ goto out;
+ }
+
++ same->dest_count = count;
+ ret = vfs_dedupe_file_range(file, same);
+ if (ret)
+ goto out;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 2ad98d6..00f8858 100644
--- a/fs/jbd2/commit.c
@@ -128001,7 +128031,7 @@ index 5bdab6b..9ae82fe 100644
#define pud_none(pud) 0
#define pud_bad(pud) 0
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index 5e1f345..e7a174a 100644
+index 5e1f345..7104090 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -22,6 +22,12 @@
@@ -128210,7 +128240,7 @@ index 5e1f345..e7a174a 100644
#undef ATOMIC_LONG_INC_DEC_OP
-@@ -187,4 +229,56 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+@@ -187,4 +229,58 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
@@ -128244,7 +128274,9 @@ index 5e1f345..e7a174a 100644
+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+#define atomic_inc_unchecked(v) atomic_inc(v)
++#ifndef atomic_inc_and_test_unchecked
+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#endif
+#ifndef atomic_inc_return_unchecked
+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
+#endif
@@ -128268,7 +128300,7 @@ index 5e1f345..e7a174a 100644
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
-index d48e78c..d29d3a3 100644
+index d48e78c..db16df1 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -16,6 +16,8 @@ typedef struct {
@@ -128280,7 +128312,7 @@ index d48e78c..d29d3a3 100644
#define ATOMIC64_INIT(i) { (i) }
extern long long atomic64_read(const atomic64_t *v);
-@@ -55,4 +57,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+@@ -55,4 +57,15 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
@@ -128293,6 +128325,7 @@ index d48e78c..d29d3a3 100644
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n))
+
#endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
@@ -128946,10 +128979,24 @@ index c1da539..1dcec55 100644
struct atmphy_ops {
int (*start)(struct atm_dev *dev);
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
-index 506c353..414ddeb 100644
+index 506c353..10739bd 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
-@@ -113,6 +113,11 @@
+@@ -91,6 +91,13 @@
+ #endif
+ #endif /* atomic_add_return_relaxed */
+
++#ifndef atomic_add_return_unchecked_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked
++#else
++#define atomic_add_return_unchecked(...) \
++ __atomic_op_fence(atomic_add_return_unchecked, __VA_ARGS__)
++#endif
++
+ /* atomic_inc_return_relaxed */
+ #ifndef atomic_inc_return_relaxed
+ #define atomic_inc_return_relaxed atomic_inc_return
+@@ -113,6 +120,11 @@
#define atomic_inc_return(...) \
__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
@@ -128961,7 +129008,19 @@ index 506c353..414ddeb 100644
#endif /* atomic_inc_return_relaxed */
/* atomic_sub_return_relaxed */
-@@ -265,6 +270,11 @@
+@@ -241,6 +253,11 @@
+ #define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_add_return_unchecked
++#define atomic64_add_return_unchecked(...) \
++ __atomic_op_fence(atomic64_add_return_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_add_return_relaxed */
+
+ /* atomic64_inc_return_relaxed */
+@@ -265,6 +282,11 @@
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
@@ -128973,7 +129032,32 @@ index 506c353..414ddeb 100644
#endif /* atomic64_inc_return_relaxed */
-@@ -442,7 +452,7 @@
+@@ -338,6 +360,11 @@
+ #define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_xchg_unchecked
++#define atomic64_xchg_unchecked(...) \
++ __atomic_op_fence(atomic64_xchg_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_xchg_relaxed */
+
+ /* atomic64_cmpxchg_relaxed */
+@@ -362,6 +389,12 @@
+ #define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_cmpxchg_unchecked
++#define atomic64_cmpxchg_unchecked(...) \
++ __atomic_op_fence(atomic64_cmpxchg_unchecked, __VA_ARGS__)
++#endif
++
+ #endif /* atomic64_cmpxchg_relaxed */
+
+ /* cmpxchg_relaxed */
+@@ -442,7 +475,7 @@
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/