-rw-r--r--   4.7.4/0000_README                                                                                     2
-rw-r--r--   4.7.4/4420_grsecurity-3.1-4.7.4-201609211951.patch (renamed from 4.7.4/4420_grsecurity-3.1-4.7.4-201609152234.patch)   154
2 files changed, 90 insertions, 66 deletions
diff --git a/4.7.4/0000_README b/4.7.4/0000_README
index 6374649..a9515bb 100644
--- a/4.7.4/0000_README
+++ b/4.7.4/0000_README
@@ -18,7 +18,7 @@ Patch: 1003_linux-4.7.4.patch
From: http://www.kernel.org
Desc: Linux 4.7.4
-Patch: 4420_grsecurity-3.1-4.7.4-201609152234.patch
+Patch: 4420_grsecurity-3.1-4.7.4-201609211951.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.7.4/4420_grsecurity-3.1-4.7.4-201609152234.patch b/4.7.4/4420_grsecurity-3.1-4.7.4-201609211951.patch
index 84d74fa..5bddf8c 100644
--- a/4.7.4/4420_grsecurity-3.1-4.7.4-201609152234.patch
+++ b/4.7.4/4420_grsecurity-3.1-4.7.4-201609211951.patch
@@ -956,7 +956,7 @@ index d50430c..01cc53b 100644
# but it is being used too early to link to meaningful stack_chk logic.
nossp_flags := $(call cc-option, -fno-stack-protector)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 9e10c45..688ea8b 100644
+index 9e10c45..285d152 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
@@ -1404,59 +1404,29 @@ index 9e10c45..688ea8b 100644
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
-@@ -361,6 +555,30 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -360,7 +554,14 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+
return oldval;
}
- #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-+#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed
+
+static inline long long
+atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old, long long new)
+{
-+ long long oldval;
-+ unsigned long res;
-+
-+ prefetchw(&ptr->counter);
-+
-+ do {
-+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
-+ "ldrexd %1, %H1, [%3]\n"
-+ "mov %0, #0\n"
-+ "teq %1, %4\n"
-+ "teqeq %H1, %H4\n"
-+ "strexdeq %0, %5, %H5, [%3]"
-+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-+ : "r" (&ptr->counter), "r" (old), "r" (new)
-+ : "cc");
-+ } while (res);
-+
-+ return oldval;
++ return atomic64_cmpxchg_relaxed((atomic64_t *)ptr, old, new);
+}
+ #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
++#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed
static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
-@@ -380,26 +598,60 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+@@ -380,26 +581,46 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
return result;
}
+
+static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
+{
-+ long long result;
-+ unsigned long tmp;
-+
-+ prefetchw(&ptr->counter);
-+
-+ __asm__ __volatile__("@ atomic64_xchg_unchecked\n"
-+"1: ldrexd %0, %H0, [%3]\n"
-+" strexd %1, %4, %H4, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
-+ : "r" (&ptr->counter), "r" (new)
-+ : "cc");
-+
-+ return result;
++ return atomic64_xchg_relaxed((atomic64_t *)ptr, new);
+}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define atomic64_xchg_unchecked_relaxed atomic64_xchg_unchecked_relaxed
@@ -1502,7 +1472,7 @@ index 9e10c45..688ea8b 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
-@@ -423,13 +675,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -423,13 +644,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
@@ -1531,7 +1501,7 @@ index 9e10c45..688ea8b 100644
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -442,10 +706,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -442,10 +675,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
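Aside (not part of the patch): the hunks above show the 201609211951 revision dropping the duplicated LDREXD/STREXD assembly for atomic64_cmpxchg_unchecked_relaxed and atomic64_xchg_unchecked_relaxed and instead forwarding to the checked relaxed helpers through a cast. A minimal standalone C sketch of that delegation pattern, using stand-in type names rather than the real PaX definitions:

/* sketch only: both structs wrap the same 64-bit counter, so the unchecked
 * variant can cast and reuse the checked relaxed implementation instead of
 * repeating the inline assembly */
#include <stdint.h>

typedef struct { volatile int64_t counter; } a64_checked_t;    /* stands in for atomic64_t */
typedef struct { volatile int64_t counter; } a64_unchecked_t;  /* stands in for atomic64_unchecked_t */

static inline int64_t a64_cmpxchg_relaxed(a64_checked_t *ptr, int64_t old, int64_t new)
{
        /* placeholder for the real LDREXD/STREXD loop */
        return __sync_val_compare_and_swap(&ptr->counter, old, new);
}

static inline int64_t a64_cmpxchg_unchecked_relaxed(a64_unchecked_t *ptr, int64_t old, int64_t new)
{
        /* identical layout, so just forward, as the new hunk does */
        return a64_cmpxchg_relaxed((a64_checked_t *)ptr, old, new);
}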
@@ -25839,7 +25809,7 @@ index a972ac4..938c163 100644
/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 0fe6953..a253a78 100644
+index 0fe6953f..a253a78 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -93,60 +93,6 @@ static const struct cpu_dev default_cpu = {
@@ -26641,7 +26611,7 @@ index ef8017c..1543ef8 100644
NOKPROBE_SYMBOL(oops_end);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
-index fef917e..01f2cda 100644
+index fef917e..be394e2 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -61,13 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
@@ -26702,7 +26672,7 @@ index fef917e..01f2cda 100644
return ud2 == 0x0b0f;
}
+
-+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void __used pax_check_alloca(unsigned long size)
+{
+ unsigned long sp = (unsigned long)&sp, stack_left;
@@ -26714,7 +26684,7 @@ index fef917e..01f2cda 100644
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
-index d558a8a..059e830 100644
+index d558a8a..c30e5a9 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -158,6 +158,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
@@ -26791,7 +26761,7 @@ index d558a8a..059e830 100644
return ud2 == 0x0b0f;
}
+
-+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void __used pax_check_alloca(unsigned long size)
+{
+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
@@ -41630,7 +41600,7 @@ index ddc4ceb..36e29aa 100644
}
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
-index 6ac2b2b..6373ebd 100644
+index 6ac2b2b..6373ebdc 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -849,7 +849,7 @@ static void solos_bh(unsigned long card_arg)
@@ -64024,9 +63994,18 @@ index fdee772..6c3ba123 100644
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index e16487c..95eee32 100644
+index e16487c..c0987f1 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
+@@ -960,7 +960,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
+ {
+ struct tun_struct *tun = netdev_priv(dev);
+
+- if (new_hr < NET_SKB_PAD)
++ if (new_hr < 0 || new_hr < NET_SKB_PAD)
+ new_hr = NET_SKB_PAD;
+
+ tun->align = new_hr;
@@ -1558,7 +1558,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
return -EINVAL;
}
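Aside (not part of the patch): the tun_set_headroom hunk above adds an explicit negative-value check in front of the existing floor comparison, so a negative headroom request from userspace is clamped to NET_SKB_PAD. A tiny sketch of the clamp, with an assumed constant standing in for NET_SKB_PAD:

/* sketch only: clamp a caller-supplied headroom to a minimum; 32 is an
 * assumed stand-in for NET_SKB_PAD, whose real value is arch-dependent */
static int clamp_headroom(int new_hr)
{
        if (new_hr < 0 || new_hr < 32)
                new_hr = 32;
        return new_hr;
}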
@@ -70839,6 +70818,32 @@ index 109e2c9..7d3c9b5 100644
u_long s;
int enint_coal;
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 7640498..110eca9 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2388,7 +2388,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ }
+ case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ unsigned char *ver_addr;
+- int32_t user_len, cnt2end;
++ uint32_t user_len;
++ int32_t cnt2end;
+ uint8_t *pQbuffer, *ptmpuserbuffer;
+ ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+ if (!ver_addr) {
+@@ -2397,6 +2398,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ }
+ ptmpuserbuffer = ver_addr;
+ user_len = pcmdmessagefld->cmdmessage.Length;
++ if (user_len > ARCMSR_API_DATA_BUFLEN) {
++ retvalue = ARCMSR_MESSAGE_FAIL;
++ kfree(ver_addr);
++ goto message_out;
++ }
+ memcpy(ptmpuserbuffer,
+ pcmdmessagefld->messagedatabuffer, user_len);
+ spin_lock_irqsave(&acb->wqbuffer_lock, flags);
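Aside (not part of the patch): the arcmsr hunk above is a validate-before-copy fix: the user-controlled length becomes unsigned and is rejected when it exceeds the staging buffer, so the following memcpy can no longer overflow. A reduced sketch of the pattern, with an assumed buffer size standing in for ARCMSR_API_DATA_BUFLEN:

/* sketch only: bound a user-supplied length before copying into a
 * fixed-size staging buffer; 1024 is an assumed stand-in size */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int stage_user_message(const void *src, uint32_t user_len)
{
        uint8_t *buf = malloc(1024);

        if (!buf)
                return -1;
        if (user_len > 1024) {          /* the check the hunk adds */
                free(buf);
                return -1;
        }
        memcpy(buf, src, user_len);
        /* ...hand buf off to the write-queue buffer... */
        free(buf);
        return 0;
}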
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index f05e773..b48c418 100644
--- a/drivers/scsi/be2iscsi/be_main.c
@@ -129314,7 +129319,7 @@ index c1da539..1dcec55 100644
struct atmphy_ops {
int (*start)(struct atm_dev *dev);
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
-index e451534..1f808cc 100644
+index e451534..9fc0c28 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -91,6 +91,13 @@
@@ -129379,7 +129384,7 @@ index e451534..1f808cc 100644
#endif /* atomic64_xchg_relaxed */
/* atomic64_cmpxchg_relaxed */
-@@ -362,6 +389,12 @@
+@@ -362,8 +389,23 @@
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
@@ -129391,8 +129396,19 @@ index e451534..1f808cc 100644
+
#endif /* atomic64_cmpxchg_relaxed */
++#ifndef atomic64_cmpxchg_unchecked_relaxed
++#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked
++#else
++#ifndef atomic64_cmpxchg_unchecked
++#define atomic64_cmpxchg_unchecked(...) \
++ __atomic_op_fence(atomic64_cmpxchg_unchecked, __VA_ARGS__)
++#endif
++#endif
++
/* cmpxchg_relaxed */
-@@ -431,6 +464,10 @@
+ #ifndef cmpxchg_relaxed
+ #define cmpxchg_relaxed cmpxchg
+@@ -431,6 +473,10 @@
#ifndef xchg
#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
#endif
@@ -129403,7 +129419,7 @@ index e451534..1f808cc 100644
#endif /* xchg_relaxed */
/**
-@@ -442,7 +479,7 @@
+@@ -442,7 +488,7 @@
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
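Aside (not part of the patch): the include/linux/atomic.h hunks above extend the header's existing fallback scheme to the unchecked ops: when an architecture supplies no _relaxed variant, the relaxed name aliases the fully-ordered op; when it does, the fully-ordered op is generated by bracketing the relaxed one with barriers via __atomic_op_fence. A compressed sketch of that scheme under assumed stand-in names:

/* sketch only: sketch_cmpxchg_relaxed stands in for an arch-provided relaxed
 * op, and __sketch_op_fence mirrors the role of __atomic_op_fence, which
 * wraps the relaxed variant in full barriers */
#include <stdint.h>

static inline int64_t sketch_cmpxchg_relaxed(volatile int64_t *p, int64_t old, int64_t new)
{
        return __sync_val_compare_and_swap(p, old, new);  /* stand-in body */
}

#define __sketch_op_fence(op, args...)                  \
({                                                      \
        __typeof__(op##_relaxed(args)) __ret;           \
        __sync_synchronize();   /* before-fence */      \
        __ret = op##_relaxed(args);                     \
        __sync_synchronize();   /* after-fence */       \
        __ret;                                          \
})

#ifndef sketch_cmpxchg
#define sketch_cmpxchg(...) __sketch_op_fence(sketch_cmpxchg, __VA_ARGS__)
#endif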
@@ -149321,7 +149337,7 @@ index 5b72266..dc04ce5 100644
.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};
diff --git a/mm/mmap.c b/mm/mmap.c
-index de2c176..d5c0b5f 100644
+index de2c176..57c6313 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -43,6 +43,7 @@
@@ -149765,13 +149781,7 @@ index de2c176..d5c0b5f 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1630,11 +1853,20 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
- }
- }
-
-- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
-+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
- check_current:
+@@ -1635,6 +1858,15 @@ check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
@@ -161810,10 +161820,23 @@ index 9d88c62..53396b6 100644
#define gssx_dec_release_handle NULL
#define gssx_enc_get_mic NULL
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
-index e085f5a..1132fd2 100644
+index e085f5a..c947fa5 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
-@@ -1141,7 +1141,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
+@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
+ struct rsc *found;
+
+ memset(&rsci, 0, sizeof(rsci));
+- rsci.handle.data = handle->data;
+- rsci.handle.len = handle->len;
++ if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
++ return NULL;
+ found = rsc_lookup(cd, &rsci);
++ rsc_free(&rsci);
+ if (!found)
+ return NULL;
+ if (cache_check(cd, &found->h, NULL))
+@@ -1141,7 +1142,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
uint64_t *handle)
{
struct rsc rsci, *rscp = NULL;
@@ -161822,7 +161845,7 @@ index e085f5a..1132fd2 100644
long long ctxh;
struct gss_api_mech *gm = NULL;
time_t expiry;
-@@ -1152,7 +1152,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
+@@ -1152,7 +1153,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
status = -ENOMEM;
/* the handle needs to be just a unique id,
* use a static counter */
@@ -213303,10 +213326,10 @@ index f72f48f..769a657 100755
# Find all available archs
find_all_archs()
diff --git a/security/Kconfig b/security/Kconfig
-index 176758c..1222b4a 100644
+index 176758c..f682e8b 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,1021 @@
+@@ -4,6 +4,1022 @@
menu "Security options"
@@ -214202,6 +214225,7 @@ index 176758c..1222b4a 100644
+config PAX_CONSTIFY_PLUGIN
+ bool "Automatically constify eligible structures"
+ default y
++ depends on GCC_PLUGINS
+ depends on !UML && PAX_KERNEXEC
+ help
+ By saying Y here the compiler will automatically constify a class
@@ -214328,7 +214352,7 @@ index 176758c..1222b4a 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -104,7 +1119,7 @@ config INTEL_TXT
+@@ -104,7 +1120,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX