author		Anthony G. Basile <blueness@gentoo.org>	2013-02-16 14:27:53 -0500
committer	Anthony G. Basile <blueness@gentoo.org>	2013-02-16 14:27:53 -0500
commit		c73573a5981eabe77337393eba7548812bf5e811 (patch)
tree		174635e495d85dfcb5f320650d14f9f3924b5e21 /2.6.32
parent		Fix 3.2.38/4470_disable-compat_vdso.patch (diff)
download	hardened-patchset-c73573a5981eabe77337393eba7548812bf5e811.tar.gz
hardened-patchset-c73573a5981eabe77337393eba7548812bf5e811.tar.bz2
hardened-patchset-c73573a5981eabe77337393eba7548812bf5e811.zip
Grsec/PaX: 2.9.1-{2.6.32.60,3.2.38,3.7.8}-201302161158
Diffstat (limited to '2.6.32')
-rw-r--r--	2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302161146.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302092140.patch)	420
1 files changed, 383 insertions, 37 deletions
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302092140.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302161146.patch
index af2ad8d..beb1e7f 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302092140.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302161146.patch
@@ -13251,7 +13251,7 @@ index 33927d2..ccde329 100644
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index af6fd36..da3ffbb 100644
+index af6fd36..fffaf4b 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
@@ -13314,7 +13314,19 @@ index af6fd36..da3ffbb 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -130,6 +170,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+@@ -167,9 +212,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -13345,7 +13357,7 @@ index af6fd36..da3ffbb 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -302,6 +367,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -13361,7 +13373,7 @@ index af6fd36..da3ffbb 100644
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
-@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -472,7 +546,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -13370,7 +13382,7 @@ index af6fd36..da3ffbb 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -495,7 +569,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -13384,7 +13396,7 @@ index af6fd36..da3ffbb 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
+@@ -506,6 +585,20 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -13405,7 +13417,7 @@ index af6fd36..da3ffbb 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
+@@ -611,11 +704,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -28185,7 +28197,7 @@ index 30938c1..bda3d5d 100644
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 7d095ad..f833fa2 100644
+index 7d095ad..704b879 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -123,7 +123,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
@@ -28264,7 +28276,17 @@ index 7d095ad..f833fa2 100644
/* clear_bss() already clear the empty_zero_page */
reservedpages = 0;
-@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
+@@ -839,6 +845,9 @@ int kern_addr_valid(unsigned long addr)
+ if (pud_none(*pud))
+ return 0;
+
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+@@ -861,8 +870,8 @@ int kern_addr_valid(unsigned long addr)
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_START,
.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
@@ -28275,7 +28297,7 @@ index 7d095ad..f833fa2 100644
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
+@@ -896,7 +905,7 @@ int in_gate_area_no_task(unsigned long addr)
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -63668,6 +63690,63 @@ index f450bc9..2b747c8 100644
result =
hso_start_serial_device(serial_table[i], GFP_NOIO);
hso_kick_transmit(dev2ser(serial_table[i]));
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 07f69ee..32a818f 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -331,6 +331,12 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ unsigned long lockflags;
+ size_t size = dev->rx_urb_size;
+
++ /* prevent rx skb allocation when error ratio is high */
++ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
++ usb_free_urb(urb);
++ return -ENOLINK;
++ }
++
+ if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
+ if (netif_msg_rx_err (dev))
+ devdbg (dev, "no rx skb");
+@@ -481,6 +487,17 @@ block:
+ break;
+ }
+
++ /* stop rx if packet error rate is high */
++ if (++dev->pkt_cnt > 30) {
++ dev->pkt_cnt = 0;
++ dev->pkt_err = 0;
++ } else {
++ if (state == rx_cleanup)
++ dev->pkt_err++;
++ if (dev->pkt_err > 20)
++ set_bit(EVENT_RX_KILL, &dev->flags);
++ }
++
+ defer_bh(dev, skb, &dev->rxq);
+
+ if (urb) {
+@@ -762,6 +779,11 @@ int usbnet_open (struct net_device *net)
+ framing);
+ }
+
++ /* reset rx error state */
++ dev->pkt_cnt = 0;
++ dev->pkt_err = 0;
++ clear_bit(EVENT_RX_KILL, &dev->flags);
++
+ // delay posting reads until we're fully open
+ tasklet_schedule (&dev->bh);
+ return retval;
+@@ -1129,6 +1151,9 @@ static void usbnet_bh (unsigned long param)
+ }
+ }
+
++ /* restart RX again after disabling due to high error rate */
++ clear_bit(EVENT_RX_KILL, &dev->flags);
++
+ // waiting for all pending urbs to complete?
+ if (dev->wait) {
+ if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 9e94c4b..316ee65 100644
--- a/drivers/net/vxge/vxge-config.c
@@ -99343,9 +99422,18 @@ index 379eaed..3471a57 100644
mode_t mode, struct proc_dir_entry *base,
read_proc_t *read_proc, void * data)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
-index 7456d7d..6c1cfc9 100644
+index 7456d7d..0021b34 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
+@@ -87,7 +87,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned
+ extern int ptrace_attach(struct task_struct *tsk);
+ extern int ptrace_detach(struct task_struct *, unsigned int);
+ extern void ptrace_disable(struct task_struct *);
+-extern int ptrace_check_attach(struct task_struct *task, int kill);
++extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
+ extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
+ extern void ptrace_notify(int exit_code);
+ extern void __ptrace_link(struct task_struct *child,
@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ 1
@@ -99504,7 +99592,7 @@ index 14a86bc..17d0700 100644
/*
* CONFIG_RELAY kernel API, kernel/relay.c
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 71849bf..fb7ea50 100644
+index 71849bf..a612150 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio;
@@ -99776,7 +99864,25 @@ index 71849bf..fb7ea50 100644
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-@@ -2616,6 +2726,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
+@@ -2459,7 +2569,16 @@ static inline void thread_group_cputime_free(struct signal_struct *sig)
+ extern void recalc_sigpending_and_wake(struct task_struct *t);
+ extern void recalc_sigpending(void);
+
+-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
++
++static inline void signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
++}
++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
++}
+
+ /*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+@@ -2616,6 +2735,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
@@ -100777,6 +100883,26 @@ index a34fa89..ef176bc 100644
unsigned long active_duration;
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index f814730..61d9ec3 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -33,6 +33,7 @@ struct usbnet {
+ wait_queue_head_t *wait;
+ struct mutex phy_mutex;
+ unsigned char suspend_count;
++ unsigned char pkt_cnt, pkt_err;
+
+ /* i/o info: pipes etc */
+ unsigned in, out;
+@@ -65,6 +66,7 @@ struct usbnet {
+ # define EVENT_STS_SPLIT 3
+ # define EVENT_LINK_RESET 4
+ # define EVENT_RX_PAUSED 5
++# define EVENT_RX_KILL 10
+ };
+
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 79b9837..b5a56f9 100644
--- a/include/linux/vermagic.h
@@ -105817,10 +105943,114 @@ index dfadc5b..7f59404 100644
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 05625f6..702665c 100644
+index 05625f6..741869b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
+@@ -56,7 +56,7 @@ static void ptrace_untrace(struct task_struct *child)
+ child->signal->group_stop_count)
+ __set_task_state(child, TASK_STOPPED);
+ else
+- signal_wake_up(child, 1);
++ ptrace_signal_wake_up(child, true);
+ }
+ spin_unlock(&child->sighand->siglock);
+ }
+@@ -80,10 +80,54 @@ void __ptrace_unlink(struct task_struct *child)
+ ptrace_untrace(child);
+ }
+
+-/*
+- * Check that we have indeed attached to the thing..
++/* Ensure that nothing can wake it up, even SIGKILL */
++static bool ptrace_freeze_traced(struct task_struct *task)
++{
++ bool ret = false;
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++ task->state = __TASK_TRACED;
++ ret = true;
++ }
++ spin_unlock_irq(&task->sighand->siglock);
++
++ return ret;
++}
++
++static void ptrace_unfreeze_traced(struct task_struct *task)
++{
++ if (task->state != __TASK_TRACED)
++ return;
++
++ WARN_ON(!task->ptrace || task->parent != current);
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (__fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++ else
++ task->state = TASK_TRACED;
++ spin_unlock_irq(&task->sighand->siglock);
++}
++
++/**
++ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
++ * @child: ptracee to check for
++ * @ignore_state: don't check whether @child is currently %TASK_TRACED
++ *
++ * Check whether @child is being ptraced by %current and ready for further
++ * ptrace operations. If @ignore_state is %false, @child also should be in
++ * %TASK_TRACED state and on return the child is guaranteed to be traced
++ * and not executing. If @ignore_state is %true, @child can be in any
++ * state.
++ *
++ * CONTEXT:
++ * Grabs and releases tasklist_lock and @child->sighand->siglock.
++ *
++ * RETURNS:
++ * 0 on success, -ESRCH if %child is not ready.
+ */
+-int ptrace_check_attach(struct task_struct *child, int kill)
++int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ {
+ int ret = -ESRCH;
+
+@@ -95,29 +139,34 @@ int ptrace_check_attach(struct task_struct *child, int kill)
+ * be changed by us so it's not changing right after this.
+ */
+ read_lock(&tasklist_lock);
+- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+- ret = 0;
++ if (child->ptrace && child->parent == current) {
++ WARN_ON(child->state == __TASK_TRACED);
+ /*
+ * child->sighand can't be NULL, release_task()
+ * does ptrace_unlink() before __exit_signal().
+ */
+- spin_lock_irq(&child->sighand->siglock);
+- if (task_is_stopped(child))
+- child->state = TASK_TRACED;
+- else if (!task_is_traced(child) && !kill)
+- ret = -ESRCH;
+- spin_unlock_irq(&child->sighand->siglock);
++ if (ignore_state || ptrace_freeze_traced(child))
++ ret = 0;
+ }
+ read_unlock(&tasklist_lock);
+
+- if (!ret && !kill)
+- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
++ if (!ret && !ignore_state) {
++ if (!wait_task_inactive(child, __TASK_TRACED)) {
++ /*
++ * This can only happen if may_ptrace_stop() fails and
++ * ptrace_stop() changes ->state back to TASK_RUNNING,
++ * so we should not worry about leaking __TASK_TRACED.
++ */
++ WARN_ON(child->state == __TASK_TRACED);
++ ret = -ESRCH;
++ }
++ }
+
+- /* All systems go.. */
return ret;
}
@@ -105830,7 +106060,7 @@ index 05625f6..702665c 100644
{
const struct cred *cred = current_cred(), *tcred;
-@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -141,7 +190,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
cred->gid != tcred->egid ||
cred->gid != tcred->sgid ||
cred->gid != tcred->gid) &&
@@ -105841,7 +106071,7 @@ index 05625f6..702665c 100644
rcu_read_unlock();
return -EPERM;
}
-@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -149,7 +200,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
smp_rmb();
if (task->mm)
dumpable = get_dumpable(task->mm);
@@ -105852,7 +106082,7 @@ index 05625f6..702665c 100644
return -EPERM;
return security_ptrace_access_check(task, mode);
-@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -159,7 +212,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
int err;
task_lock(task);
@@ -105870,7 +106100,7 @@ index 05625f6..702665c 100644
task_unlock(task);
return !err;
}
-@@ -182,11 +196,11 @@ int ptrace_attach(struct task_struct *task)
+@@ -182,11 +244,11 @@ int ptrace_attach(struct task_struct *task)
* under ptrace.
*/
retval = -ERESTARTNOINTR;
@@ -105884,7 +106114,7 @@ index 05625f6..702665c 100644
task_unlock(task);
if (retval)
goto unlock_creds;
-@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
+@@ -199,7 +261,7 @@ int ptrace_attach(struct task_struct *task)
goto unlock_tasklist;
task->ptrace = PT_PTRACED;
@@ -105893,7 +106123,7 @@ index 05625f6..702665c 100644
task->ptrace |= PT_PTRACE_CAP;
__ptrace_link(task, current);
-@@ -209,7 +223,7 @@ int ptrace_attach(struct task_struct *task)
+@@ -209,7 +271,7 @@ int ptrace_attach(struct task_struct *task)
unlock_tasklist:
write_unlock_irq(&tasklist_lock);
unlock_creds:
@@ -105902,7 +106132,7 @@ index 05625f6..702665c 100644
out:
return retval;
}
-@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+@@ -351,6 +413,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
{
int copied = 0;
@@ -105911,7 +106141,7 @@ index 05625f6..702665c 100644
while (len > 0) {
char buf[128];
int this_len, retval;
-@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
+@@ -376,6 +440,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
{
int copied = 0;
@@ -105920,7 +106150,16 @@ index 05625f6..702665c 100644
while (len > 0) {
char buf[128];
int this_len, retval;
-@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
+@@ -506,7 +572,7 @@ static int ptrace_resume(struct task_struct *child, long request, long data)
+ }
+
+ child->exit_code = data;
+- wake_up_process(child);
++ wake_up_state(child, __TASK_TRACED);
+
+ return 0;
+ }
+@@ -517,6 +583,8 @@ int ptrace_request(struct task_struct *child, long request,
int ret = -EIO;
siginfo_t siginfo;
@@ -105929,7 +106168,7 @@ index 05625f6..702665c 100644
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
-@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
+@@ -532,18 +600,18 @@ int ptrace_request(struct task_struct *child, long request,
ret = ptrace_setoptions(child, data);
break;
case PTRACE_GETEVENTMSG:
@@ -105951,7 +106190,7 @@ index 05625f6..702665c 100644
sizeof siginfo))
ret = -EFAULT;
else
-@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+@@ -621,14 +689,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
goto out;
}
@@ -105974,7 +106213,16 @@ index 05625f6..702665c 100644
goto out_put_task_struct;
}
-@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+@@ -637,6 +712,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
+ goto out_put_task_struct;
+
+ ret = arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
+
+ out_put_task_struct:
+ put_task_struct(child);
+@@ -653,7 +730,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
return -EIO;
@@ -105983,7 +106231,7 @@ index 05625f6..702665c 100644
}
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
-@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+@@ -675,6 +752,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
siginfo_t siginfo;
int ret;
@@ -105992,7 +106240,7 @@ index 05625f6..702665c 100644
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
-@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+@@ -740,20 +819,30 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out;
}
@@ -106015,6 +106263,16 @@ index 05625f6..702665c 100644
goto out_put_task_struct;
}
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (!ret)
++ if (!ret) {
+ ret = compat_arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
++ }
+
+ out_put_task_struct:
+ put_task_struct(child);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 697c0a0..2402696 100644
--- a/kernel/rcutorture.c
@@ -106448,7 +106706,7 @@ index 0591df8..dcf3f9f 100644
if (cpu != group_first_cpu(sd->groups))
return;
diff --git a/kernel/signal.c b/kernel/signal.c
-index 2494827..cda80a0 100644
+index 2494827..02e4288 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -41,12 +41,12 @@
@@ -106494,7 +106752,34 @@ index 2494827..cda80a0 100644
if (is_global_init(tsk))
return 1;
if (handler != SIG_IGN && handler != SIG_DFL)
-@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
+@@ -513,23 +516,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
+ */
+-void signal_wake_up(struct task_struct *t, int resume)
++void signal_wake_up_state(struct task_struct *t, unsigned int state)
+ {
+- unsigned int mask;
+-
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+-
+ /*
+- * For SIGKILL, we want to wake it up in the stopped/traced/killable
++ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+ * executing another processor and just now entering stopped state.
+ * By using wake_up_state, we ensure the process will wake up and
+ * handle its death signal.
+ */
+- mask = TASK_INTERRUPTIBLE;
+- if (resume)
+- mask |= TASK_WAKEKILL;
+- if (!wake_up_state(t, mask))
++ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+ kick_process(t);
+ }
+
+@@ -627,6 +624,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
}
}
@@ -106508,7 +106793,7 @@ index 2494827..cda80a0 100644
return security_task_kill(t, info, sig, 0);
}
-@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+@@ -968,7 +972,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
return send_signal(sig, info, p, 1);
}
@@ -106517,7 +106802,7 @@ index 2494827..cda80a0 100644
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
return send_signal(sig, info, t, 0);
-@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+@@ -1005,6 +1009,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
unsigned long int flags;
int ret, blocked, ignored;
struct k_sigaction *action;
@@ -106525,7 +106810,7 @@ index 2494827..cda80a0 100644
spin_lock_irqsave(&t->sighand->siglock, flags);
action = &t->sighand->action[sig-1];
-@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+@@ -1019,9 +1024,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
}
if (action->sa.sa_handler == SIG_DFL)
t->signal->flags &= ~SIGNAL_UNKILLABLE;
@@ -106544,7 +106829,7 @@ index 2494827..cda80a0 100644
return ret;
}
-@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+@@ -1081,8 +1095,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
int ret = check_kill_permission(sig, info, p);
@@ -106557,6 +106842,26 @@ index 2494827..cda80a0 100644
return ret;
}
+@@ -1530,6 +1547,10 @@ static inline int may_ptrace_stop(void)
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_state != NULL. Otherwise it
+ * is safe to enter schedule().
++ *
++ * This is almost outdated, a task with the pending SIGKILL can't
++ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
++ * after SIGKILL was already dequeued.
+ */
+ if (unlikely(current->mm->core_state) &&
+ unlikely(current->mm == current->parent->mm))
+@@ -1611,6 +1632,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
+ * By the time we got the lock, our tracer went away.
+ * Don't drop the lock yet, another tracer may come.
+ */
++
++ /* tasklist protects us from ptrace_freeze_traced() */
+ __set_current_state(TASK_RUNNING);
+ if (clear_code)
+ current->exit_code = 0;
@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
{
siginfo_t info;
@@ -116780,9 +117085,18 @@ index 713ac59..306f6ae 100644
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
-index 914c419..7a16d2c 100644
+index 914c419..1b055b5 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
+@@ -70,7 +70,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
+ return;
+
+ if (atomic_dec_and_test(&key->refcnt)) {
+- kfree(key);
++ kzfree(key);
+ SCTP_DBG_OBJCNT_DEC(keys);
+ }
+ }
@@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
struct sctp_auth_bytes *key;
@@ -116824,6 +117138,29 @@ index acf7c4d..b29621d 100644
errout:
list_for_each_safe(pos, temp, &msg->chunks) {
list_del_init(pos);
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index 905fda5..ca48660 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
+ /* Final destructor for endpoint. */
+ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ {
++ int i;
++
+ SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
+
+ /* Free up the HMAC transform. */
+@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ sctp_inq_free(&ep->base.inqueue);
+ sctp_bind_addr_free(&ep->base.bind_addr);
+
++ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
++ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
++
+ /* Remove and free the port */
+ if (sctp_sk(ep->base.sk)->bind_hash)
+ sctp_put_port(ep->base.sk);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bb280e6..747720f 100644
--- a/net/sctp/ipv6.c
@@ -116896,9 +117233,18 @@ index 619f965..bed845a 100644
static int sctp_v4_protosw_init(void)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index 1f9843e..9cd0edd 100644
+index 1f9843e..5e9fd60 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
+@@ -3271,7 +3271,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+
+ ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
+ out:
+- kfree(authkey);
++ kzfree(authkey);
+ return ret;
+ }
+
@@ -5810,7 +5810,6 @@ pp_found:
*/
int reuse = sk->sk_reuse;