author     Anthony G. Basile <blueness@gentoo.org>   2012-09-24 22:55:55 -0400
committer  Anthony G. Basile <blueness@gentoo.org>   2012-09-24 22:55:55 -0400
commit     dc72c678e5438d420fc1fe6692e8e247b412b8af (patch)
tree       4641bda24990c0d6c289e201116072cf6fa72178 /2.6.32
parent     Add missing patch for 3.2.30 (diff)
download   hardened-patchset-dc72c678e5438d420fc1fe6692e8e247b412b8af.tar.gz
           hardened-patchset-dc72c678e5438d420fc1fe6692e8e247b412b8af.tar.bz2
           hardened-patchset-dc72c678e5438d420fc1fe6692e8e247b412b8af.zip
Grsec/PaX: 2.9.1-{2.6.32.59,3.2.30,3.5.4}-201209241829
Diffstat (limited to '2.6.32')
-rw-r--r--  2.6.32/0000_README                                             |    2
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209241828.patch      | 1809
            (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209192117.patch)
2 files changed, 1651 insertions, 160 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index ac17e52..78b6cc4 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.59-201209192117.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.59-201209241828.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209192117.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209241828.patch
index b4d700f..17cdc43 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209192117.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201209241828.patch
@@ -2714,6 +2714,22 @@ index 0a0c77b..8e55a81 100644
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
* into percpu page size
*/
+diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
+index 2eb6365..416122b 100644
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -1185,6 +1185,11 @@ out:
+
+ #define PALE_RESET_ENTRY 0x80000000ffffffb0UL
+
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_vcpu *v;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 19261a9..1611b7a 100644
--- a/arch/ia64/mm/fault.c
@@ -12313,6 +12329,39 @@ index 4fe681d..bb6d40c 100644
#define flush_insn_slot(p) do { } while (0)
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index 5ed59ec..cc44e3d 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -109,6 +109,8 @@ struct x86_emulate_ops {
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu);
+
++ bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
++ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+ };
+
+ /* Type, address-of, and value of an instruction's operand. */
+@@ -190,6 +192,19 @@ struct x86_emulate_ctxt {
+ #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
+ #endif
+
++/* CPUID vendors */
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
++
++#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
++#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
++#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
++
++#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
++#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
++#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
++
+ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops);
+ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 08bc2ff..2e88d1f 100644
--- a/arch/x86/include/asm/kvm_host.h
@@ -23483,7 +23532,7 @@ index c5ee17e..d63218f 100644
if (unlikely(err)) {
/*
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 1350e43..a94b011 100644
+index 1350e43..d7384e3 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -81,8 +81,8 @@
@@ -23522,6 +23571,144 @@ index 1350e43..a94b011 100644
switch ((_dst).bytes) { \
case 1: \
____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
+@@ -1495,20 +1493,73 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
+ ss->present = 1;
+ }
+
++static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt,
++ struct x86_emulate_ops *ops)
++{
++ u32 eax, ebx, ecx, edx;
++
++ /*
++ * syscall should always be enabled in longmode - so only become
++ * vendor specific (cpuid) if other modes are active...
++ */
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ return true;
++
++ eax = 0x00000000;
++ ecx = 0x00000000;
++ if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
++ /*
++ * Intel ("GenuineIntel")
++ * remark: Intel CPUs only support "syscall" in 64bit
++ * longmode. Also an 64bit guest with a
++ * 32bit compat-app running will #UD !! While this
++ * behaviour can be fixed (by emulating) into AMD
++ * response - CPUs of AMD can't behave like Intel.
++ */
++ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
++ ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
++ edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
++ return false;
++
++ /* AMD ("AuthenticAMD") */
++ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
++ ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
++ edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
++ return true;
++
++ /* AMD ("AMDisbetter!") */
++ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
++ ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
++ edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
++ return true;
++ }
++
++ /* default: (not Intel, not AMD), apply Intel's stricter rules... */
++ return false;
++}
++
+ static int
+-emulate_syscall(struct x86_emulate_ctxt *ctxt)
++emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+ {
+ struct decode_cache *c = &ctxt->decode;
+ struct kvm_segment cs, ss;
+ u64 msr_data;
++ u64 efer = 0;
+
+ /* syscall is not available in real mode */
+ if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
+ || ctxt->mode == X86EMUL_MODE_VM86)
+ return -1;
+
++ if (!(em_syscall_is_enabled(ctxt, ops)))
++ return -1;
++
++ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+ setup_syscalls_segments(ctxt, &cs, &ss);
+
++ if (!(efer & EFER_SCE))
++ return -1;
++
+ kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+ msr_data >>= 32;
+ cs.selector = (u16)(msr_data & 0xfffc);
+@@ -2342,7 +2393,7 @@ twobyte_insn:
+ }
+ break;
+ case 0x05: /* syscall */
+- if (emulate_syscall(ctxt) == -1)
++ if (emulate_syscall(ctxt, ops) == -1)
+ goto cannot_emulate;
+ else
+ goto writeback;
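The emulate.c hunk above appears to backport the upstream SYSCALL-emulation hardening (CVE-2012-0045): outside 64-bit long mode the instruction becomes vendor specific, and a guest whose CPUID reports Intel must get #UD instead of emulation. The X86EMUL_CPUID_VENDOR_* constants added to kvm_emulate.h are just the 12-byte vendor string packed little-endian into EBX, EDX and ECX. A standalone sketch (not part of the patch; little-endian host assumed) decoding the GenuineIntel triple:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    uint32_t ebx = 0x756e6547, edx = 0x49656e69, ecx = 0x6c65746e;
    char vendor[13];

    memcpy(vendor + 0, &ebx, 4);    /* "Genu" */
    memcpy(vendor + 4, &edx, 4);    /* "ineI" */
    memcpy(vendor + 8, &ecx, 4);    /* "ntel" */
    vendor[12] = '\0';
    printf("%s\n", vendor);         /* prints "GenuineIntel" */
    return 0;
}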
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 88ad162..7e361b4 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -277,11 +277,15 @@ static struct kvm_timer_ops kpit_ops = {
+ .is_periodic = kpit_is_periodic,
+ };
+
+-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
++static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+ {
++ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ struct kvm_timer *pt = &ps->pit_timer;
+ s64 interval;
+
++ if (!irqchip_in_kernel(kvm))
++ return;
++
+ interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+
+ pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
+@@ -333,13 +337,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+ /* FIXME: enhance mode 4 precision */
+ case 4:
+ if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
+- create_pit_timer(ps, val, 0);
++ create_pit_timer(kvm, val, 0);
+ }
+ break;
+ case 2:
+ case 3:
+ if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
+- create_pit_timer(ps, val, 1);
++ create_pit_timer(kvm, val, 1);
+ }
+ break;
+ default:
+diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
+index 7d6058a..85a8721 100644
+--- a/arch/x86/kvm/irq.h
++++ b/arch/x86/kvm/irq.h
+@@ -85,7 +85,11 @@ static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
+
+ static inline int irqchip_in_kernel(struct kvm *kvm)
+ {
+- return pic_irqchip(kvm) != NULL;
++ int ret;
++
++ ret = (pic_irqchip(kvm) != NULL);
++ smp_rmb();
++ return ret;
+ }
+
+ void kvm_pic_reset(struct kvm_kpic_state *s);
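Taken together, the i8254.c and irq.h hunks above close a race between PIT timer arming and irqchip creation: create_pit_timer() now receives the kvm pointer and bails out when no in-kernel irqchip exists, and irqchip_in_kernel() gains an smp_rmb() that pairs with the smp_wmb() calls in the KVM_CREATE_IRQCHIP hunk further down, so a reader that observes a non-NULL vpic also observes its initialization. A minimal model of that pairing (simplified, not kernel code as-is):

struct irqchip { int initialized; };
static struct irqchip *chip;        /* NULL until published */

static void publish_chip(struct irqchip *c)
{
    c->initialized = 1;
    smp_wmb();                      /* order init before pointer store */
    chip = c;
}

static int chip_in_kernel(void)
{
    struct irqchip *c = chip;

    smp_rmb();                      /* pairs with the smp_wmb() above */
    return c != NULL;
}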
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8dfeaaa..4daa395 100644
--- a/arch/x86/kvm/lapic.c
@@ -23698,7 +23885,7 @@ index e6d925f..6bde4d6 100644
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index df1cefb..5e882ad 100644
+index df1cefb..ea708b4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
@@ -23767,7 +23954,94 @@ index df1cefb..5e882ad 100644
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
-@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
+@@ -2273,25 +2282,42 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ if (r)
+ goto out;
+ break;
+- case KVM_CREATE_IRQCHIP:
++ case KVM_CREATE_IRQCHIP: {
++ struct kvm_pic *vpic;
++
++ mutex_lock(&kvm->lock);
++ r = -EEXIST;
++ if (kvm->arch.vpic)
++ goto create_irqchip_unlock;
++ r = -EINVAL;
++ if (atomic_read(&kvm->online_vcpus))
++ goto create_irqchip_unlock;
+ r = -ENOMEM;
+- kvm->arch.vpic = kvm_create_pic(kvm);
+- if (kvm->arch.vpic) {
++ vpic = kvm_create_pic(kvm);
++ if (vpic) {
+ r = kvm_ioapic_init(kvm);
+ if (r) {
+- kfree(kvm->arch.vpic);
+- kvm->arch.vpic = NULL;
+- goto out;
++ kfree(vpic);
++ goto create_irqchip_unlock;
+ }
+ } else
+- goto out;
++ goto create_irqchip_unlock;
++ smp_wmb();
++ kvm->arch.vpic = vpic;
++ smp_wmb();
+ r = kvm_setup_default_irq_routing(kvm);
+ if (r) {
++ mutex_lock(&kvm->irq_lock);
+ kfree(kvm->arch.vpic);
+ kfree(kvm->arch.vioapic);
+- goto out;
++ kvm->arch.vpic = NULL;
++ kvm->arch.vioapic = NULL;
++ mutex_unlock(&kvm->irq_lock);
+ }
++ create_irqchip_unlock:
++ mutex_unlock(&kvm->lock);
+ break;
++ }
+ case KVM_CREATE_PIT:
+ u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
+ goto create_pit;
+@@ -2871,12 +2897,35 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
+ }
+ EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
+
++static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
++ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
++{
++ struct kvm_cpuid_entry2 *cpuid = NULL;
++
++ if (eax && ecx)
++ cpuid = kvm_find_cpuid_entry(ctxt->vcpu,
++ *eax, *ecx);
++
++ if (cpuid) {
++ *eax = cpuid->eax;
++ *ecx = cpuid->ecx;
++ if (ebx)
++ *ebx = cpuid->ebx;
++ if (edx)
++ *edx = cpuid->edx;
++ return true;
++ }
++
++ return false;
++}
++
+ static struct x86_emulate_ops emulate_ops = {
+ .read_std = kvm_read_guest_virt_system,
+ .fetch = kvm_fetch_guest_virt,
+ .read_emulated = emulator_read_emulated,
+ .write_emulated = emulator_write_emulated,
+ .cmpxchg_emulated = emulator_cmpxchg_emulated,
++ .get_cpuid = emulator_get_cpuid,
+ };
+
+ static void cache_all_regs(struct kvm_vcpu *vcpu)
+@@ -3260,10 +3309,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
.notifier_call = kvmclock_cpufreq_notifier
};
@@ -23780,6 +24054,18 @@ index df1cefb..5e882ad 100644
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
+@@ -4990,6 +5039,11 @@ void kvm_arch_check_processor_compat(void *rtn)
+ kvm_x86_ops->check_processor_compatibility(rtn);
+ }
+
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ struct page *page;
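The KVM_CREATE_IRQCHIP rework above also serializes irqchip creation against vCPU creation: it re-checks kvm->arch.vpic under kvm->lock, refuses once any vCPU exists (atomic_read(&kvm->online_vcpus)), and publishes the pic pointer only after full setup. kvm_vcpu_compatible() then enforces the resulting invariant that an in-kernel irqchip and a per-vCPU APIC either both exist or both do not. A condensed sketch of the create-once-under-lock shape (error paths trimmed):

static int create_irqchip_once(struct kvm *kvm)
{
    struct kvm_pic *vpic;
    int r = -EEXIST;

    mutex_lock(&kvm->lock);
    if (kvm->arch.vpic)                      /* lost the race */
        goto unlock;
    r = -EINVAL;
    if (atomic_read(&kvm->online_vcpus))     /* too late: vCPUs exist */
        goto unlock;
    r = -ENOMEM;
    vpic = kvm_create_pic(kvm);
    if (!vpic)
        goto unlock;
    smp_wmb();                               /* init before publish */
    kvm->arch.vpic = vpic;
    r = 0;
unlock:
    mutex_unlock(&kvm->lock);
    return r;
}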
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7e59dc1..b88c98f 100644
--- a/arch/x86/lguest/boot.c
@@ -35078,6 +35364,22 @@ index bf2170f..ce8cab9 100644
acpi_os_unmap_memory(virt, len);
return 0;
+diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
+index ac16fbe..dd4691a 100644
+--- a/drivers/char/tty_audit.c
++++ b/drivers/char/tty_audit.c
+@@ -94,8 +94,10 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
+ {
+ if (buf->valid == 0)
+ return;
+- if (audit_enabled == 0)
++ if (audit_enabled == 0) {
++ buf->valid = 0;
+ return;
++ }
+ tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
+ buf->data, buf->valid);
+ buf->valid = 0;
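The tty_audit hunk is a small data-leak fix: with audit disabled, tty_audit_buf_push() used to return while leaving buf->valid set, so buffered keystrokes could be flushed to the audit log much later, after audit was re-enabled, and attributed to the wrong point in time. Marking the buffer consumed on the disabled path discards them instead. A minimal standalone model (types simplified):

#include <stddef.h>

struct audit_buf { size_t valid; char data[256]; };

static void buf_push(struct audit_buf *buf, int audit_enabled)
{
    if (buf->valid == 0)
        return;
    if (!audit_enabled) {
        buf->valid = 0;     /* the added line: drop, do not retain */
        return;
    }
    /* ... emit buf->data[0 .. buf->valid) to the log ... */
    buf->valid = 0;
}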
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 123cedf..6664cb4 100644
--- a/drivers/char/tty_io.c
@@ -66781,7 +67083,7 @@ index 0133b5a..3710d09 100644
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index a64fde6..6583da2 100644
+index a64fde6..1535e95 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -31,6 +31,7 @@
@@ -66935,7 +67237,7 @@ index a64fde6..6583da2 100644
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
-+ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
+ pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
@@ -69368,7 +69670,7 @@ index f539204..b2ad18e 100644
fput(tfile);
diff --git a/fs/exec.c b/fs/exec.c
-index 86fafc6..25f0d1a 100644
+index 86fafc6..6dfde4e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -56,12 +56,33 @@
@@ -69576,7 +69878,34 @@ index 86fafc6..25f0d1a 100644
set_fs(old_fs);
return result;
}
-@@ -985,6 +1036,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
+@@ -803,7 +854,6 @@ static int de_thread(struct task_struct *tsk)
+ struct signal_struct *sig = tsk->signal;
+ struct sighand_struct *oldsighand = tsk->sighand;
+ spinlock_t *lock = &oldsighand->siglock;
+- int count;
+
+ if (thread_group_empty(tsk))
+ goto no_thread_group;
+@@ -820,13 +870,13 @@ static int de_thread(struct task_struct *tsk)
+ spin_unlock_irq(lock);
+ return -EAGAIN;
+ }
++
+ sig->group_exit_task = tsk;
+- zap_other_threads(tsk);
++ sig->notify_count = zap_other_threads(tsk);
++ if (!thread_group_leader(tsk))
++ sig->notify_count--;
+
+- /* Account for the thread group leader hanging around: */
+- count = thread_group_leader(tsk) ? 1 : 2;
+- sig->notify_count = count;
+- while (atomic_read(&sig->count) > count) {
++ while (sig->notify_count) {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(lock);
+ schedule();
+@@ -985,6 +1035,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
perf_event_comm(tsk);
}
@@ -69598,7 +69927,7 @@ index 86fafc6..25f0d1a 100644
int flush_old_exec(struct linux_binprm * bprm)
{
int retval;
-@@ -999,6 +1065,7 @@ int flush_old_exec(struct linux_binprm * bprm)
+@@ -999,6 +1064,7 @@ int flush_old_exec(struct linux_binprm * bprm)
set_mm_exe_file(bprm->mm, bprm->file);
@@ -69606,7 +69935,7 @@ index 86fafc6..25f0d1a 100644
/*
* Release all of the old mmap stuff
*/
-@@ -1023,10 +1090,6 @@ EXPORT_SYMBOL(flush_old_exec);
+@@ -1023,10 +1089,6 @@ EXPORT_SYMBOL(flush_old_exec);
void setup_new_exec(struct linux_binprm * bprm)
{
@@ -69617,7 +69946,7 @@ index 86fafc6..25f0d1a 100644
arch_pick_mmap_layout(current->mm);
/* This is the point of no return */
-@@ -1037,18 +1100,7 @@ void setup_new_exec(struct linux_binprm * bprm)
+@@ -1037,18 +1099,7 @@ void setup_new_exec(struct linux_binprm * bprm)
else
set_dumpable(current->mm, suid_dumpable);
@@ -69637,7 +69966,7 @@ index 86fafc6..25f0d1a 100644
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
-@@ -1090,14 +1142,14 @@ EXPORT_SYMBOL(setup_new_exec);
+@@ -1090,14 +1141,14 @@ EXPORT_SYMBOL(setup_new_exec);
*/
int prepare_bprm_creds(struct linux_binprm *bprm)
{
@@ -69654,7 +69983,7 @@ index 86fafc6..25f0d1a 100644
return -ENOMEM;
}
-@@ -1105,7 +1157,7 @@ void free_bprm(struct linux_binprm *bprm)
+@@ -1105,7 +1156,7 @@ void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
@@ -69663,7 +69992,7 @@ index 86fafc6..25f0d1a 100644
abort_creds(bprm->cred);
}
kfree(bprm);
-@@ -1126,13 +1178,13 @@ void install_exec_creds(struct linux_binprm *bprm)
+@@ -1126,13 +1177,13 @@ void install_exec_creds(struct linux_binprm *bprm)
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
@@ -69679,7 +70008,7 @@ index 86fafc6..25f0d1a 100644
* PTRACE_ATTACH
*/
int check_unsafe_exec(struct linux_binprm *bprm)
-@@ -1152,7 +1204,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1152,7 +1203,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
@@ -69688,7 +70017,7 @@ index 86fafc6..25f0d1a 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
-@@ -1339,6 +1391,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+@@ -1339,6 +1390,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
EXPORT_SYMBOL(search_binary_handler);
@@ -69710,7 +70039,7 @@ index 86fafc6..25f0d1a 100644
/*
* sys_execve() executes a new program.
*/
-@@ -1347,11 +1414,35 @@ int do_execve(char * filename,
+@@ -1347,11 +1413,35 @@ int do_execve(char * filename,
char __user *__user *envp,
struct pt_regs * regs)
{
@@ -69746,7 +70075,7 @@ index 86fafc6..25f0d1a 100644
retval = unshare_files(&displaced);
if (retval)
-@@ -1377,12 +1468,27 @@ int do_execve(char * filename,
+@@ -1377,12 +1467,27 @@ int do_execve(char * filename,
if (IS_ERR(file))
goto out_unmark;
@@ -69774,7 +70103,7 @@ index 86fafc6..25f0d1a 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
-@@ -1399,25 +1505,66 @@ int do_execve(char * filename,
+@@ -1399,25 +1504,66 @@ int do_execve(char * filename,
if (retval < 0)
goto out;
@@ -69845,7 +70174,7 @@ index 86fafc6..25f0d1a 100644
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
-@@ -1426,6 +1573,14 @@ int do_execve(char * filename,
+@@ -1426,6 +1572,14 @@ int do_execve(char * filename,
put_files_struct(displaced);
return retval;
@@ -69860,7 +70189,7 @@ index 86fafc6..25f0d1a 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1591,6 +1746,251 @@ out:
+@@ -1591,6 +1745,251 @@ out:
return ispipe;
}
@@ -70112,7 +70441,7 @@ index 86fafc6..25f0d1a 100644
static int zap_process(struct task_struct *start)
{
struct task_struct *t;
-@@ -1793,17 +2193,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -1793,17 +2192,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -70135,7 +70464,7 @@ index 86fafc6..25f0d1a 100644
pipe_unlock(pipe);
}
-@@ -1826,10 +2226,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1826,10 +2225,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
char **helper_argv = NULL;
int helper_argc = 0;
int dump_count = 0;
@@ -70150,7 +70479,7 @@ index 86fafc6..25f0d1a 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -1874,6 +2277,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1874,6 +2276,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
clear_thread_flag(TIF_SIGPENDING);
@@ -70159,7 +70488,7 @@ index 86fafc6..25f0d1a 100644
/*
* lock_kernel() because format_corename() is controlled by sysctl, which
* uses lock_kernel()
-@@ -1908,7 +2313,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1908,7 +2312,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
goto fail_unlock;
}
@@ -70168,7 +70497,7 @@ index 86fafc6..25f0d1a 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -1972,7 +2377,7 @@ close_fail:
+@@ -1972,7 +2376,7 @@ close_fail:
filp_close(file, NULL);
fail_dropcount:
if (dump_count)
@@ -72237,10 +72566,98 @@ index 43022f3..7298079 100644
if (!sbi)
return -ENOMEM;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
-index 87a1258..5694d91 100644
+index 87a1258..80c3284 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
-@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
+@@ -601,9 +601,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ spin_lock(&sbinfo->stat_lock);
+ /* If no limits set, just report 0 for max/free/used
+ * blocks, like simple_statfs() */
+- if (sbinfo->max_blocks >= 0) {
+- buf->f_blocks = sbinfo->max_blocks;
+- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
++ if (sbinfo->spool) {
++ long free_pages;
++
++ spin_lock(&sbinfo->spool->lock);
++ buf->f_blocks = sbinfo->spool->max_hpages;
++ free_pages = sbinfo->spool->max_hpages
++ - sbinfo->spool->used_hpages;
++ buf->f_bavail = buf->f_bfree = free_pages;
++ spin_unlock(&sbinfo->spool->lock);
+ buf->f_files = sbinfo->max_inodes;
+ buf->f_ffree = sbinfo->free_inodes;
+ }
+@@ -619,6 +625,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
+
+ if (sbi) {
+ sb->s_fs_info = NULL;
++
++ if (sbi->spool)
++ hugepage_put_subpool(sbi->spool);
++
+ kfree(sbi);
+ }
+ }
+@@ -842,10 +852,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_fs_info = sbinfo;
+ sbinfo->hstate = config.hstate;
+ spin_lock_init(&sbinfo->stat_lock);
+- sbinfo->max_blocks = config.nr_blocks;
+- sbinfo->free_blocks = config.nr_blocks;
+ sbinfo->max_inodes = config.nr_inodes;
+ sbinfo->free_inodes = config.nr_inodes;
++ sbinfo->spool = NULL;
++ if (config.nr_blocks != -1) {
++ sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
++ if (!sbinfo->spool)
++ goto out_free;
++ }
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = huge_page_size(config.hstate);
+ sb->s_blocksize_bits = huge_page_shift(config.hstate);
+@@ -865,38 +879,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_root = root;
+ return 0;
+ out_free:
++ if (sbinfo->spool)
++ kfree(sbinfo->spool);
+ kfree(sbinfo);
+ return -ENOMEM;
+ }
+
+-int hugetlb_get_quota(struct address_space *mapping, long delta)
+-{
+- int ret = 0;
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- if (sbinfo->free_blocks - delta >= 0)
+- sbinfo->free_blocks -= delta;
+- else
+- ret = -ENOMEM;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-
+- return ret;
+-}
+-
+-void hugetlb_put_quota(struct address_space *mapping, long delta)
+-{
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- sbinfo->free_blocks += delta;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-}
+-
+ static int hugetlbfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+ {
+@@ -909,7 +897,7 @@ static struct file_system_type hugetlbfs_fs_type = {
.kill_sb = kill_litter_super,
};
@@ -72305,6 +72722,19 @@ index b0435dd..81ee0be 100644
jbd_debug(1, "Start checkpoint\n");
/*
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index a051270..5c156ad 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1822,6 +1822,8 @@ zap_buffer_unlocked:
+ clear_buffer_mapped(bh);
+ clear_buffer_req(bh);
+ clear_buffer_new(bh);
++ clear_buffer_delay(bh);
++ clear_buffer_unwritten(bh);
+ bh->b_bdev = NULL;
+ return may_free;
+ }
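The jbd2 hunk backports the fix for stale buffer state after journal invalidation: a buffer_head left with BH_Delay or BH_Unwritten set past truncate can later convince ext4 writeback that a freed block still has delayed-allocation or unwritten-extent work pending. An annotated restatement (assumes <linux/buffer_head.h>):

static void zap_buffer_state(struct buffer_head *bh)
{
    clear_buffer_mapped(bh);     /* no longer backed by a disk block */
    clear_buffer_req(bh);        /* no I/O request outstanding */
    clear_buffer_new(bh);        /* not a freshly allocated block */
    clear_buffer_delay(bh);      /* added: drop delayed-alloc state */
    clear_buffer_unwritten(bh);  /* added: drop unwritten-extent state */
    bh->b_bdev = NULL;
}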
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 546d153..736896c 100644
--- a/fs/jffs2/compr_rtime.c
@@ -74041,7 +74471,7 @@ index 50f8f06..c5755df 100644
help
Various /proc files exist to monitor process memory utilization:
diff --git a/fs/proc/array.c b/fs/proc/array.c
-index c5ef152..28c94f7 100644
+index c5ef152..d33f771 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -60,6 +60,7 @@
@@ -74052,6 +74482,15 @@ index c5ef152..28c94f7 100644
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
+@@ -263,7 +264,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
+ shpending = p->signal->shared_pending.signal;
+ blocked = p->blocked;
+ collect_sigign_sigcatch(p, &ignored, &caught);
+- num_threads = atomic_read(&p->signal->count);
++ num_threads = get_nr_threads(p);
+ qsize = atomic_read(&__task_cred(p)->user->sigpending);
+ qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
+ unlock_task_sighand(p, &flags);
@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
p->nivcsw);
}
@@ -74119,6 +74558,15 @@ index c5ef152..28c94f7 100644
state = *get_task_state(task);
vsize = eip = esp = 0;
permitted = ptrace_may_access(task, PTRACE_MODE_READ);
+@@ -390,7 +430,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ tty_nr = new_encode_dev(tty_devnum(sig->tty));
+ }
+
+- num_threads = atomic_read(&sig->count);
++ num_threads = get_nr_threads(task);
+ collect_sigign_sigcatch(task, &sigign, &sigcatch);
+
+ cmin_flt = sig->cmin_flt;
@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
gtime = task_gtime(task);
}
@@ -74173,7 +74621,7 @@ index c5ef152..28c94f7 100644
if (mm) {
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
-@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+@@ -528,3 +595,10 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
@@ -74181,19 +74629,11 @@ index c5ef152..28c94f7 100644
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
+{
-+ u32 curr_ip = 0;
-+ unsigned long flags;
-+
-+ if (lock_task_sighand(task, &flags)) {
-+ curr_ip = task->signal->curr_ip;
-+ unlock_task_sighand(task, &flags);
-+ }
-+
-+ return sprintf(buffer, "%pI4\n", &curr_ip);
++ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
+}
+#endif
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 67f7dc0..b9b8799 100644
+index 67f7dc0..abb766e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -102,6 +102,22 @@ struct pid_entry {
@@ -74219,7 +74659,26 @@ index 67f7dc0..b9b8799 100644
#define NOD(NAME, MODE, IOP, FOP, OP) { \
.name = (NAME), \
.len = sizeof(NAME) - 1, \
-@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
+@@ -165,18 +181,6 @@ static int get_fs_path(struct task_struct *task, struct path *path, bool root)
+ return result;
+ }
+
+-static int get_nr_threads(struct task_struct *tsk)
+-{
+- unsigned long flags;
+- int count = 0;
+-
+- if (lock_task_sighand(tsk, &flags)) {
+- count = atomic_read(&tsk->signal->count);
+- unlock_task_sighand(tsk, &flags);
+- }
+- return count;
+-}
+-
+ static int proc_cwd_link(struct inode *inode, struct path *path)
+ {
+ struct task_struct *task = get_proc_task(inode);
+@@ -213,6 +217,9 @@ static int check_mem_permission(struct task_struct *task)
if (task == current)
return 0;
@@ -74229,7 +74688,7 @@ index 67f7dc0..b9b8799 100644
/*
* If current is actively ptrace'ing, and would also be
* permitted to freshly attach with ptrace now, permit it.
-@@ -236,7 +255,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+@@ -236,7 +243,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
{
struct mm_struct *mm;
@@ -74238,7 +74697,7 @@ index 67f7dc0..b9b8799 100644
return NULL;
mm = get_task_mm(task);
-@@ -245,7 +264,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+@@ -245,7 +252,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
mmput(mm);
mm = NULL;
}
@@ -74247,7 +74706,7 @@ index 67f7dc0..b9b8799 100644
return mm;
}
-@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+@@ -260,6 +267,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
if (!mm->arg_end)
goto out_mm; /* Shh! No looking before we're done */
@@ -74257,7 +74716,7 @@ index 67f7dc0..b9b8799 100644
len = mm->arg_end - mm->arg_start;
if (len > PAGE_SIZE)
-@@ -287,12 +309,28 @@ out:
+@@ -287,12 +297,28 @@ out:
return res;
}
@@ -74286,7 +74745,7 @@ index 67f7dc0..b9b8799 100644
do {
nwords += 2;
} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
-@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+@@ -306,7 +332,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
}
@@ -74295,7 +74754,7 @@ index 67f7dc0..b9b8799 100644
/*
* Provides a wchan file via kallsyms in a proper one-value-per-file format.
* Returns the resolved symbol. If that fails, simply return the address.
-@@ -330,11 +368,11 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
+@@ -330,11 +356,11 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
static int lock_trace(struct task_struct *task)
{
@@ -74309,7 +74768,7 @@ index 67f7dc0..b9b8799 100644
return -EPERM;
}
return 0;
-@@ -342,10 +380,10 @@ static int lock_trace(struct task_struct *task)
+@@ -342,10 +368,10 @@ static int lock_trace(struct task_struct *task)
static void unlock_trace(struct task_struct *task)
{
@@ -74322,7 +74781,7 @@ index 67f7dc0..b9b8799 100644
#define MAX_STACK_TRACE_DEPTH 64
-@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
+@@ -545,7 +571,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
return count;
}
@@ -74331,7 +74790,7 @@ index 67f7dc0..b9b8799 100644
static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
long nr;
-@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+@@ -574,7 +600,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
/************************************************************************/
/* permission checks */
@@ -74340,7 +74799,7 @@ index 67f7dc0..b9b8799 100644
{
struct task_struct *task;
int allowed = 0;
-@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
+@@ -584,7 +610,10 @@ static int proc_fd_access_allowed(struct inode *inode)
*/
task = get_proc_task(inode);
if (task) {
@@ -74352,7 +74811,7 @@ index 67f7dc0..b9b8799 100644
put_task_struct(task);
}
return allowed;
-@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
+@@ -806,9 +835,16 @@ static const struct file_operations proc_single_file_operations = {
static int mem_open(struct inode* inode, struct file* file)
{
file->private_data = (void*)((long)current->self_exec_id);
@@ -74369,7 +74828,7 @@ index 67f7dc0..b9b8799 100644
static ssize_t mem_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
-@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
+@@ -818,6 +854,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
int ret = -ESRCH;
struct mm_struct *mm;
@@ -74383,7 +74842,7 @@ index 67f7dc0..b9b8799 100644
if (!task)
goto out_no_task;
-@@ -960,9 +1015,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+@@ -960,9 +1003,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
int ret = -ESRCH;
struct mm_struct *mm;
@@ -74402,7 +74861,7 @@ index 67f7dc0..b9b8799 100644
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out;
-@@ -978,15 +1042,16 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+@@ -978,15 +1030,16 @@ static ssize_t environ_read(struct file *file, char __user *buf,
goto out_free;
while (count > 0) {
@@ -74425,7 +74884,7 @@ index 67f7dc0..b9b8799 100644
retval = access_process_vm(task, (mm->env_start + src),
page, this_len, 0);
-@@ -1019,6 +1084,7 @@ out_no_task:
+@@ -1019,6 +1072,7 @@ out_no_task:
static const struct file_operations proc_environ_operations = {
.read = environ_read,
@@ -74433,7 +74892,7 @@ index 67f7dc0..b9b8799 100644
};
static ssize_t oom_adjust_read(struct file *file, char __user *buf,
-@@ -1377,7 +1443,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -1377,7 +1431,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
path_put(&nd->path);
/* Are we allowed to snoop on the tasks file descriptors? */
@@ -74442,7 +74901,7 @@ index 67f7dc0..b9b8799 100644
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
-@@ -1417,8 +1483,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
+@@ -1417,8 +1471,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
struct path path;
/* Are we allowed to snoop on the tasks file descriptors? */
@@ -74463,7 +74922,7 @@ index 67f7dc0..b9b8799 100644
error = PROC_I(inode)->op.proc_get_link(inode, &path);
if (error)
-@@ -1483,7 +1559,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
+@@ -1483,7 +1547,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
rcu_read_lock();
cred = __task_cred(task);
inode->i_uid = cred->euid;
@@ -74475,7 +74934,7 @@ index 67f7dc0..b9b8799 100644
rcu_read_unlock();
}
security_task_to_inode(task, inode);
-@@ -1501,6 +1581,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
+@@ -1501,6 +1569,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
struct inode *inode = dentry->d_inode;
struct task_struct *task;
const struct cred *cred;
@@ -74485,7 +74944,7 @@ index 67f7dc0..b9b8799 100644
generic_fillattr(inode, stat);
-@@ -1508,13 +1591,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
+@@ -1508,13 +1579,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
stat->uid = 0;
stat->gid = 0;
task = pid_task(proc_pid(inode), PIDTYPE_PID);
@@ -74528,7 +74987,7 @@ index 67f7dc0..b9b8799 100644
}
rcu_read_unlock();
return 0;
-@@ -1545,11 +1656,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
+@@ -1545,11 +1644,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
if (task) {
if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
@@ -74549,7 +75008,7 @@ index 67f7dc0..b9b8799 100644
rcu_read_unlock();
} else {
inode->i_uid = 0;
-@@ -1670,7 +1790,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
+@@ -1670,7 +1778,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
int fd = proc_fd(inode);
if (task) {
@@ -74559,7 +75018,7 @@ index 67f7dc0..b9b8799 100644
put_task_struct(task);
}
if (files) {
-@@ -1922,12 +2043,22 @@ static const struct file_operations proc_fd_operations = {
+@@ -1922,12 +2031,22 @@ static const struct file_operations proc_fd_operations = {
static int proc_fd_permission(struct inode *inode, int mask)
{
int rv;
@@ -74584,7 +75043,7 @@ index 67f7dc0..b9b8799 100644
return rv;
}
-@@ -2036,6 +2167,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
+@@ -2036,6 +2155,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
if (!task)
goto out_no_task;
@@ -74594,7 +75053,7 @@ index 67f7dc0..b9b8799 100644
/*
* Yes, it does not scale. And it should not. Don't add
* new entries into /proc/<tgid>/ without very good reasons.
-@@ -2080,6 +2214,9 @@ static int proc_pident_readdir(struct file *filp,
+@@ -2080,6 +2202,9 @@ static int proc_pident_readdir(struct file *filp,
if (!task)
goto out_no_task;
@@ -74604,7 +75063,7 @@ index 67f7dc0..b9b8799 100644
ret = 0;
i = filp->f_pos;
switch (i) {
-@@ -2171,14 +2308,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
+@@ -2171,14 +2296,14 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
goto out_free;
/* Guard against adverse ptrace interaction */
@@ -74621,7 +75080,7 @@ index 67f7dc0..b9b8799 100644
out_free:
free_page((unsigned long) page);
out:
-@@ -2347,7 +2484,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -2347,7 +2472,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
void *cookie)
{
@@ -74630,7 +75089,7 @@ index 67f7dc0..b9b8799 100644
if (!IS_ERR(s))
__putname(s);
}
-@@ -2480,9 +2617,16 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
+@@ -2480,9 +2605,16 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
{
struct task_io_accounting acct = task->ioac;
unsigned long flags;
@@ -74649,7 +75108,7 @@ index 67f7dc0..b9b8799 100644
if (whole && lock_task_sighand(task, &flags)) {
struct task_struct *t = task;
-@@ -2493,7 +2637,7 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
+@@ -2493,7 +2625,7 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
unlock_task_sighand(task, &flags);
}
@@ -74658,7 +75117,7 @@ index 67f7dc0..b9b8799 100644
"rchar: %llu\n"
"wchar: %llu\n"
"syscr: %llu\n"
-@@ -2508,6 +2652,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
+@@ -2508,6 +2640,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
(unsigned long long)acct.read_bytes,
(unsigned long long)acct.write_bytes,
(unsigned long long)acct.cancelled_write_bytes);
@@ -74668,7 +75127,7 @@ index 67f7dc0..b9b8799 100644
}
static int proc_tid_io_accounting(struct task_struct *task, char *buffer)
-@@ -2553,7 +2700,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+@@ -2553,7 +2688,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
@@ -74677,7 +75136,7 @@ index 67f7dc0..b9b8799 100644
INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
-@@ -2578,10 +2725,10 @@ static const struct pid_entry tgid_base_stuff[] = {
+@@ -2578,10 +2713,10 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
@@ -74690,7 +75149,7 @@ index 67f7dc0..b9b8799 100644
ONE("stack", S_IRUGO, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
-@@ -2611,6 +2758,9 @@ static const struct pid_entry tgid_base_stuff[] = {
+@@ -2611,6 +2746,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_TASK_IO_ACCOUNTING
INF("io", S_IRUSR, proc_tgid_io_accounting),
#endif
@@ -74700,7 +75159,7 @@ index 67f7dc0..b9b8799 100644
};
static int proc_tgid_base_readdir(struct file * filp,
-@@ -2735,7 +2885,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
+@@ -2735,7 +2873,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
if (!inode)
goto out;
@@ -74715,7 +75174,7 @@ index 67f7dc0..b9b8799 100644
inode->i_op = &proc_tgid_base_inode_operations;
inode->i_fop = &proc_tgid_base_operations;
inode->i_flags|=S_IMMUTABLE;
-@@ -2777,7 +2934,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
+@@ -2777,7 +2922,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
if (!task)
goto out;
@@ -74727,7 +75186,7 @@ index 67f7dc0..b9b8799 100644
put_task_struct(task);
out:
return result;
-@@ -2842,6 +3003,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+@@ -2842,6 +2991,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned int nr;
struct task_struct *reaper;
@@ -74739,7 +75198,7 @@ index 67f7dc0..b9b8799 100644
struct tgid_iter iter;
struct pid_namespace *ns;
-@@ -2865,8 +3031,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+@@ -2865,8 +3019,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
for (iter = next_tgid(ns, iter);
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
@@ -74768,7 +75227,7 @@ index 67f7dc0..b9b8799 100644
put_task_struct(iter.task);
goto out;
}
-@@ -2892,7 +3077,7 @@ static const struct pid_entry tid_base_stuff[] = {
+@@ -2892,7 +3065,7 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
@@ -74777,7 +75236,7 @@ index 67f7dc0..b9b8799 100644
INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
-@@ -2916,10 +3101,10 @@ static const struct pid_entry tid_base_stuff[] = {
+@@ -2916,10 +3089,10 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
@@ -76534,9 +76993,18 @@ index e89734e..5e84d8d 100644
*offset = off & 0x7fffffff;
return 0;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
-index 8f32f50..b6a41e8 100644
+index 8f32f50..5db8d65 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
+@@ -554,7 +554,7 @@ xfs_readlink(
+ char *link)
+ {
+ xfs_mount_t *mp = ip->i_mount;
+- int pathlen;
++ xfs_fsize_t pathlen;
+ int error = 0;
+
+ xfs_itrace_entry(ip);
@@ -564,13 +564,18 @@ xfs_readlink(
xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -76548,7 +77016,7 @@ index 8f32f50..b6a41e8 100644
if (!pathlen)
goto out;
-+ if (pathlen > MAXPATHLEN) {
++ if (pathlen < 0 || pathlen > MAXPATHLEN) {
+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
+ __func__, (unsigned long long)ip->i_ino, pathlen);
+ ASSERT(0);
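The xfs_readlink change widens pathlen from int to xfs_fsize_t and range-checks it before the symlink body is read, so a corrupted on-disk inode size can no longer be silently truncated into a bogus small length. The hazard in miniature (standalone user-space demo; 1024 stands in for MAXPATHLEN):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t di_size = 0x100000001LL;   /* corrupt on-disk symlink size */
    int pathlen = (int)di_size;        /* old code: truncates to 1 */
    long maxpathlen = 1024;            /* stand-in for MAXPATHLEN */

    printf("truncated length: %d\n", pathlen);
    if (di_size < 0 || di_size > maxpathlen)
        printf("rejected by the new range check\n");
    return 0;
}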
@@ -89442,6 +89910,49 @@ index 211ff44..00ab6d7 100644
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 41a59af..6b3feef 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -12,6 +12,15 @@ struct user_struct;
+ #include <linux/shm.h>
+ #include <asm/tlbflush.h>
+
++struct hugepage_subpool {
++ spinlock_t lock;
++ long count;
++ long max_hpages, used_hpages;
++};
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
++void hugepage_put_subpool(struct hugepage_subpool *spool);
++
+ int PageHuge(struct page *page);
+
+ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+@@ -138,12 +147,11 @@ struct hugetlbfs_config {
+ };
+
+ struct hugetlbfs_sb_info {
+- long max_blocks; /* blocks allowed */
+- long free_blocks; /* blocks free */
+ long max_inodes; /* inodes allowed */
+ long free_inodes; /* inodes free */
+ spinlock_t stat_lock;
+ struct hstate *hstate;
++ struct hugepage_subpool *spool;
+ };
+
+
+@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
+ extern const struct vm_operations_struct hugetlb_vm_ops;
+ struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
+ struct user_struct **user, int creat_flags);
+-int hugetlb_get_quota(struct address_space *mapping, long delta);
+-void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+ static inline int is_file_hugepages(struct file *file)
+ {
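The hugetlb.h hunk above, together with the fs/hugetlbfs/inode.c changes earlier, replaces the old per-superblock max_blocks/free_blocks counters and the hugetlb_get_quota()/hugetlb_put_quota() helpers with a locked hugepage_subpool. A hypothetical condensation of the reserve/release arithmetic the subpool performs (helper names invented here; the real code lives in mm/hugetlb.c, and <linux/errno.h> supplies ENOMEM):

static int subpool_get_pages(struct hugepage_subpool *spool, long delta)
{
    int ret = 0;

    spin_lock(&spool->lock);
    if (spool->used_hpages + delta <= spool->max_hpages)
        spool->used_hpages += delta;   /* reserve */
    else
        ret = -ENOMEM;                 /* pool exhausted */
    spin_unlock(&spool->lock);
    return ret;
}

static void subpool_put_pages(struct hugepage_subpool *spool, long delta)
{
    spin_lock(&spool->lock);
    spool->used_hpages -= delta;       /* release */
    spin_unlock(&spool->lock);
}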
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 7b40cda..24eb44e 100644
--- a/include/linux/i2c.h
@@ -89516,9 +90027,18 @@ index ff8bde5..0296174 100644
#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
-index 21a6f5d..7c7d19f 100644
+index 21a6f5d..577afb1 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
+@@ -16,7 +16,7 @@ extern struct files_struct init_files;
+ extern struct fs_struct init_fs;
+
+ #define INIT_SIGNALS(sig) { \
+- .count = ATOMIC_INIT(1), \
++ .nr_threads = 1, \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+ .shared_pending = { \
+ .list = LIST_HEAD_INIT(sig.shared_pending.list), \
@@ -29,6 +29,8 @@ extern struct fs_struct init_fs;
.running = 0, \
.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
@@ -89773,7 +90293,7 @@ index 58ae8e0..3950d3c 100644
static inline struct kset *to_kset(struct kobject *kobj)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index c728a50..752d821 100644
+index c728a50..07be660 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -89794,6 +90314,19 @@ index c728a50..752d821 100644
void kvm_arch_exit(void);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+@@ -556,5 +556,12 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+ }
++
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
++
++#else
++
++static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
++
+ #endif
+ #endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index a069916..223edde 100644
--- a/include/linux/libata.h
@@ -90754,7 +91287,7 @@ index 3392c59..a746428 100644
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 71849bf..903514a 100644
+index 71849bf..ab66216 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio;
@@ -90787,7 +91320,18 @@ index 71849bf..903514a 100644
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
-@@ -666,7 +670,20 @@ struct signal_struct {
+@@ -560,8 +564,9 @@ struct thread_group_cputimer {
+ * the locking of signal_struct.
+ */
+ struct signal_struct {
+- atomic_t count;
++ atomic_t sigcnt;
+ atomic_t live;
++ int nr_threads;
+
+ wait_queue_head_t wait_chldexit; /* for wait4() */
+
+@@ -666,7 +671,20 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
@@ -90808,7 +91352,7 @@ index 71849bf..903514a 100644
};
/* Context switch must be unlocked if interrupts are to be enabled */
-@@ -723,6 +740,11 @@ struct user_struct {
+@@ -723,6 +741,11 @@ struct user_struct {
struct key *session_keyring; /* UID's default session keyring */
#endif
@@ -90820,7 +91364,7 @@ index 71849bf..903514a 100644
/* Hash table maintenance information */
struct hlist_node uidhash_node;
uid_t uid;
-@@ -1328,8 +1350,8 @@ struct task_struct {
+@@ -1328,8 +1351,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -90831,7 +91375,7 @@ index 71849bf..903514a 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1343,16 +1365,6 @@ struct task_struct {
+@@ -1343,16 +1366,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -90848,7 +91392,7 @@ index 71849bf..903514a 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1369,6 +1381,10 @@ struct task_struct {
+@@ -1369,6 +1382,10 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -90859,7 +91403,7 @@ index 71849bf..903514a 100644
/* filesystem information */
struct fs_struct *fs;
/* open file information */
-@@ -1436,6 +1452,12 @@ struct task_struct {
+@@ -1436,6 +1453,12 @@ struct task_struct {
int hardirq_context;
int softirq_context;
#endif
@@ -90872,7 +91416,7 @@ index 71849bf..903514a 100644
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
-@@ -1456,6 +1478,9 @@ struct task_struct {
+@@ -1456,6 +1479,9 @@ struct task_struct {
struct backing_dev_info *backing_dev_info;
@@ -90882,7 +91426,7 @@ index 71849bf..903514a 100644
struct io_context *io_context;
unsigned long ptrace_message;
-@@ -1519,6 +1544,27 @@ struct task_struct {
+@@ -1519,6 +1545,27 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
@@ -90910,7 +91454,7 @@ index 71849bf..903514a 100644
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored adress in ret_stack */
int curr_ret_stack;
-@@ -1542,6 +1588,57 @@ struct task_struct {
+@@ -1542,6 +1589,57 @@ struct task_struct {
#endif /* CONFIG_TRACING */
};
@@ -90968,7 +91512,7 @@ index 71849bf..903514a 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
-@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
+@@ -1740,7 +1838,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_DUMPCORE 0x00000200 /* dumped core */
#define PF_SIGNALED 0x00000400 /* killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
@@ -90977,7 +91521,7 @@ index 71849bf..903514a 100644
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
-@@ -1978,7 +2075,9 @@ void yield(void);
+@@ -1978,7 +2076,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -90987,7 +91531,7 @@ index 71849bf..903514a 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2011,6 +2111,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -90995,7 +91539,22 @@ index 71849bf..903514a 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2079,7 +2180,7 @@ extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
+ extern void force_sig(int, struct task_struct *);
+ extern void force_sig_specific(int, struct task_struct *);
+ extern int send_sig(int, struct task_struct *, int);
+-extern void zap_other_threads(struct task_struct *p);
++extern int zap_other_threads(struct task_struct *p);
+ extern struct sigqueue *sigqueue_alloc(void);
+ extern void sigqueue_free(struct sigqueue *);
+ extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+@@ -2149,13 +2250,12 @@ extern void flush_thread(void);
+ extern void exit_thread(void);
+
+ extern void exit_files(struct task_struct *);
+-extern void __cleanup_signal(struct signal_struct *);
+ extern void __cleanup_sighand(struct sighand_struct *);
+
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -91004,7 +91563,19 @@ index 71849bf..903514a 100644
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2284,9 +2384,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2198,6 +2298,11 @@ extern bool current_is_single_threaded(void);
+ #define while_each_thread(g, t) \
+ while ((t = next_thread(t)) != g)
+
++static inline int get_nr_threads(struct task_struct *tsk)
++{
++ return tsk->signal->nr_threads;
++}
++
+ /* de_thread depends on thread_group_leader not being a pid based check */
+ #define thread_group_leader(p) (p == p->group_leader)
+
+@@ -2284,9 +2389,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -91016,7 +91587,7 @@ index 71849bf..903514a 100644
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-@@ -2616,6 +2716,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
+@@ -2616,6 +2721,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
@@ -92338,6 +92909,32 @@ index 9a4b8b7..e49e077 100644
#ifdef CONFIG_IP_MROUTE
struct sock *mroute_sk;
+diff --git a/include/net/rose.h b/include/net/rose.h
+index 5ba9f02..555dd19 100644
+--- a/include/net/rose.h
++++ b/include/net/rose.h
+@@ -14,6 +14,12 @@
+
+ #define ROSE_MIN_LEN 3
+
++#define ROSE_CALL_REQ_ADDR_LEN_OFF 3
++#define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */
++#define ROSE_CALL_REQ_DEST_ADDR_OFF 4
++#define ROSE_CALL_REQ_SRC_ADDR_OFF 9
++#define ROSE_CALL_REQ_FACILITIES_OFF 14
++
+ #define ROSE_GFI 0x10
+ #define ROSE_Q_BIT 0x80
+ #define ROSE_D_BIT 0x40
+@@ -214,7 +220,7 @@ extern void rose_requeue_frames(struct sock *);
+ extern int rose_validate_nr(struct sock *, unsigned short);
+ extern void rose_write_internal(struct sock *, int);
+ extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+-extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
++extern int rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
+ extern void rose_disconnect(struct sock *, int, int, int);
+
+ /* rose_timer.c */
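The rose.h hunk belongs to the fix for the ROSE facilities-parsing overflows (CVE-2011-1493): rose_parse_facilities() gains an explicit buffer-length parameter, and the new ROSE_CALL_REQ_* offsets let callers validate a call request before parsing begins. The general shape of such a bounds-checked field walk (illustrative only, not the actual parser):

static int parse_fields(const unsigned char *p, unsigned int len)
{
    while (len >= 2) {                 /* need type + length bytes */
        unsigned int flen = p[1];

        if (flen + 2 > len)            /* field would run past buffer */
            return 0;
        /* ... dispatch on p[0]; payload at p + 2, flen bytes ... */
        p += flen + 2;
        len -= flen + 2;
    }
    return 1;
}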
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8a6d529..171f401 100644
--- a/include/net/sctp/sctp.h
@@ -94156,10 +94753,10 @@ index 0b5b5fc..2a2f00d 100644
if (ret < 0)
return ret;
diff --git a/kernel/exit.c b/kernel/exit.c
-index 0f8fae3..66af9b1 100644
+index 0f8fae3..6636a75 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
-@@ -55,6 +55,10 @@
+@@ -55,13 +55,17 @@
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
@@ -94169,8 +94766,88 @@ index 0f8fae3..66af9b1 100644
+
static void exit_mm(struct task_struct * tsk);
- static void __unhash_process(struct task_struct *p)
-@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
+-static void __unhash_process(struct task_struct *p)
++static void __unhash_process(struct task_struct *p, bool group_dead)
+ {
+ nr_threads--;
+ detach_pid(p, PIDTYPE_PID);
+- if (thread_group_leader(p)) {
++ if (group_dead) {
+ detach_pid(p, PIDTYPE_PGID);
+ detach_pid(p, PIDTYPE_SID);
+
+@@ -78,18 +82,19 @@ static void __unhash_process(struct task_struct *p)
+ static void __exit_signal(struct task_struct *tsk)
+ {
+ struct signal_struct *sig = tsk->signal;
++ bool group_dead = thread_group_leader(tsk);
+ struct sighand_struct *sighand;
+-
+- BUG_ON(!sig);
+- BUG_ON(!atomic_read(&sig->count));
++ struct tty_struct *uninitialized_var(tty);
+
+ sighand = rcu_dereference(tsk->sighand);
+ spin_lock(&sighand->siglock);
+
+ posix_cpu_timers_exit(tsk);
+- if (atomic_dec_and_test(&sig->count))
++ if (group_dead) {
+ posix_cpu_timers_exit_group(tsk);
+- else {
++ tty = sig->tty;
++ sig->tty = NULL;
++ } else {
+ /*
+ * This can only happen if the caller is de_thread().
+ * FIXME: this is the temporary hack, we should teach
+@@ -102,7 +107,7 @@ static void __exit_signal(struct task_struct *tsk)
+ * If there is any task waiting for the group exit
+ * then notify it:
+ */
+- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
++ if (sig->notify_count > 0 && !--sig->notify_count)
+ wake_up_process(sig->group_exit_task);
+
+ if (tsk == sig->curr_target)
+@@ -128,32 +133,24 @@ static void __exit_signal(struct task_struct *tsk)
+ sig->oublock += task_io_get_oublock(tsk);
+ task_io_accounting_add(&sig->ioac, &tsk->ioac);
+ sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+- sig = NULL; /* Marker for below. */
+ }
+
+- __unhash_process(tsk);
++ sig->nr_threads--;
++ __unhash_process(tsk, group_dead);
+
+ /*
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+ flush_sigqueue(&tsk->pending);
+-
+- tsk->signal = NULL;
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+
+ __cleanup_sighand(sighand);
+ clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+- if (sig) {
++ if (group_dead) {
+ flush_sigqueue(&sig->shared_pending);
+- taskstats_tgid_free(sig);
+- /*
+- * Make sure ->signal can't go away under rq->lock,
+- * see account_group_exec_runtime().
+- */
+- task_rq_unlock_wait(tsk);
+- __cleanup_signal(sig);
++ tty_kref_put(tty);
+ }
+ }
+
+@@ -174,6 +171,10 @@ void release_task(struct task_struct * p)
struct task_struct *leader;
int zap_leader;
repeat:
@@ -94181,7 +94858,7 @@ index 0f8fae3..66af9b1 100644
tracehook_prepare_release_task(p);
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials */
-@@ -397,7 +405,7 @@ int allow_signal(int sig)
+@@ -397,7 +398,7 @@ int allow_signal(int sig)
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
@@ -94190,7 +94867,7 @@ index 0f8fae3..66af9b1 100644
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
-@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
+@@ -433,6 +434,17 @@ void daemonize(const char *name, ...)
vsnprintf(current->comm, sizeof(current->comm), name, args);
va_end(args);
@@ -94208,7 +94885,22 @@ index 0f8fae3..66af9b1 100644
/*
* If we were started as result of loading a module, close all of the
* user space pages. We don't need them, and if we didn't close them
-@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
+@@ -852,12 +864,9 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
+
+ tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+
+- /* mt-exec, de_thread() is waiting for us */
+- if (thread_group_leader(tsk) &&
+- tsk->signal->group_exit_task &&
+- tsk->signal->notify_count < 0)
++ /* mt-exec, de_thread() is waiting for group leader */
++ if (unlikely(tsk->signal->notify_count < 0))
+ wake_up_process(tsk->signal->group_exit_task);
+-
+ write_unlock_irq(&tasklist_lock);
+
+ tracehook_report_death(tsk, signal, cookie, group_dead);
+@@ -897,17 +906,17 @@ NORET_TYPE void do_exit(long code)
struct task_struct *tsk = current;
int group_dead;
@@ -94233,7 +94925,7 @@ index 0f8fae3..66af9b1 100644
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
* continuing. Amongst other possible reasons, this is to prevent
* mm_release()->clear_child_tid() from writing to a user-controlled
-@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
+@@ -915,6 +924,13 @@ NORET_TYPE void do_exit(long code)
*/
set_fs(USER_DS);
@@ -94247,7 +94939,7 @@ index 0f8fae3..66af9b1 100644
tracehook_report_exit(&code);
validate_creds_for_do_exit(tsk);
-@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
+@@ -973,6 +989,9 @@ NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
@@ -94257,7 +94949,7 @@ index 0f8fae3..66af9b1 100644
exit_mm(tsk);
if (group_dead)
-@@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
+@@ -1020,7 +1039,7 @@ NORET_TYPE void do_exit(long code)
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
@@ -94266,7 +94958,7 @@ index 0f8fae3..66af9b1 100644
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
-@@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
+@@ -1059,7 +1078,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
*/
@@ -94275,7 +94967,7 @@ index 0f8fae3..66af9b1 100644
do_group_exit(int exit_code)
{
struct signal_struct *sig = current->signal;
-@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+@@ -1188,7 +1207,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
if (unlikely(wo->wo_flags & WNOWAIT)) {
int exit_code = p->exit_code;
@@ -94285,10 +94977,37 @@ index 0f8fae3..66af9b1 100644
get_task_struct(p);
read_unlock(&tasklist_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
-index 4bde56f..0cc7962 100644
+index 4bde56f..899e40d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -239,21 +239,26 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -156,6 +156,18 @@ void free_task(struct task_struct *tsk)
+ }
+ EXPORT_SYMBOL(free_task);
+
++static inline void free_signal_struct(struct signal_struct *sig)
++{
++ taskstats_tgid_free(sig);
++ kmem_cache_free(signal_cachep, sig);
++}
++
++static inline void put_signal_struct(struct signal_struct *sig)
++{
++ if (atomic_dec_and_test(&sig->sigcnt))
++ free_signal_struct(sig);
++}
++
+ void __put_task_struct(struct task_struct *tsk)
+ {
+ WARN_ON(!tsk->exit_state);
+@@ -164,6 +176,7 @@ void __put_task_struct(struct task_struct *tsk)
+
+ exit_creds(tsk);
+ delayacct_tsk_free(tsk);
++ put_signal_struct(tsk->signal);
+
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+@@ -239,21 +252,26 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
}
err = arch_dup_task_struct(tsk, orig);
@@ -94323,7 +95042,7 @@ index 4bde56f..0cc7962 100644
#endif
/* One for us, one for whoever does the "release_task()" (usually parent) */
-@@ -293,8 +298,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -293,8 +311,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
@@ -94334,7 +95053,7 @@ index 4bde56f..0cc7962 100644
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
-@@ -318,7 +323,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -318,7 +336,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
@@ -94343,7 +95062,7 @@ index 4bde56f..0cc7962 100644
if (security_vm_enough_memory(len))
goto fail_nomem;
charge = len;
-@@ -335,6 +340,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -335,6 +353,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = tmp->vm_prev = NULL;
@@ -94351,7 +95070,7 @@ index 4bde56f..0cc7962 100644
anon_vma_link(tmp);
file = tmp->vm_file;
if (file) {
-@@ -384,6 +390,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -384,6 +403,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
}
@@ -94383,7 +95102,7 @@ index 4bde56f..0cc7962 100644
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
-@@ -734,13 +765,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -734,13 +778,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
write_unlock(&fs->lock);
return -EAGAIN;
}
@@ -94399,16 +95118,35 @@ index 4bde56f..0cc7962 100644
return 0;
}
-@@ -910,6 +942,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+@@ -863,8 +908,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ if (!sig)
+ return -ENOMEM;
+
+- atomic_set(&sig->count, 1);
++ sig->nr_threads = 1;
+ atomic_set(&sig->live, 1);
++ atomic_set(&sig->sigcnt, 1);
+ init_waitqueue_head(&sig->wait_chldexit);
+ sig->flags = 0;
+ if (clone_flags & CLONE_NEWPID)
+@@ -910,14 +956,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->oom_adj = current->signal->oom_adj;
+- return 0;
+-}
+ mutex_init(&sig->cred_guard_mutex);
-+
- return 0;
+
+-void __cleanup_signal(struct signal_struct *sig)
+-{
+- thread_group_cputime_free(sig);
+- tty_kref_put(sig->tty);
+- kmem_cache_free(signal_cachep, sig);
++ return 0;
}
-@@ -1033,12 +1067,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
+@@ -1033,12 +1074,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
@@ -94427,7 +95165,7 @@ index 4bde56f..0cc7962 100644
retval = copy_creds(p, clone_flags);
if (retval < 0)
-@@ -1233,6 +1271,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1233,6 +1278,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
@@ -94437,7 +95175,26 @@ index 4bde56f..0cc7962 100644
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
-@@ -1299,7 +1340,8 @@ bad_fork_free_pid:
+@@ -1261,8 +1309,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ }
+
+ if (clone_flags & CLONE_THREAD) {
+- atomic_inc(&current->signal->count);
++ current->signal->nr_threads++;
+ atomic_inc(&current->signal->live);
++ atomic_inc(&current->signal->sigcnt);
+ p->group_leader = current->group_leader;
+ list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
+ }
+@@ -1276,7 +1325,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ p->nsproxy->pid_ns->child_reaper = p;
+
+ p->signal->leader_pid = pid;
+- tty_kref_put(p->signal->tty);
+ p->signal->tty = tty_kref_get(current->signal->tty);
+ attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID, task_session(current));
+@@ -1299,7 +1347,8 @@ bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
@@ -94447,7 +95204,16 @@ index 4bde56f..0cc7962 100644
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
-@@ -1333,6 +1375,8 @@ bad_fork_cleanup_count:
+@@ -1307,7 +1356,7 @@ bad_fork_cleanup_mm:
+ mmput(p->mm);
+ bad_fork_cleanup_signal:
+ if (!(clone_flags & CLONE_THREAD))
+- __cleanup_signal(p->signal);
++ free_signal_struct(p->signal);
+ bad_fork_cleanup_sighand:
+ __cleanup_sighand(p->sighand);
+ bad_fork_cleanup_fs:
+@@ -1333,6 +1382,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -94456,7 +95222,7 @@ index 4bde56f..0cc7962 100644
return ERR_PTR(retval);
}
-@@ -1426,6 +1470,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1426,6 +1477,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -94465,7 +95231,22 @@ index 4bde56f..0cc7962 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1558,7 +1604,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1522,14 +1575,6 @@ static void check_unshare_flags(unsigned long *flags_ptr)
+ *flags_ptr |= CLONE_SIGHAND;
+
+ /*
+- * If unsharing signal handlers and the task was created
+- * using CLONE_THREAD, then must unshare the thread
+- */
+- if ((*flags_ptr & CLONE_SIGHAND) &&
+- (atomic_read(&current->signal->count) > 1))
+- *flags_ptr |= CLONE_THREAD;
+-
+- /*
+ * If unsharing namespace, must also unshare filesystem information.
+ */
+ if (*flags_ptr & CLONE_NEWNS)
+@@ -1558,7 +1603,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -94474,7 +95255,7 @@ index 4bde56f..0cc7962 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1681,7 +1727,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1681,7 +1726,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
write_lock(&fs->lock);
current->fs = new_fs;
@@ -97444,8 +98225,30 @@ index 0591df8..db35e3d 100644
if (cpu != group_first_cpu(sd->groups))
return;
+diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
+index f9724c0..23ef8d8 100644
+--- a/kernel/sched_debug.c
++++ b/kernel/sched_debug.c
+@@ -369,15 +369,9 @@ __initcall(init_sched_debug_procfs);
+ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ {
+ unsigned long nr_switches;
+- unsigned long flags;
+- int num_threads = 1;
+
+- if (lock_task_sighand(p, &flags)) {
+- num_threads = atomic_read(&p->signal->count);
+- unlock_task_sighand(p, &flags);
+- }
+-
+- SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
++ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
++ get_nr_threads(p));
+ SEQ_printf(m,
+ "---------------------------------------------------------\n");
+ #define __P(F) \
diff --git a/kernel/signal.c b/kernel/signal.c
-index 2494827..cda80a0 100644
+index 2494827..3a63757 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -41,12 +41,12 @@
@@ -97541,7 +98344,40 @@ index 2494827..cda80a0 100644
return ret;
}
-@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+@@ -1034,23 +1054,24 @@ force_sig_specific(int sig, struct task_struct *t)
+ /*
+ * Nuke all other threads in the group.
+ */
+-void zap_other_threads(struct task_struct *p)
++int zap_other_threads(struct task_struct *p)
+ {
+- struct task_struct *t;
++ struct task_struct *t = p;
++ int count = 0;
+
+ p->signal->group_stop_count = 0;
+
+- for (t = next_thread(p); t != p; t = next_thread(t)) {
+- /*
+- * Don't bother with already dead threads
+- */
++ while_each_thread(p, t) {
++ count++;
++
++ /* Don't bother with already dead threads */
+ if (t->exit_state)
+ continue;
+-
+- /* SIGKILL will be handled before any pending SIGSTOP */
+ sigaddset(&t->pending.signal, SIGKILL);
+ signal_wake_up(t, 1);
+ }
++
++ return count;
+ }
+
+ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+@@ -1081,8 +1102,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
int ret = check_kill_permission(sig, info, p);
@@ -97554,7 +98390,7 @@ index 2494827..cda80a0 100644
return ret;
}
-@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
+@@ -1644,6 +1668,8 @@ void ptrace_notify(int exit_code)
{
siginfo_t info;
@@ -97563,7 +98399,7 @@ index 2494827..cda80a0 100644
BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
memset(&info, 0, sizeof info);
-@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+@@ -2275,7 +2301,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
int error = -ESRCH;
rcu_read_lock();
@@ -99096,10 +99932,184 @@ index 9c1e627..5ca9447 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5e1e508..bd43b5e 100644
+index 5e1e508..0c488e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -1694,6 +1694,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
+@@ -49,6 +49,84 @@ static unsigned long __initdata default_hstate_size;
+ */
+ static DEFINE_SPINLOCK(hugetlb_lock);
+
++static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
++{
++ bool free = (spool->count == 0) && (spool->used_hpages == 0);
++
++ spin_unlock(&spool->lock);
++
++ /* If no pages are used, and no other handles to the subpool
++ * remain, free the subpool */
++ if (free)
++ kfree(spool);
++}
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
++{
++ struct hugepage_subpool *spool;
++
++ spool = kmalloc(sizeof(*spool), GFP_KERNEL);
++ if (!spool)
++ return NULL;
++
++ spin_lock_init(&spool->lock);
++ spool->count = 1;
++ spool->max_hpages = nr_blocks;
++ spool->used_hpages = 0;
++
++ return spool;
++}
++
++void hugepage_put_subpool(struct hugepage_subpool *spool)
++{
++ spin_lock(&spool->lock);
++ BUG_ON(!spool->count);
++ spool->count--;
++ unlock_or_release_subpool(spool);
++}
++
++static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ int ret = 0;
++
++ if (!spool)
++ return 0;
++
++ spin_lock(&spool->lock);
++ if ((spool->used_hpages + delta) <= spool->max_hpages) {
++ spool->used_hpages += delta;
++ } else {
++ ret = -ENOMEM;
++ }
++ spin_unlock(&spool->lock);
++
++ return ret;
++}
++
++static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ if (!spool)
++ return;
++
++ spin_lock(&spool->lock);
++ spool->used_hpages -= delta;
++ /* If hugetlbfs_put_super couldn't free spool due to
++ * an outstanding quota reference, free it now. */
++ unlock_or_release_subpool(spool);
++}
++
++static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
++{
++ return HUGETLBFS_SB(inode->i_sb)->spool;
++}
++
++static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
++{
++ return subpool_inode(vma->vm_file->f_dentry->d_inode);
++}
++
+ /*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+@@ -541,9 +619,9 @@ static void free_huge_page(struct page *page)
+ */
+ struct hstate *h = page_hstate(page);
+ int nid = page_to_nid(page);
+- struct address_space *mapping;
++ struct hugepage_subpool *spool =
++ (struct hugepage_subpool *)page_private(page);
+
+- mapping = (struct address_space *) page_private(page);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+ BUG_ON(page_count(page));
+@@ -558,8 +636,7 @@ static void free_huge_page(struct page *page)
+ enqueue_huge_page(h, page);
+ }
+ spin_unlock(&hugetlb_lock);
+- if (mapping)
+- hugetlb_put_quota(mapping, 1);
++ hugepage_subpool_put_pages(spool, 1);
+ }
+
+ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+@@ -927,11 +1004,12 @@ static void return_unused_surplus_pages(struct hstate *h,
+ /*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation. Where it does not we will need to logically increase
+- * reservation and actually increase quota before an allocation can occur.
+- * Where any new reservation would be required the reservation change is
+- * prepared, but not committed. Once the page has been quota'd allocated
+- * an instantiated the change should be committed via vma_commit_reservation.
+- * No action is required on failure.
++ * reservation and actually increase subpool usage before an allocation
++ * can occur. Where any new reservation would be required, the
++ * reservation change is prepared, but not committed. Once the page
++ * has been allocated from the subpool and instantiated the change should
++ * be committed via vma_commit_reservation. No action is required on
++ * failure.
+ */
+ static long vma_needs_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+@@ -980,24 +1058,24 @@ static void vma_commit_reservation(struct hstate *h,
+ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ struct hstate *h = hstate_vma(vma);
+ struct page *page;
+- struct address_space *mapping = vma->vm_file->f_mapping;
+- struct inode *inode = mapping->host;
+ long chg;
+
+ /*
+- * Processes that did not create the mapping will have no reserves and
+- * will not have accounted against quota. Check that the quota can be
+- * made before satisfying the allocation
+- * MAP_NORESERVE mappings may also need pages and quota allocated
+- * if no reserve mapping overlaps.
++ * Processes that did not create the mapping will have no
++ * reserves and will not have accounted against subpool
++ * limit. Check that the subpool limit can be made before
++ * satisfying the allocation. MAP_NORESERVE mappings may also
++ * need pages and subpool limit allocated if no reserve
++ * mapping overlaps.
+ */
+ chg = vma_needs_reservation(h, vma, addr);
+ if (chg < 0)
+ return ERR_PTR(-VM_FAULT_OOM);
+ if (chg)
+- if (hugetlb_get_quota(inode->i_mapping, chg))
++ if (hugepage_subpool_get_pages(spool, chg))
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+
+ spin_lock(&hugetlb_lock);
+@@ -1007,13 +1085,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ if (!page) {
+ page = alloc_buddy_huge_page(h, vma, addr);
+ if (!page) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+ }
+ }
+
+ set_page_refcounted(page);
+- set_page_private(page, (unsigned long) mapping);
++ set_page_private(page, (unsigned long)spool);
+
+ vma_commit_reservation(h, vma, addr);
+
+@@ -1694,10 +1772,20 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
kref_get(&reservations->refs);
}
@@ -99115,7 +100125,12 @@ index 5e1e508..bd43b5e 100644
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
-@@ -1709,7 +1718,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ struct resv_map *reservations = vma_resv_map(vma);
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ unsigned long reserve;
+ unsigned long start;
+ unsigned long end;
+@@ -1709,11 +1797,11 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
reserve = (end - start) -
region_count(&reservations->regions, start, end);
@@ -99124,7 +100139,21 @@ index 5e1e508..bd43b5e 100644
if (reserve) {
hugetlb_acct_memory(h, -reserve);
-@@ -1933,6 +1942,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+- hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
++ hugepage_subpool_put_pages(spool, reserve);
+ }
+ }
+ }
+@@ -1910,7 +1998,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ address = address & huge_page_mask(h);
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+ + (vma->vm_pgoff >> PAGE_SHIFT);
+- mapping = (struct address_space *)page_private(page);
++ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ /* Do not unmap the current VMA */
+@@ -1933,6 +2021,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -99151,7 +100180,7 @@ index 5e1e508..bd43b5e 100644
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte,
struct page *pagecache_page)
-@@ -2004,6 +2033,11 @@ retry_avoidcopy:
+@@ -2004,6 +2112,11 @@ retry_avoidcopy:
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
@@ -99163,7 +100192,7 @@ index 5e1e508..bd43b5e 100644
/* Make the old page be freed below */
new_page = old_page;
}
-@@ -2135,6 +2169,10 @@ retry:
+@@ -2135,6 +2248,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -99174,7 +100203,7 @@ index 5e1e508..bd43b5e 100644
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
-@@ -2163,6 +2201,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2163,6 +2280,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -99203,7 +100232,21 @@ index 5e1e508..bd43b5e 100644
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
if (!ptep)
return VM_FAULT_OOM;
-@@ -2392,12 +2452,16 @@ int hugetlb_reserve_pages(struct inode *inode,
+@@ -2364,11 +2503,12 @@ int hugetlb_reserve_pages(struct inode *inode,
+ {
+ long ret, chg;
+ struct hstate *h = hstate_inode(inode);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ /*
+ * Only apply hugepage reservation if asked. At fault time, an
+ * attempt will be made for VM_NORESERVE to allocate a page
+- * and filesystem quota without using reserves
++ * without using reserves
+ */
+ if (acctflag & VM_NORESERVE)
+ return 0;
+@@ -2392,21 +2532,25 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
@@ -99214,26 +100257,30 @@ index 5e1e508..bd43b5e 100644
+ goto out_err;
+ }
- /* There must be enough filesystem quota for the mapping */
+- /* There must be enough filesystem quota for the mapping */
- if (hugetlb_get_quota(inode->i_mapping, chg))
- return -ENOSPC;
-+ if (hugetlb_get_quota(inode->i_mapping, chg)) {
++ /* There must be enough pages in the subpool for the mapping */
++ if (hugepage_subpool_get_pages(spool, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
/*
* Check enough hugepages are available for the reservation.
-@@ -2406,7 +2470,7 @@ int hugetlb_reserve_pages(struct inode *inode,
+- * Hand back the quota if there are not
++ * Hand the pages back to the subpool if there are not
+ */
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
- hugetlb_put_quota(inode->i_mapping, chg);
+- hugetlb_put_quota(inode->i_mapping, chg);
- return ret;
++ hugepage_subpool_put_pages(spool, chg);
+ goto out_err;
}
/*
-@@ -2423,6 +2487,10 @@ int hugetlb_reserve_pages(struct inode *inode,
+@@ -2423,17 +2567,22 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
@@ -99244,6 +100291,19 @@ index 5e1e508..bd43b5e 100644
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+ {
+ struct hstate *h = hstate_inode(inode);
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ inode->i_blocks -= (blocks_per_huge_page(h) * freed);
+ spin_unlock(&inode->i_lock);
+
+- hugetlb_put_quota(inode->i_mapping, (chg - freed));
++ hugepage_subpool_put_pages(spool, (chg - freed));
+ hugetlb_acct_memory(h, -(chg - freed));
+ }
diff --git a/mm/internal.h b/mm/internal.h
index f03e8e2..7354343 100644
--- a/mm/internal.h
@@ -104108,7 +105168,7 @@ index 025f924..a014894 100644
}
EXPORT_SYMBOL(sock_queue_err_skb);
diff --git a/net/core/sock.c b/net/core/sock.c
-index 6605e75..3acebda 100644
+index 6605e75..3d5236e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
@@ -104128,7 +105188,34 @@ index 6605e75..3acebda 100644
case SO_PEERNAME:
{
-@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+@@ -1391,6 +1395,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ gfp_t gfp_mask;
+ long timeo;
+ int err;
++ int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
++
++ err = -EMSGSIZE;
++ if (npages > MAX_SKB_FRAGS)
++ goto failure;
+
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+@@ -1409,14 +1418,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, gfp_mask);
+ if (skb) {
+- int npages;
+ int i;
+
+ /* No pages, we're done... */
+ if (!data_len)
+ break;
+
+- npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ skb->truesize += data_len;
+ skb_shinfo(skb)->nr_frags = npages;
+ for (i = 0; i < npages; i++) {
+@@ -1892,7 +1899,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
*/
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
@@ -104458,9 +105545,27 @@ index f8d04c2..c1188f2 100644
return res;
}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
-index c8b0cc3..ca974ff 100644
+index c8b0cc3..df2154c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
+@@ -900,14 +900,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ }
+ #endif
+
+-static int get_info(struct net *net, void __user *user, int *len, int compat)
++static int get_info(struct net *net, void __user *user, int len, int compat)
+ {
+ char name[ARPT_TABLE_MAXNAMELEN];
+ struct xt_table *t;
+ int ret;
+
+- if (*len != sizeof(struct arpt_getinfo)) {
+- duprintf("length %u != %Zu\n", *len,
++ if (len != sizeof(struct arpt_getinfo)) {
++ duprintf("length %u != %Zu\n", len,
+ sizeof(struct arpt_getinfo));
+ return -EINVAL;
+ }
@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
private = &tmp;
}
@@ -104521,9 +105626,27 @@ index c156db2..e772975 100644
skblen = skb->len;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
-index 0606db1..023c85c 100644
+index 0606db1..2f32531 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1107,14 +1107,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ }
+ #endif
+
+-static int get_info(struct net *net, void __user *user, int *len, int compat)
++static int get_info(struct net *net, void __user *user, int len, int compat)
+ {
+ char name[IPT_TABLE_MAXNAMELEN];
+ struct xt_table *t;
+ int ret;
+
+- if (*len != sizeof(struct ipt_getinfo)) {
+- duprintf("length %u != %zu\n", *len,
++ if (len != sizeof(struct ipt_getinfo)) {
++ duprintf("length %u != %zu\n", len,
+ sizeof(struct ipt_getinfo));
+ return -EINVAL;
+ }
@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
private = &tmp;
}
@@ -105124,9 +106247,27 @@ index 1cf3f0c..1d4376f 100644
skblen = skb->len;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
-index 78b5a36..d2f7291 100644
+index 78b5a36..4a6941c 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1139,14 +1139,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ }
+ #endif
+
+-static int get_info(struct net *net, void __user *user, int *len, int compat)
++static int get_info(struct net *net, void __user *user, int len, int compat)
+ {
+ char name[IP6T_TABLE_MAXNAMELEN];
+ struct xt_table *t;
+ int ret;
+
+- if (*len != sizeof(struct ip6t_getinfo)) {
+- duprintf("length %u != %zu\n", *len,
++ if (len != sizeof(struct ip6t_getinfo)) {
++ duprintf("length %u != %zu\n", len,
+ sizeof(struct ip6t_getinfo));
+ return -EINVAL;
+ }
@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
private = &tmp;
}
@@ -106595,6 +107736,294 @@ index ab545e0..4079b3b 100644
sizeof(val));
set_fs(oldfs);
}
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 7d188bc..523efbb 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -983,7 +983,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
+ struct sock *make;
+ struct rose_sock *make_rose;
+ struct rose_facilities_struct facilities;
+- int n, len;
++ int n;
+
+ skb->sk = NULL; /* Initially we don't know who it's for */
+
+@@ -992,9 +992,9 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
+ */
+ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
+
+- len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
+- len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
+- if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
++ if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
++ skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
++ &facilities)) {
+ rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
+ return 0;
+ }
+diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
+index 114df6e..37965b8 100644
+--- a/net/rose/rose_loopback.c
++++ b/net/rose/rose_loopback.c
+@@ -72,9 +72,20 @@ static void rose_loopback_timer(unsigned long param)
+ unsigned int lci_i, lci_o;
+
+ while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
++ if (skb->len < ROSE_MIN_LEN) {
++ kfree_skb(skb);
++ continue;
++ }
+ lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+ frametype = skb->data[2];
+- dest = (rose_address *)(skb->data + 4);
++ if (frametype == ROSE_CALL_REQUEST &&
++ (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
++ skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
++ ROSE_CALL_REQ_ADDR_LEN_VAL)) {
++ kfree_skb(skb);
++ continue;
++ }
++ dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
+ lci_o = 0xFFF - lci_i;
+
+ skb_reset_transport_header(skb);
+diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
+index 08230fa..1646b25 100644
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -852,7 +852,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ unsigned int lci, new_lci;
+ unsigned char cause, diagnostic;
+ struct net_device *dev;
+- int len, res = 0;
++ int res = 0;
+ char buf[11];
+
+ #if 0
+@@ -860,10 +860,17 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ return res;
+ #endif
+
++ if (skb->len < ROSE_MIN_LEN)
++ return res;
+ frametype = skb->data[2];
+ lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+- src_addr = (rose_address *)(skb->data + 9);
+- dest_addr = (rose_address *)(skb->data + 4);
++ if (frametype == ROSE_CALL_REQUEST &&
++ (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
++ skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
++ ROSE_CALL_REQ_ADDR_LEN_VAL))
++ return res;
++ src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
++ dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
+
+ spin_lock_bh(&rose_neigh_list_lock);
+ spin_lock_bh(&rose_route_list_lock);
+@@ -1001,12 +1008,11 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ goto out;
+ }
+
+- len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
+- len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
+-
+ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
+
+- if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
++ if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
++ skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
++ &facilities)) {
+ rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
+ goto out;
+ }
+diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
+index 07bca7d..32e5c9f 100644
+--- a/net/rose/rose_subr.c
++++ b/net/rose/rose_subr.c
+@@ -141,7 +141,7 @@ void rose_write_internal(struct sock *sk, int frametype)
+ *dptr++ = ROSE_GFI | lci1;
+ *dptr++ = lci2;
+ *dptr++ = frametype;
+- *dptr++ = 0xAA;
++ *dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL;
+ memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
+ dptr += ROSE_ADDR_LEN;
+ memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
+@@ -245,12 +245,16 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *
+ do {
+ switch (*p & 0xC0) {
+ case 0x00:
++ if (len < 2)
++ return -1;
+ p += 2;
+ n += 2;
+ len -= 2;
+ break;
+
+ case 0x40:
++ if (len < 3)
++ return -1;
+ if (*p == FAC_NATIONAL_RAND)
+ facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
+ p += 3;
+@@ -259,32 +263,48 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *
+ break;
+
+ case 0x80:
++ if (len < 4)
++ return -1;
+ p += 4;
+ n += 4;
+ len -= 4;
+ break;
+
+ case 0xC0:
++ if (len < 2)
++ return -1;
+ l = p[1];
++ if (len < 2 + l)
++ return -1;
+ if (*p == FAC_NATIONAL_DEST_DIGI) {
+ if (!fac_national_digis_received) {
++ if (l < AX25_ADDR_LEN)
++ return -1;
+ memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
+ facilities->source_ndigis = 1;
+ }
+ }
+ else if (*p == FAC_NATIONAL_SRC_DIGI) {
+ if (!fac_national_digis_received) {
++ if (l < AX25_ADDR_LEN)
++ return -1;
+ memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
+ facilities->dest_ndigis = 1;
+ }
+ }
+ else if (*p == FAC_NATIONAL_FAIL_CALL) {
++ if (l < AX25_ADDR_LEN)
++ return -1;
+ memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
+ }
+ else if (*p == FAC_NATIONAL_FAIL_ADD) {
++ if (l < 1 + ROSE_ADDR_LEN)
++ return -1;
+ memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
+ }
+ else if (*p == FAC_NATIONAL_DIGIS) {
++ if (l % AX25_ADDR_LEN)
++ return -1;
+ fac_national_digis_received = 1;
+ facilities->source_ndigis = 0;
+ facilities->dest_ndigis = 0;
+@@ -318,24 +338,32 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac
+ do {
+ switch (*p & 0xC0) {
+ case 0x00:
++ if (len < 2)
++ return -1;
+ p += 2;
+ n += 2;
+ len -= 2;
+ break;
+
+ case 0x40:
++ if (len < 3)
++ return -1;
+ p += 3;
+ n += 3;
+ len -= 3;
+ break;
+
+ case 0x80:
++ if (len < 4)
++ return -1;
+ p += 4;
+ n += 4;
+ len -= 4;
+ break;
+
+ case 0xC0:
++ if (len < 2)
++ return -1;
+ l = p[1];
+
+ /* Prevent overflows*/
+@@ -364,49 +392,44 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac
+ return n;
+ }
+
+-int rose_parse_facilities(unsigned char *p,
++int rose_parse_facilities(unsigned char *p, unsigned packet_len,
+ struct rose_facilities_struct *facilities)
+ {
+ int facilities_len, len;
+
+ facilities_len = *p++;
+
+- if (facilities_len == 0)
++ if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
+ return 0;
+
+- while (facilities_len > 0) {
+- if (*p == 0x00) {
+- facilities_len--;
+- p++;
++ while (facilities_len >= 3 && *p == 0x00) {
++ facilities_len--;
++ p++;
+
+- switch (*p) {
+- case FAC_NATIONAL: /* National */
+- len = rose_parse_national(p + 1, facilities, facilities_len - 1);
+- if (len < 0)
+- return 0;
+- facilities_len -= len + 1;
+- p += len + 1;
+- break;
++ switch (*p) {
++ case FAC_NATIONAL: /* National */
++ len = rose_parse_national(p + 1, facilities, facilities_len - 1);
++ break;
+
+- case FAC_CCITT: /* CCITT */
+- len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
+- if (len < 0)
+- return 0;
+- facilities_len -= len + 1;
+- p += len + 1;
+- break;
++ case FAC_CCITT: /* CCITT */
++ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
++ break;
+
+- default:
+- printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
+- facilities_len--;
+- p++;
+- break;
+- }
+- } else
+- break; /* Error in facilities format */
++ default:
++ printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
++ len = 1;
++ break;
++ }
++
++ if (len < 0)
++ return 0;
++ if (WARN_ON(len >= facilities_len))
++ return 0;
++ facilities_len -= len + 1;
++ p += len + 1;
+ }
+
+- return 1;
++ return facilities_len == 0;
+ }
+
+ static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index a86afce..8657bce 100644
--- a/net/rxrpc/af_rxrpc.c
@@ -109307,6 +110736,19 @@ index e19316d..339f7ae 100644
key = ima_hash_key(entry->digest);
hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
return 0;
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index b0bd910..3ddd470 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1270,7 +1270,7 @@ long keyctl_session_to_parent(void)
+ goto not_permitted;
+
+ /* the parent must be single threaded */
+- if (atomic_read(&parent->signal->count) != 1)
++ if (!thread_group_empty(parent))
+ goto not_permitted;
+
+ /* the parent and the child must have different session keyrings or
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e031952..c9a535d 100644
--- a/security/keys/keyring.c
@@ -110830,10 +112272,10 @@ index 0000000..846aeb0
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..048d4ff
+index 0000000..92ed719
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,328 @@
+@@ -0,0 +1,331 @@
+/*
+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
+ * Copyright 2011 by PaX Team <pageexec@freemail.hu>
@@ -111047,6 +112489,9 @@ index 0000000..048d4ff
+ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
+ tree type = TREE_TYPE(field);
+ enum tree_code code = TREE_CODE(type);
++
++ if (node == type)
++ return false;
+ if (code == RECORD_TYPE || code == UNION_TYPE) {
+ if (!(walk_struct(type)))
+ return false;
@@ -111060,7 +112505,7 @@ index 0000000..048d4ff
+{
+ tree type = (tree)event_data;
+
-+ if (type == NULL_TREE)
++ if (type == NULL_TREE || type == error_mark_node)
+ return;
+
+ if (TYPE_READONLY(type))
@@ -116546,7 +117991,7 @@ index 83b3dde..835bee7 100644
break;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 4f3434f..fc63040 100644
+index 4f3434f..c807237 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -43,6 +43,8 @@
@@ -116558,7 +118003,7 @@ index 4f3434f..fc63040 100644
#include <asm/processor.h>
#include <asm/io.h>
-@@ -575,12 +577,73 @@ out:
+@@ -575,12 +577,76 @@ out:
return r;
}
@@ -116629,10 +118074,13 @@ index 4f3434f..fc63040 100644
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
+ u8 header_type;
++
++ if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
++ return -EINVAL;
down_read(&kvm->slots_lock);
mutex_lock(&kvm->lock);
-@@ -607,6 +670,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+@@ -607,6 +673,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
r = -EINVAL;
goto out_free;
}
@@ -116651,7 +118099,50 @@ index 4f3434f..fc63040 100644
if (pci_enable_device(dev)) {
printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
r = -EBUSY;
-@@ -2494,7 +2569,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
+@@ -635,16 +713,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+
+ list_add(&match->list, &kvm->arch.assigned_dev_head);
+
+- if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+- if (!kvm->arch.iommu_domain) {
+- r = kvm_iommu_map_guest(kvm);
+- if (r)
+- goto out_list_del;
+- }
+- r = kvm_assign_device(kvm, match);
++ if (!kvm->arch.iommu_domain) {
++ r = kvm_iommu_map_guest(kvm);
+ if (r)
+ goto out_list_del;
+ }
++ r = kvm_assign_device(kvm, match);
++ if (r)
++ goto out_list_del;
+
+ out:
+ mutex_unlock(&kvm->lock);
+@@ -683,8 +759,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
+ goto out;
+ }
+
+- if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+- kvm_deassign_device(kvm, match);
++ kvm_deassign_device(kvm, match);
+
+ kvm_free_assigned_device(kvm, match);
+
+@@ -1782,6 +1857,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ return r;
+
+ mutex_lock(&kvm->lock);
++ if (!kvm_vcpu_compatible(vcpu)) {
++ r = -EINVAL;
++ goto vcpu_destroy;
++ }
+ if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+ r = -EINVAL;
+ goto vcpu_destroy;
+@@ -2494,7 +2573,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
if (kvm_rebooting)
/* spin while reset goes on */
while (true)
@@ -116660,7 +118151,7 @@ index 4f3434f..fc63040 100644
/* Fault while not rebooting. We want the trace. */
BUG();
}
-@@ -2714,7 +2789,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+@@ -2714,7 +2793,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
@@ -116669,7 +118160,7 @@ index 4f3434f..fc63040 100644
struct module *module)
{
int r;
-@@ -2767,15 +2842,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
+@@ -2767,15 +2846,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
/* A kmem cache lets us meet the alignment requirements of fx_save. */
kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
__alignof__(struct kvm_vcpu),