author     Anthony G. Basile <blueness@gentoo.org>  2015-10-20 17:38:58 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2015-10-20 17:38:58 -0400
commit     b68c3bf79c56ec566a5080b2e316733096b787bf (patch)
tree       bf3ce4c63a7a27a633f26af0039ff58bdfd76797
parent     grsecurity-3.1-4.2.3-201510171105 (diff)
download   hardened-patchset-b68c3bf79c56ec566a5080b2e316733096b787bf.tar.gz
           hardened-patchset-b68c3bf79c56ec566a5080b2e316733096b787bf.tar.bz2
           hardened-patchset-b68c3bf79c56ec566a5080b2e316733096b787bf.zip
grsecurity-3.1-4.2.3-201510200858 (20151020)
-rw-r--r--  4.2.3/0000_README                                  |   2
-rw-r--r--  4.2.3/4420_grsecurity-3.1-4.2.3-201510200858.patch | 500
            (renamed from 4.2.3/4420_grsecurity-3.1-4.2.3-201510171105.patch)
2 files changed, 276 insertions, 226 deletions
diff --git a/4.2.3/0000_README b/4.2.3/0000_README
index 71a4c94..d0e396e 100644
--- a/4.2.3/0000_README
+++ b/4.2.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.2.3-201510171105.patch
+Patch: 4420_grsecurity-3.1-4.2.3-201510200858.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.2.3/4420_grsecurity-3.1-4.2.3-201510171105.patch b/4.2.3/4420_grsecurity-3.1-4.2.3-201510200858.patch
index 304030d..0d6eb57 100644
--- a/4.2.3/4420_grsecurity-3.1-4.2.3-201510171105.patch
+++ b/4.2.3/4420_grsecurity-3.1-4.2.3-201510200858.patch
@@ -20403,17 +20403,10 @@ index bf7f8b5..ca5799d 100644
{
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
-index fd74a11..98bd591 100644
+index fd74a11..35fd5af 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
-@@ -13,12 +13,16 @@
- */
- static inline void native_set_pte(pte_t *ptep , pte_t pte)
- {
-+ pax_open_kernel();
- *ptep = pte;
-+ pax_close_kernel();
- }
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -20423,66 +20416,11 @@ index fd74a11..98bd591 100644
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
-@@ -34,13 +38,20 @@ static inline void native_pmd_clear(pmd_t *pmdp)
- static inline void native_pte_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *xp)
- {
-+ pax_open_kernel();
- *xp = native_make_pte(0);
-+ pax_close_kernel();
- }
-
- #ifdef CONFIG_SMP
- static inline pte_t native_ptep_get_and_clear(pte_t *xp)
- {
-- return __pte(xchg(&xp->pte_low, 0));
-+ pte_t pte;
-+
-+ pax_open_kernel();
-+ pte = __pte(xchg(&xp->pte_low, 0));
-+ pax_close_kernel();
-+ return pte;
- }
- #else
- #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
-@@ -49,7 +60,12 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
- #ifdef CONFIG_SMP
- static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
- {
-- return __pmd(xchg((pmdval_t *)xp, 0));
-+ pmd_t pmd;
-+
-+ pax_open_kernel();
-+ pmd = __pmd(xchg((pmdval_t *)xp, 0));
-+ pax_close_kernel();
-+ return pmd;
- }
- #else
- #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index cdaa58c..4038692 100644
+index cdaa58c..ae30f0d 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -26,9 +26,11 @@
- */
- static inline void native_set_pte(pte_t *ptep, pte_t pte)
- {
-+ pax_open_kernel();
- ptep->pte_high = pte.pte_high;
- smp_wmb();
- ptep->pte_low = pte.pte_low;
-+ pax_close_kernel();
- }
-
- #define pmd_read_atomic pmd_read_atomic
-@@ -87,17 +89,23 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-
- static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
- {
-+ pax_open_kernel();
- set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
-+ pax_close_kernel();
- }
+@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -20499,17 +20437,7 @@ index cdaa58c..4038692 100644
}
/*
-@@ -108,17 +116,22 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud)
- static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
- {
-+ pax_open_kernel();
- ptep->pte_low = 0;
- smp_wmb();
- ptep->pte_high = 0;
-+ pax_close_kernel();
- }
-
+@@ -116,9 +120,12 @@ static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
static inline void native_pmd_clear(pmd_t *pmd)
{
u32 *tmp = (u32 *)pmd;
@@ -20522,30 +20450,6 @@ index cdaa58c..4038692 100644
}
static inline void pud_clear(pud_t *pudp)
-@@ -143,9 +156,11 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
- pte_t res;
-
- /* xchg acts as a barrier before the setting of the high bits */
-+ pax_open_kernel();
- res.pte_low = xchg(&ptep->pte_low, 0);
- res.pte_high = ptep->pte_high;
- ptep->pte_high = 0;
-+ pax_close_kernel();
-
- return res;
- }
-@@ -166,9 +181,11 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
- union split_pmd res, *orig = (union split_pmd *)pmdp;
-
- /* xchg acts as a barrier before setting of the high bits */
-+ pax_open_kernel();
- res.pmd_low = xchg(&orig->pmd_low, 0);
- res.pmd_high = orig->pmd_high;
- orig->pmd_high = 0;
-+ pax_close_kernel();
-
- return res.pmd;
- }
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 867da5b..7ec083d 100644
--- a/arch/x86/include/asm/pgtable.h
@@ -20767,7 +20671,7 @@ index 867da5b..7ec083d 100644
static inline int page_level_shift(enum pg_level level)
{
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
-index b6c0b40..7b497ea 100644
+index b6c0b40..3535d47 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -25,9 +25,6 @@
@@ -20793,7 +20697,15 @@ index b6c0b40..7b497ea 100644
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
-@@ -65,6 +68,9 @@ do { \
+@@ -59,12 +62,17 @@ void paging_init(void);
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
#endif /* !__ASSEMBLY__ */
@@ -20846,7 +20758,7 @@ index 9fb2f2b..8e18c70 100644
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 2ee7811..c985cfd 100644
+index 2ee7811..afd76c0 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -16,11 +16,17 @@
@@ -20870,24 +20782,7 @@ index 2ee7811..c985cfd 100644
#define swapper_pg_dir init_level4_pgt
-@@ -47,12 +53,16 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
- static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
- {
-+ pax_open_kernel();
- *ptep = native_make_pte(0);
-+ pax_close_kernel();
- }
-
- static inline void native_set_pte(pte_t *ptep, pte_t pte)
- {
-+ pax_open_kernel();
- *ptep = pte;
-+ pax_close_kernel();
- }
-
- static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
-@@ -62,7 +72,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -62,7 +68,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -20897,35 +20792,7 @@ index 2ee7811..c985cfd 100644
}
static inline void native_pmd_clear(pmd_t *pmd)
-@@ -73,7 +85,12 @@ static inline void native_pmd_clear(pmd_t *pmd)
- static inline pte_t native_ptep_get_and_clear(pte_t *xp)
- {
- #ifdef CONFIG_SMP
-- return native_make_pte(xchg(&xp->pte, 0));
-+ pte_t pte;
-+
-+ pax_open_kernel();
-+ pte = native_make_pte(xchg(&xp->pte, 0));
-+ pax_close_kernel();
-+ return pte;
- #else
- /* native_local_ptep_get_and_clear,
- but duplicated because of cyclic dependency */
-@@ -86,7 +103,12 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
- static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
- {
- #ifdef CONFIG_SMP
-- return native_make_pmd(xchg(&xp->pmd, 0));
-+ pmd_t pmd;
-+
-+ pax_open_kernel();
-+ pmd = native_make_pmd(xchg(&xp->pmd, 0));
-+ pax_close_kernel();
-+ return pmd;
- #else
- /* native_local_pmdp_get_and_clear,
- but duplicated because of cyclic dependency */
-@@ -98,7 +120,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+@@ -98,7 +106,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
@@ -20935,7 +20802,7 @@ index 2ee7811..c985cfd 100644
}
static inline void native_pud_clear(pud_t *pud)
-@@ -108,6 +132,13 @@ static inline void native_pud_clear(pud_t *pud)
+@@ -108,6 +118,13 @@ static inline void native_pud_clear(pud_t *pud)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -33743,7 +33610,7 @@ index 81bf3d2..7ef25c2 100644
* XXX: batch / limit 'nr', to avoid large irq off latency
* needs some instrumenting to determine the common sizes used by
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index eecb207a..808343a 100644
+index eecb207a..e76b7f4 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -35,6 +35,8 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
@@ -33755,12 +33622,14 @@ index eecb207a..808343a 100644
preempt_disable();
pagefault_disable();
-@@ -45,7 +47,9 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+@@ -45,7 +47,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
+
++ pax_open_kernel();
set_pte(kmap_pte-idx, mk_pte(page, prot));
++ pax_close_kernel();
+
arch_flush_lazy_mmu_mode();
@@ -34331,7 +34200,7 @@ index 68aec42..95ad5d3 100644
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 3fba623..a5d0500 100644
+index 3fba623..5ee9802 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -136,7 +136,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
@@ -34419,7 +34288,17 @@ index 3fba623..a5d0500 100644
if (pmd != pmd_offset(pud, 0))
printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
pmd, pmd_offset(pud, 0));
-@@ -337,14 +365,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+@@ -275,7 +303,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+ pmd = fill_pmd(pud, vaddr);
+ pte = fill_pte(pmd, vaddr);
+
++ pax_open_kernel();
+ set_pte(pte, new_pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+@@ -337,14 +367,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
pgd = pgd_offset_k((unsigned long)__va(phys));
if (pgd_none(*pgd)) {
pud = (pud_t *) spp_getpage();
@@ -34436,7 +34315,7 @@ index 3fba623..a5d0500 100644
}
pmd = pmd_offset(pud, phys);
BUG_ON(!pmd_none(*pmd));
-@@ -585,7 +611,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+@@ -585,7 +613,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
prot);
spin_lock(&init_mm.page_table_lock);
@@ -34445,7 +34324,7 @@ index 3fba623..a5d0500 100644
spin_unlock(&init_mm.page_table_lock);
}
__flush_tlb_all();
-@@ -626,7 +652,7 @@ kernel_physical_mapping_init(unsigned long start,
+@@ -626,7 +654,7 @@ kernel_physical_mapping_init(unsigned long start,
page_size_mask);
spin_lock(&init_mm.page_table_lock);
@@ -34454,6 +34333,22 @@ index 3fba623..a5d0500 100644
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 9c0ff04..9020d5f 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++ pax_open_kernel();
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b9c78f3..9ca7e24 100644
--- a/arch/x86/mm/ioremap.c
@@ -34718,7 +34613,7 @@ index 4053bb5..b1ad3dc 100644
unsigned long uninitialized_var(pfn_align);
int i, nid;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 727158c..91bc23b 100644
+index 727158c..54dd3ff 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -260,7 +260,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
@@ -34765,9 +34660,11 @@ index 727158c..91bc23b 100644
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
return prot;
-@@ -437,16 +446,28 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+@@ -436,23 +445,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
/* change init_mm */
++ pax_open_kernel();
set_pte_atomic(kpte, pte);
+
#ifdef CONFIG_X86_32
@@ -34796,7 +34693,14 @@ index 727158c..91bc23b 100644
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
set_pte_atomic((pte_t *)pmd, pte);
-@@ -505,7 +526,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
+ }
+ }
+ #endif
++ pax_close_kernel();
+ }
+
+ static int
+@@ -505,7 +528,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* up accordingly.
*/
old_pte = *kpte;
@@ -34806,6 +34710,16 @@ index 727158c..91bc23b 100644
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
+@@ -1176,7 +1200,9 @@ repeat:
+ * Do we really change anything ?
+ */
+ if (pte_val(old_pte) != pte_val(new_pte)) {
++ pax_open_kernel();
+ set_pte_atomic(kpte, new_pte);
++ pax_close_kernel();
+ cpa->flags |= CPA_FLUSHTLB;
+ }
+ cpa->numpages = 1;
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 188e3e0..5c75446 100644
--- a/arch/x86/mm/pat.c
@@ -34920,7 +34834,7 @@ index 9f0614d..92ae64a 100644
p += get_opcode(p, &opcode);
for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index fb0a9dd..8560f52 100644
+index fb0a9dd..5ab49c4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -98,10 +98,75 @@ static inline void pgd_list_del(pgd_t *pgd)
@@ -35272,6 +35186,36 @@ index fb0a9dd..8560f52 100644
}
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+@@ -620,9 +742,11 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+
+ prot = pgprot_4k_2_large(prot);
+
++ pax_open_kernel();
+ set_pte((pte_t *)pmd, pfn_pte(
+ (u64)addr >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)));
++ pax_close_kernel();
+
+ return 1;
+ }
+diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
+index 75cc097..79a097f 100644
+--- a/arch/x86/mm/pgtable_32.c
++++ b/arch/x86/mm/pgtable_32.c
+@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++ pax_open_kernel();
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 90555bf..f5f1828 100644
--- a/arch/x86/mm/setup_nx.c
@@ -58341,6 +58285,19 @@ index 3806e70..55c508b 100644
struct usb_serial_port *port = info->port;
struct usb_serial *serial;
int retval = -ENODEV;
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 540add2..2a2c7da 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -689,7 +689,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+ if (need_auto_sense) {
+ int temp_result;
+ struct scsi_eh_save ses;
+- int sense_size = US_SENSE_SIZE;
++ unsigned int sense_size = US_SENSE_SIZE;
+ struct scsi_sense_hdr sshdr;
+ const u8 *scdd;
+ u8 fm_ili;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed..583589d 100644
--- a/drivers/usb/storage/usb.c
@@ -77083,7 +77040,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index 1977c2a..40e7f8f 100644
+index 1977c2a..6371905 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -56,8 +56,20 @@
@@ -77579,7 +77536,7 @@ index 1977c2a..40e7f8f 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1743,3 +1918,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
+@@ -1743,3 +1918,317 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
argv, envp, flags);
}
#endif
@@ -77886,9 +77843,14 @@ index 1977c2a..40e7f8f 100644
+#ifdef CONFIG_PAX_SIZE_OVERFLOW
+void __nocapture(1, 3, 4) __used report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
+{
++#ifdef CONFIG_PAX_SIZE_OVERFLOW_DISABLE_KILL
++ printk_ratelimited(KERN_EMERG "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
++ dump_stack();
++#else
+ printk(KERN_EMERG "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
+ dump_stack();
+ do_group_exit(SIGKILL);
++#endif
+}
+EXPORT_SYMBOL(report_size_overflow);
+#endif
@@ -112776,17 +112738,32 @@ index 6297f6b..7652403 100644
}
EXPORT_SYMBOL(__get_user_pages);
diff --git a/mm/highmem.c b/mm/highmem.c
-index 123bcd3..07e8516 100644
+index 123bcd3..c2c85db 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
-@@ -196,7 +196,6 @@ static void flush_all_zero_pkmaps(void)
+@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
+ * So no dangers, even with speculative execution.
*/
page = pte_page(pkmap_page_table[i]);
++ pax_open_kernel();
pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
-
++ pax_close_kernel();
set_page_address(page, NULL);
need_flush = 1;
}
+@@ -259,8 +260,11 @@ start:
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++ pax_open_kernel();
+ set_pte_at(&init_mm, vaddr,
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
++ pax_close_kernel();
+
+ pkmap_count[last_pkmap_nr] = 1;
+ set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a8c3087..ec431dc 100644
--- a/mm/hugetlb.c
@@ -117574,7 +117551,7 @@ index 68ff8a5..40c7a70 100644
if (len > buflen)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 2faaa29..14881ba 100644
+index 2faaa29..c816cf4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -40,20 +40,65 @@ struct vfree_deferred {
@@ -117646,9 +117623,11 @@ index 2faaa29..14881ba 100644
/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
-@@ -62,8 +107,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -61,10 +106,23 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+ pte_t *pte;
pte = pte_offset_kernel(pmd, addr);
++ pax_open_kernel();
do {
- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
@@ -117666,9 +117645,16 @@ index 2faaa29..14881ba 100644
+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+ }
} while (pte++, addr += PAGE_SIZE, addr != end);
++ pax_close_kernel();
}
-@@ -130,10 +186,18 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+@@ -127,16 +185,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
++
++ pax_open_kernel();
do {
struct page *page = pages[*nr];
@@ -117678,18 +117664,24 @@ index 2faaa29..14881ba 100644
+#endif
+
+ if (!pte_none(*pte)) {
++ pax_close_kernel();
+ WARN_ON(1);
return -EBUSY;
- if (WARN_ON(!page))
+ }
+ if (!page) {
++ pax_close_kernel();
+ WARN_ON(1);
return -ENOMEM;
+ }
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
(*nr)++;
} while (pte++, addr += PAGE_SIZE, addr != end);
-@@ -146,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
++ pax_close_kernel();
+ return 0;
+ }
+
+@@ -146,7 +217,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
pmd_t *pmd;
unsigned long next;
@@ -117698,7 +117690,7 @@ index 2faaa29..14881ba 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -163,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+@@ -163,7 +234,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
pud_t *pud;
unsigned long next;
@@ -117707,7 +117699,7 @@ index 2faaa29..14881ba 100644
if (!pud)
return -ENOMEM;
do {
-@@ -223,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
+@@ -223,6 +294,12 @@ int is_vmalloc_or_module_addr(const void *x)
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
#endif
@@ -117720,7 +117712,7 @@ index 2faaa29..14881ba 100644
return is_vmalloc_addr(x);
}
-@@ -243,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+@@ -243,8 +320,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
@@ -117735,7 +117727,7 @@ index 2faaa29..14881ba 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;
-@@ -346,7 +422,7 @@ static void purge_vmap_area_lazy(void);
+@@ -346,7 +429,7 @@ static void purge_vmap_area_lazy(void);
* Allocate a region of KVA of the specified size and alignment, within the
* vstart and vend.
*/
@@ -117744,7 +117736,7 @@ index 2faaa29..14881ba 100644
unsigned long align,
unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask)
-@@ -1202,13 +1278,27 @@ void __init vmalloc_init(void)
+@@ -1202,13 +1285,27 @@ void __init vmalloc_init(void)
for_each_possible_cpu(i) {
struct vmap_block_queue *vbq;
struct vfree_deferred *p;
@@ -117773,7 +117765,7 @@ index 2faaa29..14881ba 100644
}
/* Import existing vmlist entries. */
-@@ -1333,6 +1423,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1333,6 +1430,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -117790,7 +117782,7 @@ index 2faaa29..14881ba 100644
if (flags & VM_IOREMAP)
align = 1ul << clamp_t(int, fls_long(size),
PAGE_SHIFT, IOREMAP_MAX_ORDER);
-@@ -1531,13 +1631,36 @@ EXPORT_SYMBOL(vfree);
+@@ -1531,13 +1638,36 @@ EXPORT_SYMBOL(vfree);
*/
void vunmap(const void *addr)
{
@@ -117830,7 +117822,7 @@ index 2faaa29..14881ba 100644
/**
* vmap - map an array of pages into virtually contiguous space
* @pages: array of page pointers
-@@ -1558,6 +1681,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1558,6 +1688,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;
@@ -117842,7 +117834,7 @@ index 2faaa29..14881ba 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1662,6 +1790,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1662,6 +1797,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
@@ -117857,7 +117849,7 @@ index 2faaa29..14881ba 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
vm_flags, start, end, node, gfp_mask, caller);
if (!area)
-@@ -1715,6 +1851,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
+@@ -1715,6 +1858,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_mask, prot, 0, node, caller);
}
@@ -117872,7 +117864,7 @@ index 2faaa29..14881ba 100644
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
-@@ -1838,10 +1982,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1838,10 +1989,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -117884,7 +117876,7 @@ index 2faaa29..14881ba 100644
NUMA_NO_NODE, __builtin_return_address(0));
}
-@@ -2148,6 +2291,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+@@ -2148,6 +2298,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
{
struct vm_struct *area;
@@ -117893,7 +117885,7 @@ index 2faaa29..14881ba 100644
size = PAGE_ALIGN(size);
if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
-@@ -2630,7 +2775,11 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2630,7 +2782,11 @@ static int s_show(struct seq_file *m, void *p)
v->addr, v->addr + v->size, v->size);
if (v->caller)
@@ -125813,10 +125805,10 @@ index c0a932d..817c587 100755
# Find all available archs
find_all_archs()
diff --git a/security/Kconfig b/security/Kconfig
-index bf4ec46..6748ce1 100644
+index bf4ec46..d32a3b8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,980 @@
+@@ -4,6 +4,985 @@
menu "Security options"
@@ -126762,6 +126754,11 @@ index bf4ec46..6748ce1 100644
+ i.e., gcc 4.5 or newer. You may need to install the supporting
+ headers explicitly in addition to the normal gcc package.
+
++config PAX_SIZE_OVERFLOW_DISABLE_KILL
++ bool "Do not kill process on overflow detection"
++ default y
++ depends on PAX_SIZE_OVERFLOW
++
+config PAX_LATENT_ENTROPY
+ bool "Generate some entropy during boot and runtime"
+ default y if GRKERNSEC_CONFIG_AUTO
@@ -126797,7 +126794,7 @@ index bf4ec46..6748ce1 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -104,7 +1078,7 @@ config INTEL_TXT
+@@ -104,7 +1083,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -132442,10 +132439,10 @@ index 0000000..4c7f7c6
+targets += size_overflow_hash.h size_overflow_hash_aux.h disable_size_overflow_hash.h
diff --git a/tools/gcc/size_overflow_plugin/disable_size_overflow_hash.data b/tools/gcc/size_overflow_plugin/disable_size_overflow_hash.data
new file mode 100644
-index 0000000..3414692
+index 0000000..675f934
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/disable_size_overflow_hash.data
-@@ -0,0 +1,12394 @@
+@@ -0,0 +1,12396 @@
+disable_so_interrupt_pnode_gru_message_queue_desc_4 interrupt_pnode gru_message_queue_desc 0 4 NULL
+disable_so_bch_btree_insert_fndecl_12 bch_btree_insert fndecl 0 12 NULL
+disable_so_macvlan_sync_address_fndecl_22 macvlan_sync_address fndecl 0 22 NULL nohasharray
@@ -144840,6 +144837,8 @@ index 0000000..3414692
+disable_so_s5m_rtc_set_time_fndecl_65518 s5m_rtc_set_time fndecl 0 65518 NULL
+disable_so_addr_w83627hf_data_65526 addr w83627hf_data 0 65526 NULL
+disable_so_si_pt_regs_65527 si pt_regs 0 65527 NULL
++enable_so_rts_threshold_wiphy_15170 rts_threshold wiphy 0 15170 NULL
++enable_so_value_iw_param_65472 value iw_param 0 65472 NULL
diff --git a/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
new file mode 100644
index 0000000..be9724d
@@ -147036,10 +147035,10 @@ index 0000000..ab2d25a
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..d841329
+index 0000000..ba470a6
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,20763 @@
+@@ -0,0 +1,20761 @@
+enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL
+enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL
+enable_so_size_ttm_mem_reg_8 size ttm_mem_reg 0 8 NULL
@@ -151811,7 +151810,6 @@ index 0000000..d841329
+enable_so_info0_rx_msdu_start_15164 info0 rx_msdu_start 0 15164 NULL
+enable_so_i_disk_sec_hpfs_inode_info_15165 i_disk_sec hpfs_inode_info 0 15165 NULL
+enable_so_ovl_dir_llseek_fndecl_15169 ovl_dir_llseek fndecl 2 15169 NULL
-+enable_so_rts_threshold_wiphy_15170 rts_threshold wiphy 0 15170 NULL
+enable_so_frontswap_curr_pages_fndecl_15172 frontswap_curr_pages fndecl 0 15172 NULL nohasharray
+enable_so_xfs_idata_realloc_fndecl_15172 xfs_idata_realloc fndecl 2 15172 &enable_so_frontswap_curr_pages_fndecl_15172
+enable_so_si_namelen_nfsd4_secinfo_15174 si_namelen nfsd4_secinfo 0 15174 NULL
@@ -167787,7 +167785,6 @@ index 0000000..d841329
+enable_so_xfs_dir3_data_block_free_fndecl_65456 xfs_dir3_data_block_free fndecl 4 65456 NULL
+enable_so_wNtbOutMaxDatagrams_usb_cdc_ncm_ntb_parameters_65459 wNtbOutMaxDatagrams usb_cdc_ncm_ntb_parameters 0 65459 NULL
+enable_so_cfg80211_calculate_bitrate_60g_fndecl_65469 cfg80211_calculate_bitrate_60g fndecl 0 65469 NULL
-+enable_so_value_iw_param_65472 value iw_param 0 65472 NULL
+enable_so_lfb_base_screen_info_65479 lfb_base screen_info 0 65479 NULL
+enable_so_pci_hotplug_io_size_vardecl_65480 pci_hotplug_io_size vardecl 0 65480 NULL
+enable_so_isoc_size_wa_seg_65482 isoc_size wa_seg 0 65482 NULL
@@ -169645,7 +169642,7 @@ index 0000000..6075e8f
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..a0415ac
+index 0000000..8838f04
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
@@ -0,0 +1,318 @@
@@ -169680,7 +169677,7 @@ index 0000000..a0415ac
+tree size_overflow_type_TI;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20151016",
++ .version = "20151020",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -169844,7 +169841,7 @@ index 0000000..a0415ac
+ TREE_PUBLIC(report_size_overflow_decl) = 1;
+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
-+ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
++// TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+// !!!
+ DECL_PRESERVE_P(report_size_overflow_decl) = 1;
+ DECL_UNINLINABLE(report_size_overflow_decl) = 1;
@@ -170327,10 +170324,10 @@ index 0000000..317cd6c
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_transform.c b/tools/gcc/size_overflow_plugin/size_overflow_transform.c
new file mode 100644
-index 0000000..b6c6bb1
+index 0000000..ee7633e
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_transform.c
-@@ -0,0 +1,731 @@
+@@ -0,0 +1,742 @@
+/*
+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -170560,7 +170557,7 @@ index 0000000..b6c6bb1
+ return TYPE_UNSIGNED(TREE_TYPE(node)) && is_unsigned_error_code_const(node);
+}
+
-+static bool has_error_code(interesting_stmts_t expand_from, gphi *phi)
++static bool has_error_code(gphi *phi)
+{
+ unsigned int i, len = gimple_phi_num_args(phi);
+
@@ -170591,12 +170588,17 @@ index 0000000..b6c6bb1
+static void handle_binary_assign(struct visited *visited, interesting_stmts_t expand_from, gassign *assign, tree rhs)
+{
+ tree new_node;
++ gimple def_orig_node;
+
+ new_node = expand(visited, expand_from, rhs);
+ if (new_node == NULL_TREE)
+ return;
+
++ def_orig_node = get_def_stmt(rhs);
+ change_orig_node(visited, assign, rhs, new_node, 0);
++
++ if (pointer_set_contains(visited->no_cast_check, def_orig_node))
++ return;
+ check_size_overflow(expand_from, assign, TREE_TYPE(new_node), new_node, rhs, BEFORE_STMT);
+}
+
@@ -170646,7 +170648,7 @@ index 0000000..b6c6bb1
+ case GIMPLE_PHI: {
+ unsigned int i;
+
-+ error_code = has_error_code(expand_from, as_a_gphi(def_stmt));
++ error_code = has_error_code(as_a_gphi(def_stmt));
+ for (i = 0; i < gimple_phi_num_args(def_stmt); i++) {
+ error_code = search_error_codes(visited, visited_error_codes, expand_from, gimple_phi_arg_def(def_stmt, i), error_code);
+ }
@@ -170681,6 +170683,7 @@ index 0000000..b6c6bb1
+
+ for (cur = head; cur; cur = cur->next) {
+ tree new_node;
++ gimple orig_def_stmt;
+
+ if (handle_error_codes(visited, cur))
+ continue;
@@ -170689,7 +170692,12 @@ index 0000000..b6c6bb1
+ if (new_node == NULL_TREE)
+ continue;
+
++ orig_def_stmt = get_def_stmt(cur->orig_node);
++
+ change_orig_node(visited, cur->first_stmt, cur->orig_node, new_node, cur->num);
++
++ if (pointer_set_contains(visited->no_cast_check, orig_def_stmt))
++ continue;
+ check_size_overflow(cur, cur->first_stmt, TREE_TYPE(new_node), new_node, cur->orig_node, BEFORE_STMT);
+ }
+}
@@ -171064,10 +171072,10 @@ index 0000000..b6c6bb1
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c b/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c
new file mode 100644
-index 0000000..5fdfca5
+index 0000000..e7a17f5
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_transform_core.c
-@@ -0,0 +1,962 @@
+@@ -0,0 +1,1004 @@
+/*
+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -171647,6 +171655,69 @@ index 0000000..5fdfca5
+ insert_check_size_overflow(expand_from, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
++static tree get_my_stmt_lhs(struct visited *visited, gimple stmt)
++{
++ gimple_stmt_iterator gsi;
++ gimple next_stmt = NULL;
++
++ gsi = gsi_for_stmt(stmt);
++
++ do {
++ gsi_next(&gsi);
++ next_stmt = gsi_stmt(gsi);
++
++ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt))
++ return NULL_TREE;
++
++ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt))
++ break;
++
++ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt));
++ } while (!gsi_end_p(gsi));
++
++ gcc_assert(next_stmt);
++ return get_lhs(next_stmt);
++}
++
++/* When the result of the negation is cast to a signed type then move
++ * the size_overflow cast check before negation.
++ * ssa:
++ * unsigned _588
++ * _588 = _587 >> 12;
++ * _589 = -_588;
++ * _590 = (long int) _589;
++ */
++static bool handle_unsigned_neg_or_bit_not(struct visited *visited, interesting_stmts_t expand_from, const gassign *stmt)
++{
++ gimple def_neg_stmt, neg_stmt;
++ tree lhs, new_neg_rhs;
++ const_tree rhs, neg_rhs;
++ enum tree_code rhs_code;
++
++ rhs = gimple_assign_rhs1(stmt);
++ lhs = gimple_assign_lhs(stmt);
++ if (TYPE_UNSIGNED(TREE_TYPE(lhs)) || !TYPE_UNSIGNED(TREE_TYPE(rhs)))
++ return false;
++
++ neg_stmt = get_def_stmt(rhs);
++ if (!neg_stmt || !is_gimple_assign(neg_stmt))
++ return false;
++
++ rhs_code = gimple_assign_rhs_code(neg_stmt);
++ if (rhs_code != BIT_NOT_EXPR && rhs_code != NEGATE_EXPR)
++ return false;
++
++ neg_rhs = gimple_assign_rhs1(neg_stmt);
++ def_neg_stmt = get_def_stmt(neg_rhs);
++ if (!def_neg_stmt)
++ return false;
++
++ new_neg_rhs = get_my_stmt_lhs(visited, def_neg_stmt);
++ check_size_overflow(expand_from, neg_stmt, TREE_TYPE(new_neg_rhs), new_neg_rhs, lhs, BEFORE_STMT);
++ pointer_set_insert(visited->no_cast_check, stmt);
++ return true;
++}
++
+static tree create_cast_overflow_check(struct visited *visited, interesting_stmts_t expand_from, tree new_rhs1, gassign *stmt)
+{
+ bool cast_lhs, cast_rhs;
@@ -171673,6 +171744,9 @@ index 0000000..5fdfca5
+ { true, false, true, true }, // lhs < rhs
+ };
+
++ if (handle_unsigned_neg_or_bit_not(visited, expand_from, stmt))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
+ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast
+ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
+ return create_assign(visited, stmt, lhs, AFTER_STMT);
@@ -171945,30 +172019,6 @@ index 0000000..5fdfca5
+}
+#endif
+
-+static tree get_my_stmt_lhs(struct visited *visited, gimple stmt)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple next_stmt = NULL;
-+
-+ gsi = gsi_for_stmt(stmt);
-+
-+ do {
-+ gsi_next(&gsi);
-+ next_stmt = gsi_stmt(gsi);
-+
-+ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt))
-+ return NULL_TREE;
-+
-+ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt))
-+ break;
-+
-+ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt));
-+ } while (!gsi_end_p(gsi));
-+
-+ gcc_assert(next_stmt);
-+ return get_lhs(next_stmt);
-+}
-+
+static tree expand_visited(struct visited *visited, gimple def_stmt)
+{
+ gimple_stmt_iterator gsi;