author     Anthony G. Basile <blueness@gentoo.org>  2014-05-15 08:29:39 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2014-05-15 08:29:39 -0400
commit     d37e777b1e01663874fa697292c51b089ffd6d39 (patch)
tree       47e8878c28bf4dd3068bdfc510261dc723ee3766
parent     Grsec/PaX: 3.0-{3.2.58,3.14.3}-201405112005 (diff)
download   hardened-patchset-d37e777b1e01663874fa697292c51b089ffd6d39.tar.gz
           hardened-patchset-d37e777b1e01663874fa697292c51b089ffd6d39.tar.bz2
           hardened-patchset-d37e777b1e01663874fa697292c51b089ffd6d39.zip
Grsec/PaX: 3.0-3.14.3-201405141623
-rw-r--r--  3.14.3/0000_README                                                                                                       |    2
-rw-r--r--  3.14.3/4420_grsecurity-3.0-3.14.4-201405141623.patch (renamed from 3.14.3/4420_grsecurity-3.0-3.14.3-201405112005.patch) | 8556
-rw-r--r--  3.14.3/4425_grsec_remove_EI_PAX.patch                                                                                    |    2
-rw-r--r--  3.14.3/4450_grsec-kconfig-default-gids.patch                                                                             |    8
-rw-r--r--  3.14.3/4475_emutramp_default_on.patch                                                                                    |    2
5 files changed, 4528 insertions, 4042 deletions
diff --git a/3.14.3/0000_README b/3.14.3/0000_README
index 99900e8..d9b0963 100644
--- a/3.14.3/0000_README
+++ b/3.14.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.3-201405112005.patch
+Patch: 4420_grsecurity-3.0-3.14.4-201405141623.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.3/4420_grsecurity-3.0-3.14.3-201405112005.patch b/3.14.3/4420_grsecurity-3.0-3.14.4-201405141623.patch
index c5fa685..723fbc4 100644
--- a/3.14.3/4420_grsecurity-3.0-3.14.3-201405112005.patch
+++ b/3.14.3/4420_grsecurity-3.0-3.14.4-201405141623.patch
@@ -287,7 +287,7 @@ index 7116fda..d8ed6e8 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index eed07f3..2b75821 100644
+index d7c07fd..d6d4bfa 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -352,7 +352,7 @@ index eed07f3..2b75821 100644
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
-+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
@@ -8174,10 +8174,10 @@ index 4aad413..85d86bf 100644
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index 90c06ec..3517221 100644
+index ce17815..c5574cc 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
-@@ -248,6 +248,7 @@
+@@ -249,6 +249,7 @@
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
@@ -8518,10 +8518,10 @@ index 6cff040..74ac5d1 100644
sechdrs, module);
#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index af064d2..ce56147 100644
+index 31d0215..206af70 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
-@@ -1009,8 +1009,8 @@ void show_regs(struct pt_regs * regs)
+@@ -1031,8 +1031,8 @@ void show_regs(struct pt_regs * regs)
* Lookup NIP late so we have the best change of getting the
* above info out without failing
*/
@@ -8532,7 +8532,7 @@ index af064d2..ce56147 100644
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
-@@ -1532,10 +1532,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
@@ -8545,7 +8545,7 @@ index af064d2..ce56147 100644
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
-@@ -1555,7 +1555,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
@@ -8554,7 +8554,7 @@ index af064d2..ce56147 100644
regs->trap, (void *)regs->nip, (void *)lr);
firstframe = 1;
}
-@@ -1591,58 +1591,3 @@ void notrace __ppc64_runlatch_off(void)
+@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
@@ -8653,10 +8653,10 @@ index 2e3d2bf..35df241 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index a67e00a..f71d8c7 100644
+index 4e47db6..6dcc96e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
-@@ -1011,7 +1011,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+@@ -1013,7 +1013,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
@@ -8666,10 +8666,10 @@ index a67e00a..f71d8c7 100644
tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
-index 8d253c2..405b337 100644
+index d501dc4..e5a0de0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
-@@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+@@ -760,7 +760,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
current->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
@@ -12667,10 +12667,10 @@ index 321a52c..3d51a5e 100644
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index f8842c4..e893775 100644
+index 0dd99ea..4a63d82 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
-@@ -71,14 +71,12 @@ ifeq ($(CONFIG_X86_32),y)
+@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y)
# CPU-specific tuning. Anything which can be shared with UML should go here.
include $(srctree)/arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
@@ -12680,13 +12680,7 @@ index f8842c4..e893775 100644
else
BITS := 64
UTS_MACHINE := x86_64
- CHECKFLAGS += -D__x86_64__ -m64
-
-+ biarch := $(call cc-option,-m64)
- KBUILD_AFLAGS += -m64
- KBUILD_CFLAGS += -m64
-
-@@ -111,6 +109,9 @@ else
+@@ -112,6 +109,9 @@ else
KBUILD_CFLAGS += -maccumulate-outgoing-args
endif
@@ -12696,7 +12690,7 @@ index f8842c4..e893775 100644
# Make sure compiler does not have buggy stack-protector support.
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-@@ -268,3 +269,12 @@ define archhelp
+@@ -269,3 +269,12 @@ define archhelp
echo ' FDINITRD=file initrd for the booted kernel'
echo ' kvmconfig - Enable additional options for guest kernel support'
endef
@@ -26192,7 +26186,7 @@ index bbb6c73..24a58ef 100644
.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
.unlock_kick = paravirt_nop,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 1b10af8..0b58cbc 100644
+index 1b10af8..45bfbec 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
@@ -26205,11 +26199,13 @@ index 1b10af8..0b58cbc 100644
void __init default_banner(void)
{
-@@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+@@ -141,16 +144,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+
if (opfunc == NULL)
/* If there's no function, patch it with a ud2a (BUG) */
- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
- else if (opfunc == _paravirt_nop)
++ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
+ else if (opfunc == (void *)_paravirt_nop)
/* If the operation is a nop, then nop the callsite */
ret = paravirt_patch_nop();
@@ -35329,6 +35325,24 @@ index dac7b20..72dbaca 100644
movl %eax, %cr0
/*
+diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
+index 9e7e147..25a4158 100644
+--- a/arch/x86/realmode/rm/wakeup_asm.S
++++ b/arch/x86/realmode/rm/wakeup_asm.S
+@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
+ lgdtl pmode_gdt
+
+ /* This really couldn't... */
+- movl pmode_entry, %eax
+ movl pmode_cr0, %ecx
+ movl %ecx, %cr0
+- ljmpl $__KERNEL_CS, $pa_startup_32
+- /* -> jmp *%eax in trampoline_32.S */
++
++ ljmpl *pmode_entry
+ #else
+ jmp trampoline_start
+ #endif
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index e812034..c747134 100644
--- a/arch/x86/tools/Makefile
@@ -36544,7 +36558,7 @@ index 36605ab..6ef6d4b 100644
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 8cb2522..a815e54 100644
+index 0a79c54..c1b92ed 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
@@ -36556,7 +36570,7 @@ index 8cb2522..a815e54 100644
struct ata_force_param {
const char *name;
-@@ -4851,7 +4851,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -4858,7 +4858,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
@@ -36565,7 +36579,7 @@ index 8cb2522..a815e54 100644
ap = qc->ap;
qc->flags = 0;
-@@ -4867,7 +4867,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -4874,7 +4874,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
@@ -36574,7 +36588,7 @@ index 8cb2522..a815e54 100644
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
-@@ -5986,6 +5986,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5993,6 +5993,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
return;
spin_lock(&lock);
@@ -36582,7 +36596,7 @@ index 8cb2522..a815e54 100644
for (cur = ops->inherits; cur; cur = cur->inherits) {
void **inherit = (void **)cur;
-@@ -5999,8 +6000,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -6006,8 +6007,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
if (IS_ERR(*pp))
*pp = NULL;
@@ -36593,7 +36607,7 @@ index 8cb2522..a815e54 100644
spin_unlock(&lock);
}
-@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+@@ -6200,7 +6202,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
@@ -38319,44 +38333,6 @@ index d073305..4998fea 100644
static struct asender_cmd asender_tbl[] = {
[P_PING] = { 0, got_Ping },
-diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
-index 2023043..dab515c 100644
---- a/drivers/block/floppy.c
-+++ b/drivers/block/floppy.c
-@@ -3053,7 +3053,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
- int ret;
-
- while (ptr) {
-- ret = copy_to_user(param, ptr, sizeof(*ptr));
-+ struct floppy_raw_cmd cmd = *ptr;
-+ cmd.next = NULL;
-+ cmd.kernel_data = NULL;
-+ ret = copy_to_user(param, &cmd, sizeof(cmd));
- if (ret)
- return -EFAULT;
- param += sizeof(struct floppy_raw_cmd);
-@@ -3107,10 +3110,11 @@ loop:
- return -ENOMEM;
- *rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
-- if (ret)
-- return -EFAULT;
- ptr->next = NULL;
- ptr->buffer_length = 0;
-+ ptr->kernel_data = NULL;
-+ if (ret)
-+ return -EFAULT;
- param += sizeof(struct floppy_raw_cmd);
- if (ptr->cmd_count > 33)
- /* the command may now also take up the space
-@@ -3126,7 +3130,6 @@ loop:
- for (i = 0; i < 16; i++)
- ptr->reply[i] = 0;
- ptr->resultcode = 0;
-- ptr->kernel_data = NULL;
-
- if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
- if (ptr->length <= 0)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 66e8c3b..9b68dd9 100644
--- a/drivers/block/loop.c
@@ -44319,10 +44295,10 @@ index 6a7f2b8..fea0bde 100644
"start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
-index fb9efc8..81e8986 100644
+index b086a94..74cb67e 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
-@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
pmd->info.tm = pmd->tm;
pmd->info.levels = 2;
@@ -44331,7 +44307,7 @@ index fb9efc8..81e8986 100644
pmd->info.value_type.size = sizeof(__le64);
pmd->info.value_type.inc = data_block_inc;
pmd->info.value_type.dec = data_block_dec;
-@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
+@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
pmd->bl_info.tm = pmd->tm;
pmd->bl_info.levels = 1;
@@ -46161,7 +46137,7 @@ index 51b9d6a..52af9a7 100644
#include <linux/mtd/nand.h>
#include <linux/mtd/nftl.h>
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
-index 4b8e895..6b3c498 100644
+index cf49c22..971b133 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
@@ -46172,7 +46148,7 @@ index 4b8e895..6b3c498 100644
+ attribute_group_no_const *attr_group;
struct attribute **attributes;
struct sm_sysfs_attribute *vendor_attribute;
-
+ char *vendor;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e5628fc..ffe54d1 100644
--- a/drivers/net/bonding/bond_main.c
@@ -47561,10 +47537,10 @@ index d2fe259..0c4c682 100644
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
-index ba1b1ea..0ff7e98 100644
+index ea7e70c..bc0c45f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
-@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
+@@ -1127,7 +1127,7 @@ static void iwl_option_config(struct iwl_priv *priv)
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_nvm_data *data = priv->nvm_data;
@@ -47573,7 +47549,7 @@ index ba1b1ea..0ff7e98 100644
if (data->sku_cap_11n_enable &&
!priv->cfg->ht_params) {
-@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
+@@ -1141,7 +1141,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return -EINVAL;
}
@@ -48318,7 +48294,7 @@ index 8f8551a..3ace3ca 100644
static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
-index defb6af..7a5d3d1 100644
+index e2a91c8..986cc9f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
@@ -49706,7 +49682,7 @@ index b2ede05..aaf482ca 100644
/**
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
-index 7f0af4f..193ac3e 100644
+index 6fd7d40..b444223 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
@@ -50795,7 +50771,7 @@ index a57bb5a..1f727d33 100644
struct tty_struct *tty;
struct tty_ldisc *ld;
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
-index 94f9e3a..4c8afa8 100644
+index 0ff7fda..dbc7d52 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
@@ -51221,7 +51197,7 @@ index 2ebe47b..3205833 100644
dlci->modem_rx = 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index d15624c..bd628c6 100644
+index e36d1f5..9938e3e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -115,7 +115,7 @@ struct n_tty_data {
@@ -51233,35 +51209,7 @@ index d15624c..bd628c6 100644
size_t line_start;
/* protected by output lock */
-@@ -2356,10 +2356,18 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
- if (tty->ops->flush_chars)
- tty->ops->flush_chars(tty);
- } else {
-+ struct n_tty_data *ldata = tty->disc_data;
-+ bool lock;
-+
-+ lock = L_ECHO(tty) || (ldata->icanon & L_ECHONL(tty));
-+ if (lock)
-+ mutex_lock(&ldata->output_lock);
- while (nr > 0) {
- c = tty->ops->write(tty, b, nr);
- if (c < 0) {
- retval = c;
-+ if (lock)
-+ mutex_unlock(&ldata->output_lock);
- goto break_out;
- }
- if (!c)
-@@ -2367,6 +2375,8 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
- b += c;
- nr -= c;
- }
-+ if (lock)
-+ mutex_unlock(&ldata->output_lock);
- }
- if (!nr)
- break;
-@@ -2515,6 +2525,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+@@ -2519,6 +2519,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = tty_ldisc_N_TTY;
ops->owner = NULL;
@@ -52916,18 +52864,18 @@ index 28fafbf..ae91651 100644
}
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
-index 95ec042..e6affdd 100644
+index 0fe02e2..ab01b26 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
-@@ -7,6 +7,7 @@
- #include <linux/string.h>
+@@ -8,6 +8,7 @@
+ #include "../fb_draw.h"
#include <asm/io.h>
+#include <asm/pgtable.h>
#ifdef __sparc__
#include <asm/fbio.h>
-@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
+@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
info->sprite.buf_align = 16; /* and 64 lines tall. */
info->sprite.flags = FB_PIXMAP_IO;
@@ -56408,10 +56356,10 @@ index ce25d75..dc09eeb 100644
&data);
if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
-index 062a5f6..6ecefa2 100644
+index 12a3de0e..25949c1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
-@@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+@@ -375,7 +375,7 @@ static int aio_setup_ring(struct kioctx *ctx)
size += sizeof(struct io_event) * nr_events;
nr_pages = PFN_UP(size);
@@ -56420,7 +56368,7 @@ index 062a5f6..6ecefa2 100644
return -EINVAL;
file = aio_private_file(ctx, nr_pages);
-@@ -1285,10 +1285,8 @@ rw_common:
+@@ -1299,10 +1299,8 @@ rw_common:
&iovec, compat)
: aio_setup_single_vector(req, rw, buf, &nr_segs,
iovec);
@@ -57565,42 +57513,6 @@ index 67be295..83e2f86 100644
static int __init init_elf_binfmt(void)
{
register_binfmt(&elf_format);
-diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
-index d50bbe5..af3b649 100644
---- a/fs/binfmt_flat.c
-+++ b/fs/binfmt_flat.c
-@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
- realdatastart = (unsigned long) -ENOMEM;
- printk("Unable to allocate RAM for process data, errno %d\n",
- (int)-realdatastart);
-+ down_write(&current->mm->mmap_sem);
- vm_munmap(textpos, text_len);
-+ up_write(&current->mm->mmap_sem);
- ret = realdatastart;
- goto err;
- }
-@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
- }
- if (IS_ERR_VALUE(result)) {
- printk("Unable to read data+bss, errno %d\n", (int)-result);
-+ down_write(&current->mm->mmap_sem);
- vm_munmap(textpos, text_len);
- vm_munmap(realdatastart, len);
-+ up_write(&current->mm->mmap_sem);
- ret = result;
- goto err;
- }
-@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
- }
- if (IS_ERR_VALUE(result)) {
- printk("Unable to read code+data+bss, errno %d\n",(int)-result);
-+ down_write(&current->mm->mmap_sem);
- vm_munmap(textpos, text_len + data_len + extra +
- MAX_SHARED_LIBS * sizeof(unsigned long));
-+ up_write(&current->mm->mmap_sem);
- ret = result;
- goto err;
- }
diff --git a/fs/bio.c b/fs/bio.c
index 8754e7b..0669094 100644
--- a/fs/bio.c
@@ -61848,10 +61760,10 @@ index acd3947..1f896e2 100644
memcpy(c->data, &cookie, 4);
c->len=4;
diff --git a/fs/locks.c b/fs/locks.c
-index 92a0f0a..45a48f0 100644
+index 4dd39b9..12d6aaf 100644
--- a/fs/locks.c
+++ b/fs/locks.c
-@@ -2219,16 +2219,16 @@ void locks_remove_flock(struct file *filp)
+@@ -2218,16 +2218,16 @@ void locks_remove_flock(struct file *filp)
return;
if (filp->f_op->flock) {
@@ -80568,10 +80480,10 @@ index b8e9a43..632678d 100644
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
diff --git a/include/linux/libata.h b/include/linux/libata.h
-index bec6dbe..2873d64 100644
+index 3fee55e..42565b7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
-@@ -975,7 +975,7 @@ struct ata_port_operations {
+@@ -976,7 +976,7 @@ struct ata_port_operations {
* fields must be pointers.
*/
const struct ata_port_operations *inherits;
@@ -95324,7 +95236,7 @@ index 7106cb1..0805f48 100644
unsigned long bg_thresh,
unsigned long dirty,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 7387a67..3994687 100644
+index 7387a67..67105e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -95420,6 +95332,21 @@ index 7387a67..3994687 100644
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
+@@ -2401,7 +2441,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
+ continue;
+ mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) - low_wmark_pages(zone) -
+- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
++ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
+ }
+ }
+
+@@ -6565,4 +6605,4 @@ void dump_page(struct page *page, char *reason)
+ {
+ dump_page_badflags(page, reason, 0);
+ }
+-EXPORT_SYMBOL_GPL(dump_page);
++EXPORT_SYMBOL(dump_page);
diff --git a/mm/page_io.c b/mm/page_io.c
index 7c59ef6..1358905 100644
--- a/mm/page_io.c
@@ -100960,7 +100887,7 @@ index 453e974..b3a43a5 100644
if (local->use_chanctx)
*chandef = local->monitor_chandef;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
-index 5e44e317..3d404a6 100644
+index 6bd4984..d8805c5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -28,6 +28,7 @@
@@ -101057,10 +100984,10 @@ index ce1c443..6cd39e1 100644
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index d767cfb..b4cd07d 100644
+index c7a7a86..a74f57b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
-@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER);
@@ -101070,7 +100997,7 @@ index d767cfb..b4cd07d 100644
/*
* Goal:
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
-index af64fb8..366e371 100644
+index d478b88..8c8d157 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
@@ -101091,7 +101018,7 @@ index af64fb8..366e371 100644
if (local->wowlan) {
int err = drv_suspend(local, wowlan);
if (err < 0) {
-@@ -115,7 +115,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
WARN_ON(!list_empty(&local->chanctx_list));
/* stop hardware - this must stop RX */
@@ -102835,7 +102762,7 @@ index ae333c1..18521f0 100644
goto out_nomem;
cd->u.procfs.channel_ent = NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 0edada9..9247ea0 100644
+index 3ea5cda..bfb3e08 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1415,7 +1415,9 @@ call_start(struct rpc_task *task)
@@ -106384,19 +106311,18 @@ index 7778b8e..3d619fc 100644
diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
new file mode 100644
-index 0000000..1f0214f
+index 0000000..60e7af2
--- /dev/null
+++ b/tools/gcc/.gitignore
-@@ -0,0 +1,3 @@
+@@ -0,0 +1,2 @@
+randomize_layout_seed.h
-+size_overflow_hash.h
-+size_overflow_hash_aux.h
++randomize_layout_hash.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..5ca9688
+index 0000000..7b8921f
--- /dev/null
+++ b/tools/gcc/Makefile
-@@ -0,0 +1,62 @@
+@@ -0,0 +1,52 @@
+#CC := gcc
+#PLUGIN_SOURCE_FILES := pax_plugin.c
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
@@ -106405,23 +106331,29 @@ index 0000000..5ca9688
+
+ifeq ($(PLUGINCC),$(HOSTCC))
+HOSTLIBS := hostlibs
-+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb
++export HOST_EXTRACFLAGS
+else
+HOSTLIBS := hostcxxlibs
-+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing -Wno-unused-variable
++export HOST_EXTRACXXFLAGS
+endif
+
++export GCCPLUGINS_DIR HOSTLIBS
++
+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
+$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
+$(HOSTLIBS)-y += colorize_plugin.so
-+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
+$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so
+
++subdir-$(CONFIG_PAX_SIZE_OVERFLOW) := size_overflow_plugin
++subdir- += size_overflow_plugin
++
+always := $($(HOSTLIBS)-y)
+
+constify_plugin-objs := constify_plugin.o
@@ -106430,35 +106362,19 @@ index 0000000..5ca9688
+kernexec_plugin-objs := kernexec_plugin.o
+checker_plugin-objs := checker_plugin.o
+colorize_plugin-objs := colorize_plugin.o
-+size_overflow_plugin-objs := size_overflow_plugin.o
+latent_entropy_plugin-objs := latent_entropy_plugin.o
+structleak_plugin-objs := structleak_plugin.o
+randomize_layout_plugin-objs := randomize_layout_plugin.o
+
-+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
+$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
+
-+quiet_cmd_build_size_overflow_hash = GENHASH $@
-+ cmd_build_size_overflow_hash = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
-+$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
-+ $(call if_changed,build_size_overflow_hash)
-+
-+quiet_cmd_build_size_overflow_hash_aux = GENHASH $@
-+ cmd_build_size_overflow_hash_aux = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@
-+$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE
-+ $(call if_changed,build_size_overflow_hash_aux)
-+
-+targets += size_overflow_hash.h size_overflow_hash_aux.h
-+
+quiet_cmd_create_randomize_layout_seed = GENSEED $@
+ cmd_create_randomize_layout_seed = \
+ $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
+$(objtree)/$(obj)/randomize_layout_seed.h: FORCE
+ $(call if_changed,create_randomize_layout_seed)
+
-+targets += size_overflow_hash.h randomize_layout_seed.h randomize_layout_hash.h
++targets += randomize_layout_seed.h randomize_layout_hash.h
diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
new file mode 100644
index 0000000..5452feea
@@ -107701,109 +107617,6 @@ index 0000000..7514850
+ HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'`
+ echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
+fi
-diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
-new file mode 100644
-index 0000000..791ca76
---- /dev/null
-+++ b/tools/gcc/generate_size_overflow_hash.sh
-@@ -0,0 +1,97 @@
-+#!/bin/bash
-+
-+# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
-+
-+header1="size_overflow_hash.h"
-+database="size_overflow_hash.data"
-+n=65536
-+hashtable_name="size_overflow_hash"
-+
-+usage() {
-+cat <<EOF
-+usage: $0 options
-+OPTIONS:
-+ -h|--help help
-+ -o header file
-+ -d database file
-+ -n hash array size
-+ -s name of the hash table
-+EOF
-+ return 0
-+}
-+
-+while true
-+do
-+ case "$1" in
-+ -h|--help) usage && exit 0;;
-+ -n) n=$2; shift 2;;
-+ -o) header1="$2"; shift 2;;
-+ -d) database="$2"; shift 2;;
-+ -s) hashtable_name="$2"; shift 2;;
-+ --) shift 1; break ;;
-+ *) break ;;
-+ esac
-+done
-+
-+create_defines() {
-+ for i in `seq 0 31`
-+ do
-+ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
-+ done
-+ echo >> "$header1"
-+}
-+
-+create_structs() {
-+ rm -f "$header1"
-+
-+ create_defines
-+
-+ cat "$database" | while read data
-+ do
-+ data_array=($data)
-+ struct_hash_name="${data_array[0]}"
-+ funcn="${data_array[1]}"
-+ params="${data_array[2]}"
-+ next="${data_array[4]}"
-+
-+ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
-+
-+ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
-+ echo -en "\t.param\t= " >> "$header1"
-+ line=
-+ for param_num in ${params//-/ };
-+ do
-+ line="${line}PARAM"$param_num"|"
-+ done
-+
-+ echo -e "${line%?},\n};\n" >> "$header1"
-+ done
-+}
-+
-+create_headers() {
-+ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1"
-+}
-+
-+create_array_elements() {
-+ index=0
-+ grep -v "nohasharray" $database | sort -n -k 4 | while read data
-+ do
-+ data_array=($data)
-+ i="${data_array[3]}"
-+ hash="${data_array[0]}"
-+ while [[ $index -lt $i ]]
-+ do
-+ echo -e "\t["$index"]\t= NULL," >> "$header1"
-+ index=$(($index + 1))
-+ done
-+ index=$(($index + 1))
-+ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
-+ done
-+ echo '};' >> $header1
-+}
-+
-+create_structs
-+create_headers
-+create_array_elements
-+
-+exit 0
diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
new file mode 100644
index 0000000..d81c094
@@ -109905,11 +109718,4237 @@ index 0000000..8dafb22
+
+ return 0;
+}
-diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
+diff --git a/tools/gcc/size_overflow_plugin/.gitignore b/tools/gcc/size_overflow_plugin/.gitignore
+new file mode 100644
+index 0000000..92d3b0c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/.gitignore
+@@ -0,0 +1,2 @@
++size_overflow_hash.h
++size_overflow_hash_aux.h
+diff --git a/tools/gcc/size_overflow_plugin/Makefile b/tools/gcc/size_overflow_plugin/Makefile
+new file mode 100644
+index 0000000..1ae2ed5
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/Makefile
+@@ -0,0 +1,20 @@
++$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++always := $($(HOSTLIBS)-y)
++
++size_overflow_plugin-objs := $(patsubst $(srctree)/$(src)/%.c,%.o,$(wildcard $(srctree)/$(src)/*.c))
++
++$(patsubst $(srctree)/$(src)/%.c,$(obj)/%.o,$(wildcard $(srctree)/$(src)/*.c)): $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
++
++quiet_cmd_build_size_overflow_hash = GENHASH $@
++ cmd_build_size_overflow_hash = \
++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
++ $(call if_changed,build_size_overflow_hash)
++
++quiet_cmd_build_size_overflow_hash_aux = GENHASH $@
++ cmd_build_size_overflow_hash_aux = \
++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE
++ $(call if_changed,build_size_overflow_hash_aux)
++
++targets += size_overflow_hash.h size_overflow_hash_aux.h
+diff --git a/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
+new file mode 100644
+index 0000000..12b1e3b
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
+@@ -0,0 +1,102 @@
++#!/bin/bash
++
++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
++
++header1="size_overflow_hash.h"
++database="size_overflow_hash.data"
++n=65536
++hashtable_name="size_overflow_hash"
++
++usage() {
++cat <<EOF
++usage: $0 options
++OPTIONS:
++ -h|--help help
++ -o header file
++ -d database file
++ -n hash array size
++ -s name of the hash table
++EOF
++ return 0
++}
++
++while true
++do
++ case "$1" in
++ -h|--help) usage && exit 0;;
++ -n) n=$2; shift 2;;
++ -o) header1="$2"; shift 2;;
++ -d) database="$2"; shift 2;;
++ -s) hashtable_name="$2"; shift 2;;
++ --) shift 1; break ;;
++ *) break ;;
++ esac
++done
++
++create_defines() {
++ for i in `seq 0 31`
++ do
++ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
++ done
++ echo >> "$header1"
++}
++
++create_structs() {
++ rm -f "$header1"
++
++ create_defines
++
++ cat "$database" | while read data
++ do
++ data_array=($data)
++ struct_hash_name="${data_array[0]}"
++ funcn="${data_array[1]}"
++ params="${data_array[2]}"
++ next="${data_array[4]}"
++
++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
++
++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
++ echo -en "\t.param\t= " >> "$header1"
++ line=
++ for param_num in ${params//-/ };
++ do
++ line="${line}PARAM"$param_num"|"
++ done
++
++ echo -e "${line%?},\n};\n" >> "$header1"
++ done
++}
++
++create_headers() {
++ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1"
++}
++
++create_array_elements() {
++ index=0
++ grep -v "nohasharray" $database | sort -n -k 4 | while read data
++ do
++ data_array=($data)
++ i="${data_array[3]}"
++ hash="${data_array[0]}"
++ while [[ $index -lt $i ]]
++ do
++ echo -e "\t["$index"]\t= NULL," >> "$header1"
++ index=$(($index + 1))
++ done
++ index=$(($index + 1))
++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
++ done
++ echo '};' >> $header1
++}
++
++size_overflow_plugin_dir=`dirname $header1`
++if [ "$size_overflow_plugin_dir" != '.' ]; then
++ mkdir -p "$size_overflow_plugin_dir" 2> /dev/null
++fi
++
++create_structs
++create_headers
++create_array_elements
++
++exit 0
+diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
+new file mode 100644
+index 0000000..3e8148c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
+@@ -0,0 +1,790 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
++
++// data for the size_overflow asm stmt
++struct asm_data {
++ gimple def_stmt;
++ tree input;
++ tree output;
++};
++
++#if BUILDING_GCC_VERSION <= 4007
++static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
++#else
++static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
++#endif
++{
++ tree list;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *vec_list = NULL;
++#else
++ vec<tree, va_gc> *vec_list = NULL;
++#endif
++
++ list = build_tree_list(NULL_TREE, string);
++ list = chainon(NULL_TREE, build_tree_list(list, io));
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, gc, vec_list, list);
++#else
++ vec_safe_push(vec_list, list);
++#endif
++ return vec_list;
++}
++
++static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
++{
++ gimple asm_stmt;
++ gimple_stmt_iterator gsi;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *input, *output = NULL;
++#else
++ vec<tree, va_gc> *input, *output = NULL;
++#endif
++
++ input = create_asm_io_list(str_input, asm_data->input);
++
++ if (asm_data->output)
++ output = create_asm_io_list(str_output, asm_data->output);
++
++ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
++ gsi = gsi_for_stmt(asm_data->def_stmt);
++ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
++
++ if (asm_data->output)
++ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
++}
++
++static void replace_call_lhs(const struct asm_data *asm_data)
++{
++ gimple_set_lhs(asm_data->def_stmt, asm_data->input);
++ update_stmt(asm_data->def_stmt);
++ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
++}
++
++static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
++{
++ enum mark cur_fndecl_attr;
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ cur_fndecl_attr = search_intentional(visited, arg);
++ if (cur_fndecl_attr != MARK_NO)
++ return cur_fndecl_attr;
++ }
++ return MARK_NO;
++}
++
++static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
++{
++ enum mark cur_fndecl_attr;
++ const_tree rhs1, rhs2;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ cur_fndecl_attr = search_intentional(visited, rhs1);
++ if (cur_fndecl_attr != MARK_NO)
++ return cur_fndecl_attr;
++ return search_intentional(visited, rhs2);
++}
++
++// Look up the intentional_overflow attribute on the caller and the callee functions.
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) != SSA_NAME)
++ return get_intentional_attr_type(lhs);
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return MARK_NO;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return MARK_NO;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return search_intentional(visited, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ return MARK_TURN_OFF;
++ return MARK_NO;
++ case GIMPLE_CALL:
++ return MARK_NO;
++ case GIMPLE_PHI:
++ return search_intentional_phi(visited, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return search_intentional_binary(visited, lhs);
++ }
++ case GIMPLE_RETURN:
++ return MARK_NO;
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
++{
++ const_tree fndecl;
++ struct pointer_set_t *visited;
++ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
++
++ fndecl = get_interesting_orig_fndecl(stmt, argnum);
++ if (is_end_intentional_intentional_attr(fndecl, argnum))
++ decl_attr = MARK_NOT_INTENTIONAL;
++ else if (is_yes_intentional_attr(fndecl, argnum))
++ decl_attr = MARK_YES;
++ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
++ return MARK_TURN_OFF;
++ }
++
++ visited = pointer_set_create();
++ cur_fndecl_attr = search_intentional(visited, arg);
++ pointer_set_destroy(visited);
++
++ switch (cur_fndecl_attr) {
++ case MARK_NO:
++ case MARK_TURN_OFF:
++ return cur_fndecl_attr;
++ default:
++ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
++ return MARK_YES;
++ }
++}
++
++static void check_missing_size_overflow_attribute(tree var)
++{
++ tree orig_fndecl;
++ unsigned int num;
++
++ if (is_a_return_check(var))
++ orig_fndecl = DECL_ORIGIN(var);
++ else
++ orig_fndecl = DECL_ORIGIN(current_function_decl);
++
++ num = get_function_num(var, orig_fndecl);
++ if (num == CANNOT_FIND_ARG)
++ return;
++
++ is_missing_function(orig_fndecl, num);
++}
++
++static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ search_size_overflow_attribute(visited, arg);
++ }
++}
++
++static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
++{
++ const_gimple def_stmt = get_def_stmt(lhs);
++ tree rhs1, rhs2;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ search_size_overflow_attribute(visited, rhs1);
++ search_size_overflow_attribute(visited, rhs2);
++}
++
++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) == PARM_DECL) {
++ check_missing_size_overflow_attribute(lhs);
++ return;
++ }
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ return;
++ case GIMPLE_CALL: {
++ tree fndecl = gimple_call_fndecl(def_stmt);
++
++ if (fndecl == NULL_TREE)
++ return;
++ check_missing_size_overflow_attribute(fndecl);
++ return;
++ }
++ case GIMPLE_PHI:
++ return search_size_overflow_attribute_phi(visited, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return search_size_overflow_attribute_binary(visited, lhs);
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Search missing entries in the hash table (invoked from the gimple pass)
++static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
++{
++ tree fndecl = NULL_TREE;
++ tree lhs;
++ struct pointer_set_t *visited;
++
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
++ return;
++
++ if (num == 0) {
++ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
++ lhs = gimple_return_retval(stmt);
++ } else {
++ gcc_assert(is_gimple_call(stmt));
++ lhs = gimple_call_arg(stmt, num - 1);
++ fndecl = gimple_call_fndecl(stmt);
++ }
++
++ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
++ return;
++
++ visited = pointer_set_create();
++ search_size_overflow_attribute(visited, lhs);
++ pointer_set_destroy(visited);
++}
++
++static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++
++ assign = gimple_build_assign(asm_data->input, asm_data->output);
++ gsi = gsi_for_stmt(stmt);
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ asm_data->def_stmt = assign;
++
++ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
++ asm_data->output = make_ssa_name(asm_data->output, stmt);
++ if (gimple_code(stmt) == GIMPLE_RETURN)
++ gimple_return_set_retval(stmt, asm_data->output);
++ else
++ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
++ update_stmt(stmt);
++}
++
++static char *create_asm_comment(unsigned int argnum, const_gimple stmt , const char *mark_str)
++{
++ const char *fn_name;
++ char *asm_comment;
++ unsigned int len;
++
++ if (argnum == 0)
++ fn_name = DECL_NAME_POINTER(current_function_decl);
++ else
++ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
++
++ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
++ gcc_assert(len > 0);
++
++ return asm_comment;
++}
++
++static const char *convert_mark_to_str(enum mark mark)
++{
++ switch (mark) {
++ case MARK_NO:
++ return OK_ASM_STR;
++ case MARK_YES:
++ case MARK_NOT_INTENTIONAL:
++ return YES_ASM_STR;
++ case MARK_TURN_OFF:
++ return TURN_OFF_ASM_STR;
++ }
++
++ gcc_unreachable();
++}
++
++/* Create the input of the size_overflow asm stmt.
++ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
++ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
++ * otherwise create the input (for a phi stmt the output too) of the asm stmt.
++ */
++static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
++{
++ if (!asm_data->def_stmt) {
++ asm_data->input = NULL_TREE;
++ return;
++ }
++
++ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
++ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
++
++ switch (gimple_code(asm_data->def_stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ replace_call_lhs(asm_data);
++ break;
++ case GIMPLE_PHI:
++ create_output_from_phi(stmt, argnum, asm_data);
++ break;
++ case GIMPLE_NOP: {
++ enum mark mark;
++ const char *mark_str;
++ char *asm_comment;
++
++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++
++ asm_data->input = asm_data->output;
++ asm_data->output = NULL;
++ asm_data->def_stmt = stmt;
++
++ mark_str = convert_mark_to_str(mark);
++ asm_comment = create_asm_comment(argnum, stmt, mark_str);
++
++ create_asm_stmt(asm_comment, build_string(3, "rm"), NULL, asm_data);
++ free(asm_comment);
++ asm_data->input = NULL_TREE;
++ break;
++ }
++ case GIMPLE_ASM:
++ if (is_size_overflow_asm(asm_data->def_stmt)) {
++ asm_data->input = NULL_TREE;
++ break;
++ }
++ default:
++ debug_gimple_stmt(asm_data->def_stmt);
++ gcc_unreachable();
++ }
++}
++
++/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
++ * is of the right kind create the appropriate size_overflow asm stmts:
++ * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
++ */
++static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
++{
++ struct asm_data asm_data;
++ const char *mark_str;
++ char *asm_comment;
++ enum mark mark;
++
++ if (is_gimple_constant(output_node))
++ return;
++
++ asm_data.output = output_node;
++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ if (mark != MARK_TURN_OFF)
++ search_missing_size_overflow_attribute_gimple(stmt, argnum);
++
++ asm_data.def_stmt = get_def_stmt(asm_data.output);
++ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
++ return;
++
++ create_asm_input(stmt, argnum, &asm_data);
++ if (asm_data.input == NULL_TREE)
++ return;
++
++ mark_str = convert_mark_to_str(mark);
++ asm_comment = create_asm_comment(argnum, stmt, mark_str);
++ create_asm_stmt(asm_comment, build_string(2, "0"), build_string(4, "=rm"), &asm_data);
++ free(asm_comment);
++}
++
++// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
++static bool create_mark_asm(gimple stmt, enum mark mark)
++{
++ struct asm_data asm_data;
++ const char *asm_str;
++
++ switch (mark) {
++ case MARK_TURN_OFF:
++ asm_str = TURN_OFF_ASM_STR;
++ break;
++ case MARK_NOT_INTENTIONAL:
++ case MARK_YES:
++ asm_str = YES_ASM_STR;
++ break;
++ default:
++ gcc_unreachable();
++ }
++
++ asm_data.def_stmt = stmt;
++ asm_data.output = gimple_call_lhs(stmt);
++
++ if (asm_data.output == NULL_TREE) {
++ asm_data.input = gimple_call_arg(stmt, 0);
++ if (is_gimple_constant(asm_data.input))
++ return false;
++ asm_data.output = NULL;
++ create_asm_stmt(asm_str, build_string(3, "rm"), NULL, &asm_data);
++ return true;
++ }
++
++ create_asm_input(stmt, 0, &asm_data);
++ gcc_assert(asm_data.input != NULL_TREE);
++
++ create_asm_stmt(asm_str, build_string(2, "0"), build_string(4, "=rm"), &asm_data);
++ return true;
++}
++
++static bool is_from_cast(const_tree node)
++{
++ gimple def_stmt = get_def_stmt(node);
++
++ if (!def_stmt)
++ return false;
++
++ if (gimple_assign_cast_p(def_stmt))
++ return true;
++
++ return false;
++}
++
++// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
++static bool skip_ptr_minus(gimple stmt)
++{
++ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ if (!is_from_cast(rhs1))
++ return false;
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (!is_from_cast(rhs2))
++ return false;
++
++ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
++ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
++
++ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
++ return false;
++
++ create_mark_asm(stmt, MARK_YES);
++ return true;
++}
++
++static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
++{
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ case GIMPLE_CALL:
++ break;
++ case GIMPLE_PHI: {
++ unsigned int i, n = gimple_phi_num_args(def_stmt);
++
++ pointer_set_insert(visited, def_stmt);
++
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(def_stmt, i);
++
++ walk_use_def_ptr(visited, arg);
++ }
++ }
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
++ return;
++ case 3:
++ if (skip_ptr_minus(def_stmt))
++ return;
++
++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
++ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
++ return;
++ default:
++ return;
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
++static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
++{
++ struct pointer_set_t *visited;
++
++ visited = pointer_set_create();
++ walk_use_def_ptr(visited, arg);
++ pointer_set_destroy(visited);
++}
++
++// Determine the return value and insert the asm stmt to mark the return stmt.
++static void insert_asm_ret(gimple stmt)
++{
++ tree ret;
++
++ ret = gimple_return_retval(stmt);
++ create_size_overflow_asm(stmt, ret, 0);
++}
++
++// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
++static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
++{
++ tree arg;
++ unsigned int argnum;
++
++ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
++ gcc_assert(argnum != 0);
++ if (argnum == CANNOT_FIND_ARG)
++ return;
++
++ arg = gimple_call_arg(stmt, argnum - 1);
++ gcc_assert(arg != NULL_TREE);
++
++ // skip all ptr - ptr expressions
++ insert_mark_not_intentional_asm_at_ptr(arg);
++
++ create_size_overflow_asm(stmt, arg, argnum);
++}
++
++// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
++static void set_argnum_attribute(const_tree attr, bool *argnums)
++{
++ unsigned int argnum;
++ tree attr_value;
++
++ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
++ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
++ argnums[argnum] = true;
++ }
++}
++
++// If a function arg or the return value is in the hash table then set its index in the array.
++static void set_argnum_hash(tree fndecl, bool *argnums)
++{
++ unsigned int num;
++ const struct size_overflow_hash *hash;
++
++ hash = get_function_hash(DECL_ORIGIN(fndecl));
++ if (!hash)
++ return;
++
++ for (num = 0; num <= MAX_PARAM; num++) {
++ if (!(hash->param & (1U << num)))
++ continue;
++
++ argnums[num] = true;
++ }
++}
++
++static bool is_all_the_argnums_empty(bool *argnums)
++{
++ unsigned int i;
++
++ for (i = 0; i <= MAX_PARAM; i++)
++ if (argnums[i])
++ return false;
++ return true;
++}
++
++// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
++static void search_interesting_args(tree fndecl, bool *argnums)
++{
++ const_tree attr;
++
++ set_argnum_hash(fndecl, argnums);
++ if (!is_all_the_argnums_empty(argnums))
++ return;
++
++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
++ if (attr && TREE_VALUE(attr))
++ set_argnum_attribute(attr, argnums);
++}
++
++/*
++ * Look up the intentional_overflow attribute that turns off ipa based duplication
++ * on the callee function.
++ */
++static bool is_mark_turn_off_attribute(gimple stmt)
++{
++ enum mark mark;
++ const_tree fndecl = gimple_call_fndecl(stmt);
++
++ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
++ if (mark == MARK_TURN_OFF)
++ return true;
++ return false;
++}
++
++// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
++static void handle_interesting_function(gimple stmt)
++{
++ unsigned int argnum;
++ tree fndecl;
++ bool orig_argnums[MAX_PARAM + 1] = {false};
++
++ if (gimple_call_num_args(stmt) == 0)
++ return;
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ return;
++ fndecl = DECL_ORIGIN(fndecl);
++
++ if (is_mark_turn_off_attribute(stmt)) {
++ create_mark_asm(stmt, MARK_TURN_OFF);
++ return;
++ }
++
++ search_interesting_args(fndecl, orig_argnums);
++
++ for (argnum = 1; argnum < MAX_PARAM; argnum++)
++ if (orig_argnums[argnum])
++ insert_asm_arg(stmt, argnum);
++}
++
++// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
++static void handle_interesting_ret(gimple stmt)
++{
++ bool orig_argnums[MAX_PARAM + 1] = {false};
++
++ search_interesting_args(current_function_decl, orig_argnums);
++
++ if (orig_argnums[0])
++ insert_asm_ret(stmt);
++}
++
++// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
++static unsigned int search_interesting_functions(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt = gsi_stmt(gsi);
++
++ if (is_size_overflow_asm(stmt))
++ continue;
++
++ if (is_gimple_call(stmt))
++ handle_interesting_function(stmt);
++ else if (gimple_code(stmt) == GIMPLE_RETURN)
++ handle_interesting_ret(stmt);
++ }
++ }
++ return 0;
++}
++
++/*
++ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
++ * this pass inserts asm stmts to mark the interesting args
++ * that the ipa pass will detect and insert the size overflow checks for.
++ */
++#if BUILDING_GCC_VERSION >= 4009
++static const struct pass_data insert_size_overflow_asm_pass_data = {
++#else
++static struct gimple_opt_pass insert_size_overflow_asm_pass = {
++ .pass = {
++#endif
++ .type = GIMPLE_PASS,
++ .name = "insert_size_overflow_asm",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++#if BUILDING_GCC_VERSION >= 4009
++ .has_gate = false,
++ .has_execute = true,
++#else
++ .gate = NULL,
++ .execute = search_interesting_functions,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++#endif
++ .tv_id = TV_NONE,
++ .properties_required = PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
++#if BUILDING_GCC_VERSION < 4009
++ }
++#endif
++};
++
++#if BUILDING_GCC_VERSION >= 4009
++namespace {
++class insert_size_overflow_asm_pass : public gimple_opt_pass {
++public:
++ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
++ unsigned int execute() { return search_interesting_functions(); }
++};
++}
++#endif
++
++struct opt_pass *make_insert_size_overflow_asm_pass(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new insert_size_overflow_asm_pass();
++#else
++ return &insert_size_overflow_asm_pass.pass;
++#endif
++}
+diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
+new file mode 100644
+index 0000000..880cd86
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
+@@ -0,0 +1,897 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++#define MIN_CHECK true
++#define MAX_CHECK false
++
++static tree get_size_overflow_type(struct visited *visited, const_gimple stmt, const_tree node)
++{
++ const_tree type;
++ tree new_type;
++
++ gcc_assert(node != NULL_TREE);
++
++ type = TREE_TYPE(node);
++
++ if (pointer_set_contains(visited->my_stmts, stmt))
++ return TREE_TYPE(node);
++
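++ // widen to the next wider integer mode: QI->HI, HI->SI, SI->DI and DI->TI (on 32-bit targets a DImode value keeps DImode)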
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ new_type = size_overflow_type_HI;
++ break;
++ case HImode:
++ new_type = size_overflow_type_SI;
++ break;
++ case SImode:
++ new_type = size_overflow_type_DI;
++ break;
++ case DImode:
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
++ else
++ new_type = size_overflow_type_TI;
++ break;
++ case TImode:
++ gcc_assert(!TYPE_UNSIGNED(type));
++ new_type = size_overflow_type_TI;
++ break;
++ default:
++ debug_tree((tree)node);
++ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
++ gcc_unreachable();
++ }
++
++ if (TYPE_QUALS(type) != 0)
++ return build_qualified_type(new_type, TYPE_QUALS(type));
++ return new_type;
++}
++
++static tree get_lhs(const_gimple stmt)
++{
++ switch (gimple_code(stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ return gimple_get_lhs(stmt);
++ case GIMPLE_PHI:
++ return gimple_phi_result(stmt);
++ default:
++ return NULL_TREE;
++ }
++}
++
++static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before)
++{
++ gimple_stmt_iterator gsi;
++ tree lhs;
++ gimple new_stmt;
++
++ if (rhs == NULL_TREE)
++ return NULL_TREE;
++
++ gsi = gsi_for_stmt(stmt);
++ new_stmt = build_cast_stmt(visited, size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
++ pointer_set_insert(visited->my_stmts, new_stmt);
++
++ lhs = get_lhs(new_stmt);
++ gcc_assert(lhs != NULL_TREE);
++ return lhs;
++}
++
++tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before)
++{
++ tree lhs, dst_type;
++ gimple_stmt_iterator gsi;
++
++ if (rhs1 == NULL_TREE) {
++ debug_gimple_stmt(oldstmt);
++ error("%s: rhs1 is NULL_TREE", __func__);
++ gcc_unreachable();
++ }
++
++ switch (gimple_code(oldstmt)) {
++ case GIMPLE_ASM:
++ lhs = rhs1;
++ break;
++ case GIMPLE_CALL:
++ case GIMPLE_ASSIGN:
++ lhs = gimple_get_lhs(oldstmt);
++ break;
++ default:
++ debug_gimple_stmt(oldstmt);
++ gcc_unreachable();
++ }
++
++ gsi = gsi_for_stmt(oldstmt);
++ pointer_set_insert(visited->stmts, oldstmt);
++ if (lookup_stmt_eh_lp(oldstmt) != 0) {
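++ // a call that can throw cannot have the cast inserted after it in its own bb, so insert at the start of the fallthru successor instead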
++ basic_block next_bb, cur_bb;
++ const_edge e;
++
++ gcc_assert(before == false);
++ gcc_assert(stmt_can_throw_internal(oldstmt));
++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
++ gcc_assert(!gsi_end_p(gsi));
++
++ cur_bb = gimple_bb(oldstmt);
++ next_bb = cur_bb->next_bb;
++ e = find_edge(cur_bb, next_bb);
++ gcc_assert(e != NULL);
++ gcc_assert(e->flags & EDGE_FALLTHRU);
++
++ gsi = gsi_after_labels(next_bb);
++ gcc_assert(!gsi_end_p(gsi));
++
++ before = true;
++ oldstmt = gsi_stmt(gsi);
++ }
++
++ dst_type = get_size_overflow_type(visited, oldstmt, lhs);
++
++ if (is_gimple_constant(rhs1))
++ return cast_a_tree(dst_type, rhs1);
++ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before);
++}
++
++tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
++{
++ gimple stmt;
++ gimple_stmt_iterator gsi;
++ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
++
++ if (pointer_set_contains(visited->my_stmts, oldstmt))
++ return lhs;
++
++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
++ rhs1 = gimple_assign_rhs1(oldstmt);
++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
++ }
++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
++ rhs2 = gimple_assign_rhs2(oldstmt);
++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
++ }
++
++ stmt = gimple_copy(oldstmt);
++ gimple_set_location(stmt, gimple_location(oldstmt));
++ pointer_set_insert(visited->my_stmts, stmt);
++
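++ // the duplicated operands are already in the wider type, so a widening multiply becomes a plain multiplication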
++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
++ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
++
++ size_overflow_type = get_size_overflow_type(visited, oldstmt, node);
++
++ new_var = create_new_var(size_overflow_type);
++ new_var = make_ssa_name(new_var, stmt);
++ gimple_assign_set_lhs(stmt, new_var);
++
++ if (rhs1 != NULL_TREE)
++ gimple_assign_set_rhs1(stmt, rhs1);
++
++ if (rhs2 != NULL_TREE)
++ gimple_assign_set_rhs2(stmt, rhs2);
++#if BUILDING_GCC_VERSION >= 4006
++ if (rhs3 != NULL_TREE)
++ gimple_assign_set_rhs3(stmt, rhs3);
++#endif
++ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
++ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
++ update_stmt(stmt);
++ pointer_set_insert(visited->stmts, oldstmt);
++ return gimple_assign_lhs(stmt);
++}
++
++static tree cast_parm_decl(struct visited *visited, tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++ basic_block first_bb;
++
++ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
++
++ if (bb->index == 0) {
++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
++ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
++ bb = first_bb;
++ }
++
++ gsi = gsi_after_labels(bb);
++ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++
++ return gimple_assign_lhs(assign);
++}
++
++static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign, def_stmt = get_def_stmt(new_arg);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI) {
++ gsi = gsi_after_labels(gimple_bb(def_stmt));
++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
++ } else {
++ gsi = gsi_for_stmt(def_stmt);
++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
++ }
++
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++}
++
++static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type)
++{
++ basic_block bb;
++ gimple_stmt_iterator gsi;
++ const_gimple def_stmt;
++ gimple assign;
++
++ def_stmt = get_def_stmt(arg);
++ bb = gimple_bb(def_stmt);
++ gcc_assert(bb->index != 0);
++ gsi = gsi_after_labels(bb);
++
++ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++}
++
++static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
++{
++ tree size_overflow_type;
++ tree arg;
++ const_gimple def_stmt;
++
++ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
++ return new_arg;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ def_stmt = get_def_stmt(arg);
++ gcc_assert(def_stmt != NULL);
++ size_overflow_type = get_size_overflow_type(visited, oldstmt, arg);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_PHI:
++ return cast_visited_phi_arg(visited, ssa_name_var, arg, size_overflow_type);
++ case GIMPLE_NOP: {
++ basic_block bb;
++
++ bb = gimple_phi_arg_edge(oldstmt, i)->src;
++ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb);
++ }
++ case GIMPLE_ASM: {
++ gimple_stmt_iterator gsi;
++ gimple assign, stmt = get_def_stmt(arg);
++
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++ }
++ default:
++ gcc_assert(new_arg != NULL_TREE);
++ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
++ return use_phi_ssa_name(visited, ssa_name_var, new_arg);
++ }
++}
++
++static gimple overflow_create_phi_node(struct visited *visited, gimple oldstmt, tree result)
++{
++ basic_block bb;
++ gimple phi;
++ gimple_seq seq;
++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
++
++ bb = gsi_bb(gsi);
++
++ if (result == NULL_TREE) {
++ tree old_result = gimple_phi_result(oldstmt);
++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, old_result);
++
++ result = create_new_var(size_overflow_type);
++ }
++
++ phi = create_phi_node(result, bb);
++ gimple_phi_set_result(phi, make_ssa_name(result, phi));
++ seq = phi_nodes(bb);
++ gsi = gsi_last(seq);
++ gsi_remove(&gsi, false);
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
++ gimple_set_bb(phi, bb);
++ return phi;
++}
++
++#if BUILDING_GCC_VERSION <= 4007
++static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
++#else
++static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
++#endif
++{
++ gimple new_phi;
++ unsigned int i;
++ tree arg, result;
++ location_t loc = gimple_location(oldstmt);
++
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(!VEC_empty(tree, *args));
++#else
++ gcc_assert(!args->is_empty());
++#endif
++
++ new_phi = overflow_create_phi_node(visited, oldstmt, ssa_name_var);
++ result = gimple_phi_result(new_phi);
++ ssa_name_var = SSA_NAME_VAR(result);
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, *args, i, arg) {
++#else
++ FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
++#endif
++ arg = create_new_phi_arg(visited, ssa_name_var, arg, oldstmt, i);
++ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, heap, *args);
++#else
++ vec_free(args);
++#endif
++ update_stmt(new_phi);
++ pointer_set_insert(visited->my_stmts, new_phi);
++ return result;
++}
++
++static tree handle_phi(struct visited *visited, struct cgraph_node *caller_node, tree orig_result)
++{
++ tree ssa_name_var = NULL_TREE;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, heap) *args = NULL;
++#else
++ vec<tree, va_heap, vl_embed> *args = NULL;
++#endif
++ gimple oldstmt = get_def_stmt(orig_result);
++ unsigned int i, len = gimple_phi_num_args(oldstmt);
++
++ pointer_set_insert(visited->stmts, oldstmt);
++ for (i = 0; i < len; i++) {
++ tree arg, new_arg;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ new_arg = expand(visited, caller_node, arg);
++
++ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
++ ssa_name_var = SSA_NAME_VAR(new_arg);
++
++ if (is_gimple_constant(arg)) {
++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, arg);
++
++ new_arg = cast_a_tree(size_overflow_type, arg);
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, heap, args, new_arg);
++#else
++ vec_safe_push(args, new_arg);
++#endif
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ return create_new_phi_node(visited, &args, ssa_name_var, oldstmt);
++#else
++ return create_new_phi_node(visited, args, ssa_name_var, oldstmt);
++#endif
++}
++
++static tree create_cast_assign(struct visited *visited, gimple stmt)
++{
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree lhs = gimple_assign_lhs(stmt);
++ const_tree rhs1_type = TREE_TYPE(rhs1);
++ const_tree lhs_type = TREE_TYPE(lhs);
++
++ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ return create_assign(visited, stmt, rhs1, AFTER_STMT);
++}
++
++static bool skip_lhs_cast_check(const_gimple stmt)
++{
++ const_tree rhs = gimple_assign_rhs1(stmt);
++ const_gimple def_stmt = get_def_stmt(rhs);
++
++ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
++ if (gimple_code(def_stmt) == GIMPLE_ASM)
++ return true;
++
++ if (is_const_plus_unsigned_signed_truncation(rhs))
++ return true;
++
++ return false;
++}
++
++static tree create_string_param(tree string)
++{
++ tree i_type, a_type;
++ const int length = TREE_STRING_LENGTH(string);
++
++ gcc_assert(length > 0);
++
++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
++ a_type = build_array_type(char_type_node, i_type);
++
++ TREE_TYPE(string) = a_type;
++ TREE_CONSTANT(string) = 1;
++ TREE_READONLY(string) = 1;
++
++ return build1(ADDR_EXPR, ptr_type_node, string);
++}
++
++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
++{
++ gimple cond_stmt;
++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
++
++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
++ update_stmt(cond_stmt);
++}
++
++static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
++{
++ gimple func_stmt;
++ const_gimple def_stmt;
++ const_tree loc_line;
++ tree loc_file, ssa_name, current_func;
++ expanded_location xloc;
++ char *ssa_name_buf;
++ int len;
++ struct cgraph_edge *edge;
++ struct cgraph_node *callee_node;
++ int frequency;
++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
++
++ def_stmt = get_def_stmt(arg);
++ xloc = expand_location(gimple_location(def_stmt));
++
++ if (!gimple_has_location(def_stmt)) {
++ xloc = expand_location(gimple_location(stmt));
++ if (!gimple_has_location(stmt))
++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++ }
++
++ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
++
++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
++ loc_file = create_string_param(loc_file);
++
++ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
++ current_func = create_string_param(current_func);
++
++ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
++ call_count++;
++ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
++ gcc_assert(len > 0);
++ ssa_name = build_string(len + 1, ssa_name_buf);
++ free(ssa_name_buf);
++ ssa_name = create_string_param(ssa_name);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
++
++ callee_node = cgraph_get_create_node(report_size_overflow_decl);
++ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
++
++ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
++ gcc_assert(edge != NULL);
++}
++
++static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
++{
++ basic_block cond_bb, join_bb, bb_true;
++ edge e;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
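++ // split the block around stmt: cond_bb ends with the new comparison, bb_true holds the report_size_overflow() call and join_bb continues with the original code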
++ cond_bb = gimple_bb(stmt);
++ if (before)
++ gsi_prev(&gsi);
++ if (gsi_end_p(gsi))
++ e = split_block_after_labels(cond_bb);
++ else
++ e = split_block(cond_bb, gsi_stmt(gsi));
++ cond_bb = e->src;
++ join_bb = e->dest;
++ e->flags = EDGE_FALSE_VALUE;
++ e->probability = REG_BR_PROB_BASE;
++
++ bb_true = create_empty_bb(cond_bb);
++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
++ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
++
++ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
++
++ if (current_loops != NULL) {
++ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
++ add_bb_to_loop(bb_true, cond_bb->loop_father);
++ }
++
++ insert_cond(cond_bb, arg, cond_code, type_value);
++ insert_cond_result(caller_node, bb_true, stmt, arg, min);
++
++// print_the_code_insertions(stmt);
++}
++
++void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
++{
++ const_tree rhs_type = TREE_TYPE(rhs);
++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
++
++ gcc_assert(rhs_type != NULL_TREE);
++ if (TREE_CODE(rhs_type) == POINTER_TYPE)
++ return;
++
++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
++
++ if (is_const_plus_unsigned_signed_truncation(rhs))
++ return;
++
++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ // typemax (-1) < typemin (0)
++ if (TREE_OVERFLOW(type_max))
++ return;
++
++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++ cast_rhs_type = TREE_TYPE(cast_rhs);
++ type_max_type = TREE_TYPE(type_max);
++ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
++
++ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
++
++ // special case: get_size_overflow_type(), 32, u64->s
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
++ return;
++
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
++ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
++}
++
++static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
++{
++ bool cast_lhs, cast_rhs;
++ tree lhs = gimple_assign_lhs(stmt);
++ tree rhs = gimple_assign_rhs1(stmt);
++ const_tree lhs_type = TREE_TYPE(lhs);
++ const_tree rhs_type = TREE_TYPE(rhs);
++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
++ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
++
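++ // rows: lhs size > rhs size, lhs == rhs, lhs < rhs; columns indexed by TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)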
++ static bool check_lhs[3][4] = {
++ // ss su us uu
++ { false, true, true, false }, // lhs > rhs
++ { false, false, false, false }, // lhs = rhs
++ { true, true, true, true }, // lhs < rhs
++ };
++
++ static bool check_rhs[3][4] = {
++ // ss su us uu
++ { true, false, true, true }, // lhs > rhs
++ { true, false, true, true }, // lhs = rhs
++ { true, false, true, true }, // lhs < rhs
++ };
++
++ // skip the lhs check on a signed SI -> HI cast or a signed SI -> QI cast
++ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (lhs_size > rhs_size) {
++ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ } else if (lhs_size == rhs_size) {
++ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ } else {
++ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ }
++
++ if (!cast_lhs && !cast_rhs)
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ if (cast_lhs && !skip_lhs_cast_check(stmt))
++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
++
++ if (cast_rhs)
++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++}
++
++static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++{
++ enum tree_code rhs_code;
++ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
++
++ if (pointer_set_contains(visited->my_stmts, stmt))
++ return lhs;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++
++ if (new_rhs1 == NULL_TREE)
++ return create_cast_assign(visited, stmt);
++
++ if (pointer_set_contains(visited->no_cast_check, stmt))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ rhs_code = gimple_assign_rhs_code(stmt);
++ if (rhs_code == BIT_NOT_EXPR || rhs_code == NEGATE_EXPR) {
++ tree size_overflow_type = get_size_overflow_type(visited, stmt, rhs1);
++
++ new_rhs1 = cast_to_new_size_overflow_type(visited, stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++ }
++
++ if (!gimple_assign_cast_p(stmt))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
++}
++
++static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++{
++ tree rhs1, lhs = gimple_assign_lhs(stmt);
++ gimple def_stmt = get_def_stmt(lhs);
++
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
++ rhs1 = gimple_assign_rhs1(def_stmt);
++
++ if (is_gimple_constant(rhs1))
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++
++ switch (TREE_CODE(rhs1)) {
++ case SSA_NAME:
++ return handle_unary_rhs(visited, caller_node, def_stmt);
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case ADDR_EXPR:
++ case COMPONENT_REF:
++ case INDIRECT_REF:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case TARGET_MEM_REF:
++ case VIEW_CONVERT_EXPR:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ case PARM_DECL:
++ case VAR_DECL:
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ default:
++ debug_gimple_stmt(def_stmt);
++ debug_tree(rhs1);
++ gcc_unreachable();
++ }
++}
++
++static void __unused print_the_code_insertions(const_gimple stmt)
++{
++ location_t loc = gimple_location(stmt);
++
++ inform(loc, "Integer size_overflow check applied here.");
++}
++
++static tree handle_binary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ enum intentional_overflow_type res;
++ tree rhs1, rhs2, new_lhs;
++ gimple def_stmt = get_def_stmt(lhs);
++ tree new_rhs1 = NULL_TREE;
++ tree new_rhs2 = NULL_TREE;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ /* no DImode/TImode division in the 32/64 bit kernel */
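++ // these operations cannot overflow the original type (and pointer arithmetic is never duplicated), so a plain assignment is enough here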
++ switch (gimple_assign_rhs_code(def_stmt)) {
++ case RDIV_EXPR:
++ case TRUNC_DIV_EXPR:
++ case CEIL_DIV_EXPR:
++ case FLOOR_DIV_EXPR:
++ case ROUND_DIV_EXPR:
++ case TRUNC_MOD_EXPR:
++ case CEIL_MOD_EXPR:
++ case FLOOR_MOD_EXPR:
++ case ROUND_MOD_EXPR:
++ case EXACT_DIV_EXPR:
++ case POINTER_PLUS_EXPR:
++ case BIT_AND_EXPR:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ default:
++ break;
++ }
++
++ new_lhs = handle_integer_truncation(visited, caller_node, lhs);
++ if (new_lhs != NULL_TREE)
++ return new_lhs;
++
++ if (TREE_CODE(rhs1) == SSA_NAME)
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ if (TREE_CODE(rhs2) == SSA_NAME)
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ res = add_mul_intentional_overflow(def_stmt);
++ if (res != NO_INTENTIONAL_OVERFLOW) {
++ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++ insert_cast_expr(visited, get_def_stmt(new_lhs), res);
++ return new_lhs;
++ }
++
++ if (skip_expr_on_double_type(def_stmt)) {
++ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++ insert_cast_expr(visited, get_def_stmt(new_lhs), NO_INTENTIONAL_OVERFLOW);
++ return new_lhs;
++ }
++
++ if (is_a_neg_overflow(def_stmt, rhs2))
++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
++ if (is_a_neg_overflow(def_stmt, rhs1))
++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
++
++ if (is_a_constant_overflow(def_stmt, rhs2))
++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
++ if (is_a_constant_overflow(def_stmt, rhs1))
++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
++
++ // the const is between 0 and (signed) MAX
++ if (is_gimple_constant(rhs1))
++ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT);
++ if (is_gimple_constant(rhs2))
++ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT);
++
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++#if BUILDING_GCC_VERSION >= 4006
++static tree get_new_rhs(struct visited *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
++{
++ if (is_gimple_constant(rhs))
++ return cast_a_tree(size_overflow_type, rhs);
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return NULL_TREE;
++ return expand(visited, caller_node, rhs);
++}
++
++static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs);
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs3 = gimple_assign_rhs3(def_stmt);
++ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
++ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
++ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
++
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
++}
++#endif
++
++static tree get_my_stmt_lhs(struct visited *visited, gimple stmt)
++{
++ gimple_stmt_iterator gsi;
++ gimple next_stmt = NULL;
++
++ gsi = gsi_for_stmt(stmt);
++
++ do {
++ gsi_next(&gsi);
++ next_stmt = gsi_stmt(gsi);
++
++ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt))
++ return NULL_TREE;
++
++ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt))
++ break;
++
++ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt));
++ } while (!gsi_end_p(gsi));
++
++ gcc_assert(next_stmt);
++ return get_lhs(next_stmt);
++}
++
++static tree expand_visited(struct visited *visited, gimple def_stmt)
++{
++ gimple_stmt_iterator gsi;
++ enum gimple_code code = gimple_code(def_stmt);
++
++ if (code == GIMPLE_ASM)
++ return NULL_TREE;
++
++ gsi = gsi_for_stmt(def_stmt);
++ gsi_next(&gsi);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
++ return NULL_TREE;
++ return get_my_stmt_lhs(visited, def_stmt);
++}
++
++tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(lhs);
++
++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++ return NULL_TREE;
++
++ if (pointer_set_contains(visited->my_stmts, def_stmt))
++ return lhs;
++
++ if (pointer_set_contains(visited->stmts, def_stmt))
++ return expand_visited(visited, def_stmt);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_PHI:
++ return handle_phi(visited, caller_node, lhs);
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
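++ // 2 ops: unary assign (incl. casts), 3 ops: binary assign, 4 ops: ternary assign (gcc >= 4.6)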
++ case 2:
++ return handle_unary_ops(visited, caller_node, def_stmt);
++ case 3:
++ return handle_binary_ops(visited, caller_node, lhs);
++#if BUILDING_GCC_VERSION >= 4006
++ case 4:
++ return handle_ternary_ops(visited, caller_node, lhs);
++#endif
++ }
++ default:
++ debug_gimple_stmt(def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
+new file mode 100644
+index 0000000..f8f5dd5
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
+@@ -0,0 +1,1133 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++#define VEC_LEN 128
++#define RET_CHECK NULL_TREE
++#define WRONG_NODE 32
++#define NOT_INTENTIONAL_ASM NULL
++
++unsigned int call_count;
++
++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
++
++struct visited_fns {
++ struct visited_fns *next;
++ const_tree fndecl;
++ unsigned int num;
++ const_gimple first_stmt;
++};
++
++struct next_cgraph_node {
++ struct next_cgraph_node *next;
++ struct cgraph_node *current_function;
++ tree callee_fndecl;
++ unsigned int num;
++};
++
++// Don't want to duplicate entries in next_cgraph_node
++static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
++{
++ const_tree new_callee_fndecl;
++ struct next_cgraph_node *cur_node;
++
++ if (fndecl == RET_CHECK)
++ new_callee_fndecl = NODE_DECL(node);
++ else
++ new_callee_fndecl = fndecl;
++
++ for (cur_node = head; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
++ continue;
++ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
++ continue;
++ if (num == cur_node->num)
++ return true;
++ }
++ return false;
++}
++
++/* Add a next_cgraph_node into the list for handle_function().
++ * handle_function() iterates over all the next cgraph nodes and
++ * starts the overflow check insertion process.
++ */
++static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
++{
++ struct next_cgraph_node *new_node;
++
++ if (is_in_next_cgraph_node(head, node, fndecl, num))
++ return head;
++
++ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
++ new_node->current_function = node;
++ new_node->next = NULL;
++ new_node->num = num;
++ if (fndecl == RET_CHECK)
++ new_node->callee_fndecl = NODE_DECL(node);
++ else
++ new_node->callee_fndecl = fndecl;
++
++ if (!head)
++ return new_node;
++
++ new_node->next = head;
++ return new_node;
++}
++
++static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
++{
++ struct cgraph_edge *e;
++
++ if (num == 0)
++ return create_new_next_cgraph_node(head, node, RET_CHECK, num);
++
++ for (e = node->callers; e; e = e->next_caller) {
++ tree fndecl = gimple_call_fndecl(e->call_stmt);
++
++ gcc_assert(fndecl != NULL_TREE);
++ head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
++ }
++
++ return head;
++}
++
++struct missing_functions {
++ struct missing_functions *next;
++ const_tree node;
++ tree fndecl;
++};
++
++static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
++{
++ struct missing_functions *new_function;
++
++ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
++ new_function->node = node;
++ new_function->next = NULL;
++
++ if (TREE_CODE(node) == FUNCTION_DECL)
++ new_function->fndecl = node;
++ else
++ new_function->fndecl = current_function_decl;
++ gcc_assert(new_function->fndecl);
++
++ if (!missing_fn_head)
++ return new_function;
++
++ new_function->next = missing_fn_head;
++ return new_function;
++}
++
++/* If the function is missing from the hash table and it is a static function
++ * then create a next_cgraph_node from it for handle_function()
++ */
++static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
++{
++ unsigned int num;
++ const_tree orig_fndecl;
++ struct cgraph_node *next_node = NULL;
++
++ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
++
++ num = get_function_num(missing_fn_head->node, orig_fndecl);
++ if (num == CANNOT_FIND_ARG)
++ return cnodes;
++
++ if (!is_missing_function(orig_fndecl, num))
++ return cnodes;
++
++ next_node = cgraph_get_node(missing_fn_head->fndecl);
++ if (next_node && next_node->local.local)
++ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
++ return cnodes;
++}
++
++/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
++ * into the next_cgraph_node list. They will be the next interesting returns or callees.
++ */
++static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
++{
++ unsigned int i;
++ tree node;
++ struct missing_functions *cur, *missing_fn_head = NULL;
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
++#endif
++ switch (TREE_CODE(node)) {
++ case PARM_DECL:
++ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
++ break;
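++ // integer parm_decls fall through and are recorded like fndecls; other parm types are skipped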
++ case FUNCTION_DECL:
++ missing_fn_head = create_new_missing_function(missing_fn_head, node);
++ break;
++ default:
++ break;
++ }
++ }
++
++ while (missing_fn_head) {
++ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
++
++ cur = missing_fn_head->next;
++ free(missing_fn_head);
++ missing_fn_head = cur;
++ }
++
++ return cnodes;
++}
++
++static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ const_tree arg = gimple_phi_arg_def(phi, i);
++
++ set_conditions(visited, interesting_conditions, arg);
++ }
++}
++
++enum conditions {
++ FROM_CONST, NOT_UNARY, CAST
++};
++
++// Search for constants, cast assignments and binary/ternary assignments
++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
++{
++ gimple def_stmt = get_def_stmt(lhs);
++
++ if (is_gimple_constant(lhs)) {
++ interesting_conditions[FROM_CONST] = true;
++ return;
++ }
++
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return;
++ case GIMPLE_PHI:
++ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
++ case GIMPLE_ASSIGN:
++ if (gimple_num_ops(def_stmt) == 2) {
++ const_tree rhs = gimple_assign_rhs1(def_stmt);
++
++ if (gimple_assign_cast_p(def_stmt))
++ interesting_conditions[CAST] = true;
++
++ return set_conditions(visited, interesting_conditions, rhs);
++ } else {
++ interesting_conditions[NOT_UNARY] = true;
++ return;
++ }
++ default:
++ debug_gimple_stmt(def_stmt);
++ gcc_unreachable();
++ }
++}
++
++// Determine whether stmt duplication will be necessary.
++static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
++{
++ struct pointer_set_t *visited;
++
++ if (gimple_assign_cast_p(cur_node->first_stmt))
++ interesting_conditions[CAST] = true;
++ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
++ interesting_conditions[NOT_UNARY] = true;
++
++ visited = pointer_set_create();
++ set_conditions(visited, interesting_conditions, cur_node->node);
++ pointer_set_destroy(visited);
++}
++
++// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
++static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++
++ // already removed
++ if (gimple_bb(asm_stmt) == NULL)
++ return;
++ gsi = gsi_for_stmt(asm_stmt);
++
++ assign = gimple_build_assign(lhs, rhs);
++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
++ SSA_NAME_DEF_STMT(lhs) = assign;
++
++ gsi_remove(&gsi, true);
++}
++
++/* Get the fndecl of an interesting stmt: it is the caller function if the interesting
++ * stmt is a return, otherwise it is the callee function.
++ */
++const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
++{
++ const_tree fndecl;
++
++ if (argnum == 0)
++ fndecl = current_function_decl;
++ else
++ fndecl = gimple_call_fndecl(stmt);
++
++ if (fndecl == NULL_TREE)
++ return NULL_TREE;
++
++ return DECL_ORIGIN(fndecl);
++}
++
++// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
++static bool skip_asm(const_tree arg)
++{
++ gimple def_stmt = get_def_stmt(arg);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
++ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
++}
++
++static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ walk_use_def(visited, cur_node, arg);
++ }
++}
++
++static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++ gimple def_stmt = get_def_stmt(lhs);
++ tree rhs1, rhs2;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ walk_use_def(visited, cur_node, rhs1);
++ walk_use_def(visited, cur_node, rhs2);
++}
++
++static void insert_last_node(struct interesting_node *cur_node, tree node)
++{
++ unsigned int i;
++ tree element;
++ enum tree_code code;
++
++ gcc_assert(node != NULL_TREE);
++
++ if (is_gimple_constant(node))
++ return;
++
++ code = TREE_CODE(node);
++ if (code == VAR_DECL) {
++ node = DECL_ORIGIN(node);
++ code = TREE_CODE(node);
++ }
++
++ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
++ return;
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
++#endif
++ if (operand_equal_p(node, element, 0))
++ return;
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
++ VEC_safe_push(tree, gc, cur_node->last_nodes, node);
++#else
++ gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
++ vec_safe_push(cur_node->last_nodes, node);
++#endif
++}
++
++// a size_overflow asm stmt in the control flow doesn't stop the recursion
++static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++{
++ if (!is_size_overflow_asm(stmt))
++ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++}
++
++/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
++ * and component refs (for checking the intentional_overflow attribute).
++ */
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) != SSA_NAME) {
++ insert_last_node(cur_node, lhs);
++ return;
++ }
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++ case GIMPLE_CALL: {
++ tree fndecl = gimple_call_fndecl(def_stmt);
++
++ if (fndecl == NULL_TREE)
++ return;
++ insert_last_node(cur_node, fndecl);
++ return;
++ }
++ case GIMPLE_PHI:
++ return walk_use_def_phi(visited, cur_node, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return walk_use_def_binary(visited, cur_node, lhs);
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
++static void set_last_nodes(struct interesting_node *cur_node)
++{
++ struct pointer_set_t *visited;
++
++ visited = pointer_set_create();
++ walk_use_def(visited, cur_node, cur_node->node);
++ pointer_set_destroy(visited);
++}
++
++enum precond {
++ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and no missing size_overflow attribute check anywhere.
++ * Missing size_overflow attribute checking only happens if the intentional_overflow attribute is of the mark_no type.
++ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
++ * Possible error codes are skipped too: if the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume it is some kind of error code.
++ */
++static enum precond check_preconditions(struct interesting_node *cur_node)
++{
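++ // indexed by enum conditions: [FROM_CONST], [NOT_UNARY], [CAST]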
++ bool interesting_conditions[3] = {false, false, false};
++
++ set_last_nodes(cur_node);
++
++ check_intentional_attribute_ipa(cur_node);
++ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
++ return NO_ATTRIBUTE_SEARCH;
++
++ search_interesting_conditions(cur_node, interesting_conditions);
++
++ // error code
++ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
++ return NO_ATTRIBUTE_SEARCH;
++
++ // unnecessary overflow check
++ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ return NO_CHECK_INSERT;
++
++ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
++ return NO_CHECK_INSERT;
++
++ return NONE;
++}
++
++static tree cast_to_orig_type(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node)
++{
++ const_gimple assign;
++ tree orig_type = TREE_TYPE(orig_node);
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ return gimple_assign_lhs(assign);
++}
++
++static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node)
++{
++ void (*set_rhs)(gimple, tree);
++ gimple stmt = cur_node->first_stmt;
++ const_tree orig_node = cur_node->node;
++
++ switch (gimple_code(stmt)) {
++ case GIMPLE_RETURN:
++ gimple_return_set_retval(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ break;
++ case GIMPLE_CALL:
++ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ break;
++ case GIMPLE_ASSIGN:
++ switch (cur_node->num) {
++ case 1:
++ set_rhs = &gimple_assign_set_rhs1;
++ break;
++ case 2:
++ set_rhs = &gimple_assign_set_rhs2;
++ break;
++#if BUILDING_GCC_VERSION >= 4006
++ case 3:
++ set_rhs = &gimple_assign_set_rhs3;
++ break;
++#endif
++ default:
++ gcc_unreachable();
++ }
++
++ set_rhs(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ break;
++ default:
++ debug_gimple_stmt(stmt);
++ gcc_unreachable();
++ }
++
++ update_stmt(stmt);
++}
++
++static struct visited *create_visited(void)
++{
++ struct visited *new_node;
++
++ new_node = (struct visited *)xmalloc(sizeof(*new_node));
++ new_node->stmts = pointer_set_create();
++ new_node->my_stmts = pointer_set_create();
++ new_node->skip_expr_casts = pointer_set_create();
++ new_node->no_cast_check = pointer_set_create();
++ return new_node;
++}
++
++static void free_visited(struct visited *visited)
++{
++ pointer_set_destroy(visited->stmts);
++ pointer_set_destroy(visited->my_stmts);
++ pointer_set_destroy(visited->skip_expr_casts);
++ pointer_set_destroy(visited->no_cast_check);
++
++ free(visited);
++}
++
++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
++ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
++ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct visited *visited, struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++ enum precond ret;
++ tree new_node, orig_node = cur_node->node;
++
++ ret = check_preconditions(cur_node);
++ if (ret == NO_ATTRIBUTE_SEARCH)
++ return cnodes;
++
++ cnodes = search_overflow_attribute(cnodes, cur_node);
++
++ if (ret == NO_CHECK_INSERT)
++ return cnodes;
++
++ new_node = expand(visited, caller_node, orig_node);
++ if (new_node == NULL_TREE)
++ return cnodes;
++
++ change_orig_node(visited, cur_node, new_node);
++ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
++
++ return cnodes;
++}
++
++// Check whether the interesting node is already on the list (avoid duplicate entries).
++static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
++{
++ struct interesting_node *cur;
++
++ for (cur = head; cur; cur = cur->next) {
++ if (!operand_equal_p(node, cur->node, 0))
++ continue;
++ if (num != cur->num)
++ continue;
++ if (first_stmt == cur->first_stmt)
++ return true;
++ }
++ return false;
++}
++
++/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
++ first_stmt: the call, assignment or ret stmt; change_orig_node() will change the original node (retval or function arg) in this stmt
++ last_nodes: the last stmts in the recursion (they don't have a def_stmt). They are useful for the missing size_overflow attribute check and
++ the intentional_overflow attribute check. They are collected by set_last_nodes().
++ num: arg count of a call stmt, or 0 when it is a ret
++ node: the recursion starts from here; it is a call arg or a return value
++ fndecl: the fndecl of the callee function when the node is a call arg, otherwise the fndecl of the caller (current_function_decl) function.
++ intentional_attr_decl: intentional_overflow attribute of the callee function
++ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
++ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
++ */
++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
++{
++ struct interesting_node *new_node;
++ tree fndecl;
++ enum gimple_code code;
++
++ gcc_assert(node != NULL_TREE);
++ code = gimple_code(first_stmt);
++ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
++
++ if (num == CANNOT_FIND_ARG)
++ return head;
++
++ if (skip_types(node))
++ return head;
++
++ if (skip_asm(node))
++ return head;
++
++ if (is_gimple_call(first_stmt))
++ fndecl = gimple_call_fndecl(first_stmt);
++ else
++ fndecl = current_function_decl;
++
++ if (fndecl == NULL_TREE)
++ return head;
++
++ if (is_in_interesting_node(head, first_stmt, node, num))
++ return head;
++
++ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
++
++ new_node->next = NULL;
++ new_node->first_stmt = first_stmt;
++#if BUILDING_GCC_VERSION <= 4007
++ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
++#else
++ vec_alloc(new_node->last_nodes, VEC_LEN);
++#endif
++ new_node->num = num;
++ new_node->node = node;
++ new_node->fndecl = fndecl;
++ new_node->intentional_attr_decl = MARK_NO;
++ new_node->intentional_attr_cur_fndecl = MARK_NO;
++ new_node->intentional_mark_from_gimple = asm_stmt;
++
++ if (!head)
++ return new_node;
++
++ new_node->next = head;
++ return new_node;
++}
++
++/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
++ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
++ */
++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++{
++ struct next_cgraph_node *cur_node;
++ tree ret = gimple_return_retval(stmt);
++
++ if (ret == NULL_TREE)
++ return head;
++
++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
++ continue;
++ if (cur_node->num == 0)
++ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
++ }
++
++ return head;
++}
++
++/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
++ * If the call stmt is in the next cgraph node list then it's an interesting call.
++ */
++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++{
++ unsigned int argnum;
++ tree arg;
++ const_tree fndecl;
++ struct next_cgraph_node *cur_node;
++
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ return head;
++
++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
++ continue;
++ argnum = get_correct_arg_count(cur_node->num, fndecl);
++ gcc_assert(argnum != CANNOT_FIND_ARG);
++ if (argnum == 0)
++ continue;
++
++ arg = gimple_call_arg(stmt, argnum - 1);
++ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
++ }
++
++ return head;
++}
++
++static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
++{
++ if (!operand_equal_p(orig_node, node, 0))
++ return WRONG_NODE;
++ if (skip_types(node))
++ return WRONG_NODE;
++ return ret_count;
++}
++
++// Get the index of the rhs node in an assignment
++static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
++{
++ const_tree rhs1, rhs2;
++ unsigned int ret;
++
++ gcc_assert(stmt);
++ gcc_assert(is_gimple_assign(stmt));
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ gcc_assert(rhs1 != NULL_TREE);
++
++ switch (gimple_num_ops(stmt)) {
++ case 2:
++ return check_ops(node, rhs1, 1);
++ case 3:
++ ret = check_ops(node, rhs1, 1);
++ if (ret != WRONG_NODE)
++ return ret;
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ gcc_assert(rhs2 != NULL_TREE);
++ return check_ops(node, rhs2, 2);
++ default:
++ gcc_unreachable();
++ }
++}
++
++// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
++static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
++{
++ unsigned int i;
++
++ if (gimple_call_fndecl(stmt) == NULL_TREE)
++ return CANNOT_FIND_ARG;
++
++ for (i = 0; i < gimple_call_num_args(stmt); i++) {
++ tree node;
++
++ node = gimple_call_arg(stmt, i);
++ if (!operand_equal_p(arg, node, 0))
++ continue;
++ if (!skip_types(node))
++ return i + 1;
++ }
++
++ return CANNOT_FIND_ARG;
++}
++
++/* starting from the size_overflow asm stmt collect interesting stmts. They can be
++ * any of return, call or assignment stmts (because of inlining).
++ */
++static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
++{
++ use_operand_p use_p;
++ imm_use_iterator imm_iter;
++ unsigned int argnum;
++
++ gcc_assert(TREE_CODE(node) == SSA_NAME);
++
++ if (pointer_set_insert(visited, node))
++ return head;
++
++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++ gimple stmt = USE_STMT(use_p);
++
++ if (stmt == NULL)
++ return head;
++ if (is_gimple_debug(stmt))
++ continue;
++
++ switch (gimple_code(stmt)) {
++ case GIMPLE_CALL:
++ argnum = find_arg_number_gimple(node, stmt);
++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
++ break;
++ case GIMPLE_RETURN:
++ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
++ break;
++ case GIMPLE_ASSIGN:
++ argnum = get_assign_ops_count(stmt, node);
++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
++ break;
++ case GIMPLE_PHI: {
++ tree result = gimple_phi_result(stmt);
++ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
++ break;
++ }
++ case GIMPLE_ASM:
++ if (gimple_asm_noutputs(stmt) != 0)
++ break;
++ if (!is_size_overflow_asm(stmt))
++ break;
++ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
++ break;
++ case GIMPLE_COND:
++ case GIMPLE_SWITCH:
++ break;
++ default:
++ debug_gimple_stmt(stmt);
++ gcc_unreachable();
++ break;
++ }
++ }
++ return head;
++}
++
++static void remove_size_overflow_asm(gimple stmt)
++{
++ gimple_stmt_iterator gsi;
++ tree input, output;
++
++ if (!is_size_overflow_asm(stmt))
++ return;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ gsi = gsi_for_stmt(stmt);
++ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
++ gsi_remove(&gsi, true);
++ return;
++ }
++
++ input = gimple_asm_input_op(stmt, 0);
++ output = gimple_asm_output_op(stmt, 0);
++ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
++}
++
++/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
++ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
++ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
++ */
++static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
++{
++ const_tree output;
++ struct pointer_set_t *visited;
++ gimple intentional_asm = NOT_INTENTIONAL_ASM;
++
++ if (!is_size_overflow_asm(stmt))
++ return head;
++
++ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
++ intentional_asm = stmt;
++
++ gcc_assert(gimple_asm_ninputs(stmt) == 1);
++
++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ const_tree input;
++
++ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ input = gimple_asm_input_op(stmt, 0);
++ remove_size_overflow_asm(stmt);
++ if (is_gimple_constant(TREE_VALUE(input)))
++ return head;
++ visited = pointer_set_create();
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++ }
++
++ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
++ remove_size_overflow_asm(stmt);
++
++ visited = pointer_set_create();
++ output = gimple_asm_output_op(stmt, 0);
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++}
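A rough C-level sketch of the marker this function consumes; the asm template and constraints emitted by the earlier gimple pass are not part of this hunk, so they are an assumption based on the MARK_* strings defined in size_overflow.h:

	/* Sketch only: how a size_overflow marker might look before this pass runs. */
	static unsigned long sketch_marked_use(unsigned long val)
	{
		unsigned long marked;

		/* one output, one input; the template is assumed to follow YES_ASM_STR */
		__asm__("# size_overflow MARK_YES " : "=rm" (marked) : "rm" (val));

		/* remove_size_overflow_asm() rewrites such a marker into a plain copy,
		 * effectively: marked = val; the uses of "marked" are then walked by
		 * get_interesting_ret_or_call() above. */
		return marked;
	}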
++
++/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
++ * or a call stmt or a return stmt and store them in the interesting_node list
++ */
++static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
++{
++ basic_block bb;
++ struct interesting_node *head = NULL;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ enum gimple_code code;
++ gimple stmt = gsi_stmt(gsi);
++
++ code = gimple_code(stmt);
++
++ if (code == GIMPLE_ASM)
++ head = handle_stmt_by_size_overflow_asm(stmt, head);
++
++ if (!next_node)
++ continue;
++ if (code == GIMPLE_CALL)
++ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
++ if (code == GIMPLE_RETURN)
++ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
++ }
++ }
++ return head;
++}
++
++static void free_interesting_node(struct interesting_node *head)
++{
++ struct interesting_node *cur;
++
++ while (head) {
++ cur = head->next;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, gc, head->last_nodes);
++#else
++ vec_free(head->last_nodes);
++#endif
++ free(head);
++ head = cur;
++ }
++}
++
++static struct visited_fns *insert_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
++{
++ struct visited_fns *new_visited_fns;
++
++ new_visited_fns = (struct visited_fns *)xmalloc(sizeof(*new_visited_fns));
++ new_visited_fns->fndecl = cur_node->fndecl;
++ new_visited_fns->num = cur_node->num;
++ new_visited_fns->first_stmt = cur_node->first_stmt;
++ new_visited_fns->next = NULL;
++
++ if (!head)
++ return new_visited_fns;
++
++ new_visited_fns->next = head;
++ return new_visited_fns;
++}
++
++/* Check whether the function is already on the visited_fns list. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
++ * the function has already been visited.
++ */
++static bool is_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
++{
++ struct visited_fns *cur;
++
++ if (!head)
++ return false;
++
++ for (cur = head; cur; cur = cur->next) {
++ if (cur_node->first_stmt != cur->first_stmt)
++ continue;
++ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
++ continue;
++ if (cur_node->num == cur->num)
++ return true;
++ }
++ return false;
++}
++
++static void free_next_cgraph_node(struct next_cgraph_node *head)
++{
++ struct next_cgraph_node *cur;
++
++ while (head) {
++ cur = head->next;
++ free(head);
++ head = cur;
++ }
++}
++
++static void remove_all_size_overflow_asm(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ remove_size_overflow_asm(gsi_stmt(si));
++ }
++}
++
++/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
++ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
++ * the newly collected interesting functions (they are interesting if there is control flow between
++ * the interesting stmts and them).
++ */
++static struct visited_fns *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited_fns *visited_fns)
++{
++ struct visited *visited;
++ struct interesting_node *head, *cur_node;
++ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
++
++ set_current_function_decl(NODE_DECL(node));
++ call_count = 0;
++
++ head = collect_interesting_stmts(next_node);
++
++ visited = create_visited();
++ for (cur_node = head; cur_node; cur_node = cur_node->next) {
++ if (is_visited_fns_function(visited_fns, cur_node))
++ continue;
++ cnodes_head = handle_interesting_stmt(visited, cnodes_head, cur_node, node);
++ visited_fns = insert_visited_fns_function(visited_fns, cur_node);
++ }
++
++ free_visited(visited);
++ free_interesting_node(head);
++ remove_all_size_overflow_asm();
++ unset_current_function_decl();
++
++ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
++ visited_fns = handle_function(cur_cnodes->current_function, cur_cnodes, visited_fns);
++
++ free_next_cgraph_node(cnodes_head);
++ return visited_fns;
++}
++
++static void free_visited_fns(struct visited_fns *head)
++{
++ struct visited_fns *cur;
++
++ while (head) {
++ cur = head->next;
++ free(head);
++ head = cur;
++ }
++}
++
++// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
++unsigned int search_function(void)
++{
++ struct cgraph_node *node;
++ struct visited_fns *visited_fns = NULL;
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++ gcc_assert(cgraph_function_flags_ready);
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(node->reachable);
++#endif
++
++ visited_fns = handle_function(node, NULL, visited_fns);
++ }
++
++ free_visited_fns(visited_fns);
++ return 0;
++}
++
++#if BUILDING_GCC_VERSION >= 4009
++static const struct pass_data insert_size_overflow_check_data = {
++#else
++static struct ipa_opt_pass_d insert_size_overflow_check = {
++ .pass = {
++#endif
++ .type = SIMPLE_IPA_PASS,
++ .name = "size_overflow",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++#if BUILDING_GCC_VERSION >= 4009
++ .has_gate = false,
++ .has_execute = true,
++#else
++ .gate = NULL,
++ .execute = search_function,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++#endif
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
++#if BUILDING_GCC_VERSION < 4009
++ },
++ .generate_summary = NULL,
++ .write_summary = NULL,
++ .read_summary = NULL,
++#if BUILDING_GCC_VERSION >= 4006
++ .write_optimization_summary = NULL,
++ .read_optimization_summary = NULL,
++#endif
++ .stmt_fixup = NULL,
++ .function_transform_todo_flags_start = 0,
++ .function_transform = NULL,
++ .variable_transform = NULL,
++#endif
++};
++
++#if BUILDING_GCC_VERSION >= 4009
++namespace {
++class insert_size_overflow_check : public ipa_opt_pass_d {
++public:
++ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++ unsigned int execute() { return search_function(); }
++};
++}
++#endif
++
++struct opt_pass *make_insert_size_overflow_check(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new insert_size_overflow_check();
++#else
++ return &insert_size_overflow_check.pass;
++#endif
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
+new file mode 100644
+index 0000000..967c0b2
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
+@@ -0,0 +1,635 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++/* Get the param of the intentional_overflow attribute.
++ * * 0: MARK_NOT_INTENTIONAL
++ * * 1..MAX_PARAM: MARK_YES
++ * * -1: MARK_TURN_OFF
++ */
++static tree get_attribute_param(const_tree decl)
++{
++ const_tree attr;
++
++ if (decl == NULL_TREE)
++ return NULL_TREE;
++
++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
++ if (!attr || !TREE_VALUE(attr))
++ return NULL_TREE;
++
++ return TREE_VALUE(attr);
++}
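A hedged sketch of how these parameter values might be attached to declarations; the functions are hypothetical, and the attribute itself is registered in size_overflow_plugin.c later in this patch:

	unsigned long example_shift(unsigned long a, unsigned int b)
			__attribute__((intentional_overflow(1)));	/* 1..MAX_PARAM: MARK_YES for arg 1 */

	unsigned long example_hash(unsigned long seed)
			__attribute__((intentional_overflow(-1)));	/* -1: MARK_TURN_OFF */

	unsigned long example_add(unsigned long a, unsigned long b)
			__attribute__((intentional_overflow(0)));	/* 0: MARK_NOT_INTENTIONAL */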
++
++// MARK_TURN_OFF
++bool is_turn_off_intentional_attr(const_tree decl)
++{
++ const_tree param_head;
++
++ param_head = get_attribute_param(decl);
++ if (param_head == NULL_TREE)
++ return false;
++
++ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
++ return true;
++ return false;
++}
++
++// MARK_NOT_INTENTIONAL
++bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
++{
++ const_tree param_head;
++
++ if (argnum == 0)
++ return false;
++
++ param_head = get_attribute_param(decl);
++ if (param_head == NULL_TREE)
++ return false;
++
++ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
++ return true;
++ return false;
++}
++
++// MARK_YES
++bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
++{
++ tree param, param_head;
++
++ if (argnum == 0)
++ return false;
++
++ param_head = get_attribute_param(decl);
++ for (param = param_head; param; param = TREE_CHAIN(param))
++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
++ return true;
++ return false;
++}
++
++void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
++{
++ location_t loc;
++
++ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
++ return;
++
++ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
++ return;
++
++ loc = DECL_SOURCE_LOCATION(decl);
++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
++}
++
++// Get the field decl of a component ref for intentional_overflow checking
++static const_tree search_field_decl(const_tree comp_ref)
++{
++ const_tree field = NULL_TREE;
++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
++
++ for (i = 0; i < len; i++) {
++ field = TREE_OPERAND(comp_ref, i);
++ if (TREE_CODE(field) == FIELD_DECL)
++ break;
++ }
++ gcc_assert(TREE_CODE(field) == FIELD_DECL);
++ return field;
++}
++
++/* Get the type of the intentional_overflow attribute of a node
++ * * MARK_TURN_OFF
++ * * MARK_YES
++ * * MARK_NO
++ * * MARK_NOT_INTENTIONAL
++ */
++enum mark get_intentional_attr_type(const_tree node)
++{
++ const_tree cur_decl;
++
++ if (node == NULL_TREE)
++ return MARK_NO;
++
++ switch (TREE_CODE(node)) {
++ case COMPONENT_REF:
++ cur_decl = search_field_decl(node);
++ if (is_turn_off_intentional_attr(cur_decl))
++ return MARK_TURN_OFF;
++ if (is_end_intentional_intentional_attr(cur_decl, 1))
++ return MARK_YES;
++ break;
++ case PARM_DECL: {
++ unsigned int argnum;
++
++ cur_decl = DECL_ORIGIN(current_function_decl);
++ argnum = find_arg_number_tree(node, cur_decl);
++ if (argnum == CANNOT_FIND_ARG)
++ return MARK_NO;
++ if (is_yes_intentional_attr(cur_decl, argnum))
++ return MARK_YES;
++ if (is_end_intentional_intentional_attr(cur_decl, argnum))
++ return MARK_NOT_INTENTIONAL;
++ break;
++ }
++ case FUNCTION_DECL:
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
++ return MARK_TURN_OFF;
++ break;
++ default:
++ break;
++ }
++ return MARK_NO;
++}
++
++// Search for the intentional_overflow attribute on the last nodes
++static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
++{
++ unsigned int i;
++ tree last_node;
++ enum mark mark = MARK_NO;
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
++#endif
++ mark = get_intentional_attr_type(last_node);
++ if (mark != MARK_NO)
++ break;
++ }
++ return mark;
++}
++
++/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
++ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
++ */
++static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
++{
++ if (!cur_node->intentional_mark_from_gimple)
++ return false;
++
++ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
++ cur_node->intentional_attr_cur_fndecl = MARK_YES;
++ else
++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
++
++ // skip param decls
++ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
++ return true;
++ return true;
++}
++
++/* Search intentional_overflow attribute on caller and on callee too.
++ * 1..MAX_PARAM/MARK_YES: no dup, search size_overflow and intentional_overflow attributes
++ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
++ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
++*/
++void check_intentional_attribute_ipa(struct interesting_node *cur_node)
++{
++ const_tree fndecl;
++
++ if (is_intentional_attribute_from_gimple(cur_node))
++ return;
++
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
++ return;
++ }
++
++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
++ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
++ return;
++ }
++
++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
++ return;
++
++ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
++ if (is_turn_off_intentional_attr(fndecl)) {
++ cur_node->intentional_attr_decl = MARK_TURN_OFF;
++ return;
++ }
++
++ if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
++ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
++ else if (is_yes_intentional_attr(fndecl, cur_node->num))
++ cur_node->intentional_attr_decl = MARK_YES;
++
++ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
++ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
++}
++
++bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
++{
++ const_tree rhs1, lhs, rhs1_type, lhs_type;
++ enum machine_mode lhs_mode, rhs_mode;
++ gimple def_stmt = get_def_stmt(no_const_rhs);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ lhs = gimple_assign_lhs(def_stmt);
++ rhs1_type = TREE_TYPE(rhs1);
++ lhs_type = TREE_TYPE(lhs);
++ rhs_mode = TYPE_MODE(rhs1_type);
++ lhs_mode = TYPE_MODE(lhs_type);
++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
++ return false;
++
++ return true;
++}
++
++static bool no_uses(tree node)
++{
++ imm_use_iterator imm_iter;
++ use_operand_p use_p;
++
++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++ const_gimple use_stmt = USE_STMT(use_p);
++
++ if (use_stmt == NULL)
++ return true;
++ if (is_gimple_debug(use_stmt))
++ continue;
++ return false;
++ }
++ return true;
++}
++
++// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
++bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
++{
++ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs_type = TREE_TYPE(rhs1);
++ lhs_type = TREE_TYPE(lhs);
++ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
++ return false;
++ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
++ return false;
++
++ def_stmt = get_def_stmt(rhs1);
++ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++ return false;
++
++ if (is_gimple_constant(rhs2))
++ not_const_rhs = rhs1;
++ else
++ not_const_rhs = rhs2;
++
++ return no_uses(not_const_rhs);
++}
++
++static bool is_lt_signed_type_max(const_tree rhs)
++{
++ const_tree new_type, type_max, type = TREE_TYPE(rhs);
++
++ if (!TYPE_UNSIGNED(type))
++ return true;
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ new_type = intQI_type_node;
++ break;
++ case HImode:
++ new_type = intHI_type_node;
++ break;
++ case SImode:
++ new_type = intSI_type_node;
++ break;
++ case DImode:
++ new_type = intDI_type_node;
++ break;
++ default:
++ debug_tree((tree)type);
++ gcc_unreachable();
++ }
++
++ type_max = TYPE_MAX_VALUE(new_type);
++ if (!tree_int_cst_lt(type_max, rhs))
++ return true;
++
++ return false;
++}
++
++static bool is_gt_zero(const_tree rhs)
++{
++ const_tree type = TREE_TYPE(rhs);
++
++ if (TYPE_UNSIGNED(type))
++ return true;
++
++ if (!tree_int_cst_lt(rhs, integer_zero_node))
++ return true;
++
++ return false;
++}
++
++bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
++{
++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
++ return false;
++ if (!is_gimple_constant(rhs))
++ return false;
++
++ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow
++ if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs))
++ return false;
++
++ return true;
++}
++
++static tree change_assign_rhs(struct visited *visited, gimple stmt, const_tree orig_rhs, tree new_rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(is_gimple_assign(stmt));
++
++ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++}
++
++tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
++{
++ tree new_rhs, orig_rhs;
++ void (*gimple_assign_set_rhs)(gimple, tree);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++ tree lhs = gimple_assign_lhs(stmt);
++
++ if (!check_overflow)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (change_rhs == NULL_TREE)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (new_rhs2 == NULL_TREE) {
++ orig_rhs = rhs1;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++ } else {
++ orig_rhs = rhs2;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++ }
++
++ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
++
++ new_rhs = change_assign_rhs(visited, stmt, orig_rhs, change_rhs);
++ gimple_assign_set_rhs(stmt, new_rhs);
++ update_stmt(stmt);
++
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++}
++
++static bool is_subtraction_special(struct visited *visited, const_gimple stmt)
++{
++ gimple rhs1_def_stmt, rhs2_def_stmt;
++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
++ const_tree rhs1 = gimple_assign_rhs1(stmt);
++ const_tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++ return false;
++
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ rhs2_def_stmt = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ return false;
++
++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
++ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
++ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
++ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
++ return false;
++ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
++ return false;
++
++ pointer_set_insert(visited->no_cast_check, rhs1_def_stmt);
++ pointer_set_insert(visited->no_cast_check, rhs2_def_stmt);
++ return true;
++}
++
++static gimple create_binary_assign(struct visited *visited, enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree type = TREE_TYPE(rhs1);
++ tree lhs = create_new_var(type);
++
++ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ pointer_set_insert(visited->my_stmts, assign);
++ return assign;
++}
++
++static tree cast_to_TI_type(struct visited *visited, gimple stmt, tree node)
++{
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt;
++ tree type = TREE_TYPE(node);
++
++ if (types_compatible_p(type, intTI_type_node))
++ return node;
++
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, cast_stmt);
++ return gimple_assign_lhs(cast_stmt);
++}
++
++static tree get_def_stmt_rhs(struct visited *visited, const_tree var)
++{
++ tree rhs1, def_stmt_rhs1;
++ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
++
++ def_stmt = get_def_stmt(var);
++ if (!gimple_assign_cast_p(def_stmt))
++ return NULL_TREE;
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && pointer_set_contains(visited->my_stmts, def_stmt) && gimple_assign_cast_p(def_stmt));
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ if (!gimple_assign_cast_p(rhs1_def_stmt))
++ return rhs1;
++
++ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
++
++ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
++ case GIMPLE_CALL:
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ case GIMPLE_PHI:
++ return def_stmt_rhs1;
++ case GIMPLE_ASSIGN:
++ return rhs1;
++ default:
++ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
++ gcc_unreachable();
++ }
++}
++
++tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs)
++{
++ tree new_rhs1, new_rhs2;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_subtraction_special(visited, stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs2);
++
++ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
++ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(visited, MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_assign_lhs(assign);
++ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return false;
++
++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
++ return false;
++
++ def_stmt = get_def_stmt(rhs);
++ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
++ return false;
++
++ return true;
++}
++
++/* e.g., drivers/acpi/acpica/utids.c acpi_ut_execute_CID()
++ * (count - 1) * sizeof(struct acpi_pnp_device_id_list) -> (count + 0xffffffff) * 16
++ * 0xffffffff * 16 > signed max -> truncate
++ */
++static bool look_for_mult_and_add(const_gimple stmt)
++{
++ const_tree res;
++ tree rhs1, rhs2, def_rhs1, def_rhs2, const_rhs, def_const_rhs;
++ const_gimple def_stmt;
++
++ if (!stmt || gimple_code(stmt) == GIMPLE_NOP)
++ return false;
++ if (!is_gimple_assign(stmt))
++ return false;
++ if (gimple_assign_rhs_code(stmt) != MULT_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (is_gimple_constant(rhs1)) {
++ const_rhs = rhs1;
++ def_stmt = get_def_stmt(rhs2);
++ } else if (is_gimple_constant(rhs2)) {
++ const_rhs = rhs2;
++ def_stmt = get_def_stmt(rhs1);
++ } else
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
++ return false;
++
++ def_rhs1 = gimple_assign_rhs1(def_stmt);
++ def_rhs2 = gimple_assign_rhs2(def_stmt);
++ if (is_gimple_constant(def_rhs1))
++ def_const_rhs = def_rhs1;
++ else if (is_gimple_constant(def_rhs2))
++ def_const_rhs = def_rhs2;
++ else
++ return false;
++
++ res = fold_binary_loc(gimple_location(def_stmt), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
++ if (is_lt_signed_type_max(res) && is_gt_zero(res))
++ return false;
++ return true;
++}
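A minimal caller-side sketch of the pattern the comment above refers to; the function is hypothetical, and 16 stands in for the sizeof() of some descriptor struct:

	static unsigned long example_id_list_size(unsigned int count)
	{
		/* count == 0 makes (count - 1) wrap to 0xffffffff; folding the two
		 * constants gives 0xffffffff * 16, which exceeds the signed maximum,
		 * so look_for_mult_and_add() treats the overflow as intentional and
		 * the expression is left un-instrumented. */
		return (count - 1) * 16;
	}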
++
++enum intentional_overflow_type add_mul_intentional_overflow(const_gimple stmt)
++{
++ const_gimple def_stmt_1, def_stmt_2;
++ const_tree rhs1, rhs2;
++ bool add_mul_rhs1, add_mul_rhs2;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ def_stmt_1 = get_def_stmt(rhs1);
++ add_mul_rhs1 = look_for_mult_and_add(def_stmt_1);
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ def_stmt_2 = get_def_stmt(rhs2);
++ add_mul_rhs2 = look_for_mult_and_add(def_stmt_2);
++
++ if (add_mul_rhs1)
++ return RHS1_INTENTIONAL_OVERFLOW;
++ if (add_mul_rhs2)
++ return RHS2_INTENTIONAL_OVERFLOW;
++ return NO_INTENTIONAL_OVERFLOW;
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c
+new file mode 100644
+index 0000000..ca4def3
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/misc.c
+@@ -0,0 +1,180 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++void set_current_function_decl(tree fndecl)
++{
++ gcc_assert(fndecl != NULL_TREE);
++
++ push_cfun(DECL_STRUCT_FUNCTION(fndecl));
++ calculate_dominance_info(CDI_DOMINATORS);
++ current_function_decl = fndecl;
++}
++
++void unset_current_function_decl(void)
++{
++ free_dominance_info(CDI_DOMINATORS);
++ pop_cfun();
++ current_function_decl = NULL_TREE;
++}
++
++static bool is_bool(const_tree node)
++{
++ const_tree type;
++
++ if (node == NULL_TREE)
++ return false;
++
++ type = TREE_TYPE(node);
++ if (!INTEGRAL_TYPE_P(type))
++ return false;
++ if (TREE_CODE(type) == BOOLEAN_TYPE)
++ return true;
++ if (TYPE_PRECISION(type) == 1)
++ return true;
++ return false;
++}
++
++bool skip_types(const_tree var)
++{
++ tree type;
++ enum tree_code code;
++
++ if (is_gimple_constant(var))
++ return true;
++
++ switch (TREE_CODE(var)) {
++ case ADDR_EXPR:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case INDIRECT_REF:
++ case TARGET_MEM_REF:
++ case COMPONENT_REF:
++ case VAR_DECL:
++ case VIEW_CONVERT_EXPR:
++ return true;
++ default:
++ break;
++ }
++
++ code = TREE_CODE(var);
++ gcc_assert(code == SSA_NAME || code == PARM_DECL);
++
++ type = TREE_TYPE(var);
++ switch (TREE_CODE(type)) {
++ case INTEGER_TYPE:
++ case ENUMERAL_TYPE:
++ return false;
++ case BOOLEAN_TYPE:
++ return is_bool(var);
++ default:
++ return true;
++ }
++}
++
++gimple get_def_stmt(const_tree node)
++{
++ gcc_assert(node != NULL_TREE);
++
++ if (skip_types(node))
++ return NULL;
++
++ if (TREE_CODE(node) != SSA_NAME)
++ return NULL;
++ return SSA_NAME_DEF_STMT(node);
++}
++
++tree create_new_var(tree type)
++{
++ tree new_var = create_tmp_var(type, "cicus");
++
++ add_referenced_var(new_var);
++ return new_var;
++}
++
++static bool skip_cast(tree dst_type, const_tree rhs, bool force)
++{
++ const_gimple def_stmt = get_def_stmt(rhs);
++
++ if (force)
++ return false;
++
++ if (is_gimple_constant(rhs))
++ return false;
++
++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++ return false;
++
++ if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
++ return false;
++
++ // DI type can be on 32 bit (from create_assign) but overflow type stays DI
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ return false;
++
++ return true;
++}
++
++tree cast_a_tree(tree type, tree var)
++{
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(var != NULL_TREE);
++ gcc_assert(fold_convertible_p(type, var));
++
++ return fold_convert(type, var);
++}
++
++gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
++{
++ gimple assign, def_stmt;
++
++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
++ gcc_assert(!is_gimple_constant(rhs));
++ if (gsi_end_p(*gsi) && before == AFTER_STMT)
++ gcc_unreachable();
++
++ def_stmt = get_def_stmt(rhs);
++ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && pointer_set_contains(visited->my_stmts, def_stmt))
++ return def_stmt;
++
++ if (lhs == CREATE_NEW_VAR)
++ lhs = create_new_var(dst_type);
++
++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
++
++ if (!gsi_end_p(*gsi)) {
++ location_t loc = gimple_location(gsi_stmt(*gsi));
++ gimple_set_location(assign, loc);
++ }
++
++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ if (before)
++ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++ else
++ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ return assign;
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
+new file mode 100644
+index 0000000..ee108f0
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++bool skip_expr_on_double_type(const_gimple stmt)
++{
++ enum tree_code code = gimple_assign_rhs_code(stmt);
++
++ switch (code) {
++ case RSHIFT_EXPR:
++ case TRUNC_DIV_EXPR:
++ case CEIL_DIV_EXPR:
++ case FLOOR_DIV_EXPR:
++ case ROUND_DIV_EXPR:
++ case EXACT_DIV_EXPR:
++ case RDIV_EXPR:
++ case TRUNC_MOD_EXPR:
++ case CEIL_MOD_EXPR:
++ case FLOOR_MOD_EXPR:
++ case ROUND_MOD_EXPR:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool is_size_overflow_type(const_tree var)
++{
++ const char *name;
++ const_tree type_name, type;
++
++ if (var == NULL_TREE)
++ return false;
++
++ type = TREE_TYPE(var);
++ type_name = TYPE_NAME(type);
++ if (type_name == NULL_TREE)
++ return false;
++
++ if (DECL_P(type_name))
++ name = DECL_NAME_POINTER(type_name);
++ else
++ name = IDENTIFIER_POINTER(type_name);
++
++ if (!strncmp(name, "size_overflow_type", 18))
++ return true;
++ return false;
++}
++
++static void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs)
++{
++ const_tree orig_rhs1;
++ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs);
++ gimple down_cast, up_cast;
++ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt);
++
++ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ down_lhs = gimple_assign_lhs(down_cast);
++
++ gsi = gsi_for_stmt(use_stmt);
++ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ new_lhs = gimple_assign_lhs(up_cast);
++
++ orig_rhs1 = gimple_assign_rhs1(use_stmt);
++ if (operand_equal_p(orig_rhs1, rhs, 0))
++ gimple_assign_set_rhs1(use_stmt, new_lhs);
++ else
++ gimple_assign_set_rhs2(use_stmt, new_lhs);
++ update_stmt(use_stmt);
++
++ pointer_set_insert(visited->my_stmts, up_cast);
++ pointer_set_insert(visited->my_stmts, down_cast);
++ pointer_set_insert(visited->skip_expr_casts, up_cast);
++ pointer_set_insert(visited->skip_expr_casts, down_cast);
++}
++
++static tree get_proper_unsigned_half_type(const_tree node)
++{
++ tree new_type, type;
++
++ gcc_assert(is_size_overflow_type(node));
++
++ type = TREE_TYPE(node);
++ switch (TYPE_MODE(type)) {
++ case HImode:
++ new_type = unsigned_intQI_type_node;
++ break;
++ case SImode:
++ new_type = unsigned_intHI_type_node;
++ break;
++ case DImode:
++ new_type = unsigned_intSI_type_node;
++ break;
++ case TImode:
++ new_type = unsigned_intDI_type_node;
++ break;
++ default:
++ gcc_unreachable();
++ }
++
++ if (TYPE_QUALS(type) != 0)
++ return build_qualified_type(new_type, TYPE_QUALS(type));
++ return new_type;
++}
++
++static void insert_cast_rhs(struct visited *visited, gimple stmt, tree rhs)
++{
++ tree type;
++
++ if (rhs == NULL_TREE)
++ return;
++ if (!is_size_overflow_type(rhs))
++ return;
++
++ type = get_proper_unsigned_half_type(rhs);
++ if (is_gimple_constant(rhs))
++ return;
++ create_up_and_down_cast(visited, stmt, type, rhs);
++}
++
++static void insert_cast(struct visited *visited, gimple stmt, tree rhs)
++{
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && !is_size_overflow_type(rhs))
++ return;
++ gcc_assert(is_size_overflow_type(rhs));
++ insert_cast_rhs(visited, stmt, rhs);
++}
++
++void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type)
++{
++ tree rhs1, rhs2;
++
++ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS1_INTENTIONAL_OVERFLOW) {
++ rhs1 = gimple_assign_rhs1(stmt);
++ insert_cast(visited, stmt, rhs1);
++ }
++
++ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS2_INTENTIONAL_OVERFLOW) {
++ rhs2 = gimple_assign_rhs2(stmt);
++ insert_cast(visited, stmt, rhs2);
++ }
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h
+new file mode 100644
+index 0000000..0729973
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/size_overflow.h
+@@ -0,0 +1,124 @@
++#ifndef SIZE_OVERFLOW_H
++#define SIZE_OVERFLOW_H
++
++#define CREATE_NEW_VAR NULL_TREE
++#define CANNOT_FIND_ARG 32
++#define MAX_PARAM 31
++#define BEFORE_STMT true
++#define AFTER_STMT false
++
++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
++#define YES_ASM_STR "# size_overflow MARK_YES "
++#define OK_ASM_STR "# size_overflow "
++
++enum mark {
++ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
++};
++
++enum intentional_overflow_type {
++ NO_INTENTIONAL_OVERFLOW, RHS1_INTENTIONAL_OVERFLOW, RHS2_INTENTIONAL_OVERFLOW
++};
++
++struct visited {
++ struct pointer_set_t *stmts;
++ struct pointer_set_t *my_stmts;
++ struct pointer_set_t *skip_expr_casts;
++ struct pointer_set_t *no_cast_check;
++};
++
++// size_overflow_plugin.c
++extern tree report_size_overflow_decl;
++extern tree size_overflow_type_HI;
++extern tree size_overflow_type_SI;
++extern tree size_overflow_type_DI;
++extern tree size_overflow_type_TI;
++
++
++// size_overflow_plugin_hash.c
++struct size_overflow_hash {
++ const struct size_overflow_hash * const next;
++ const char * const name;
++ const unsigned int param;
++};
++
++struct interesting_node {
++ struct interesting_node *next;
++ gimple first_stmt;
++ const_tree fndecl;
++ tree node;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *last_nodes;
++#else
++ vec<tree, va_gc> *last_nodes;
++#endif
++ unsigned int num;
++ enum mark intentional_attr_decl;
++ enum mark intentional_attr_cur_fndecl;
++ gimple intentional_mark_from_gimple;
++};
++
++extern bool is_size_overflow_asm(const_gimple stmt);
++extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl);
++extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl);
++extern bool is_missing_function(const_tree orig_fndecl, unsigned int num);
++extern bool is_a_return_check(const_tree node);
++extern const struct size_overflow_hash *get_function_hash(const_tree fndecl);
++extern unsigned int find_arg_number_tree(const_tree arg, const_tree func);
++
++
++// size_overflow_debug.c
++extern struct opt_pass *make_dump_pass(void);
++
++
++// intentional_overflow.c
++extern enum mark get_intentional_attr_type(const_tree node);
++extern bool is_size_overflow_intentional_asm_yes(const_gimple stmt);
++extern bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt);
++extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum);
++extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum);
++extern bool is_turn_off_intentional_attr(const_tree decl);
++extern void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum);
++extern void check_intentional_attribute_ipa(struct interesting_node *cur_node);
++extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs);
++extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs);
++extern bool is_a_constant_overflow(const_gimple stmt, const_tree rhs);
++extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2);
++extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs);
++extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs);
++extern enum intentional_overflow_type add_mul_intentional_overflow(const_gimple def_stmt);
++
++
++// insert_size_overflow_check_ipa.c
++extern unsigned int search_function(void);
++extern unsigned int call_count;
++extern struct opt_pass *make_insert_size_overflow_check(void);
++extern const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum);
++
++
++// insert_size_overflow_asm.c
++extern struct opt_pass *make_insert_size_overflow_asm_pass(void);
++
++
++// misc.c
++extern void set_current_function_decl(tree fndecl);
++extern void unset_current_function_decl(void);
++extern gimple get_def_stmt(const_tree node);
++extern tree create_new_var(tree type);
++extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force);
++extern bool skip_types(const_tree var);
++extern tree cast_a_tree(tree type, tree var);
++
++
++// insert_size_overflow_check_core.c
++extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs);
++extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
++extern tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
++extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before);
++
++
++// remove_unnecessary_dup.c
++extern struct opt_pass *make_remove_unnecessary_dup_pass(void);
++extern void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type);
++extern bool skip_expr_on_double_type(const_gimple stmt);
++
++#endif
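To make check_size_overflow() and report_size_overflow_decl above a little more concrete, here is a hand-written C analogue of the inserted check. This is a sketch, not the generated GIMPLE; the kernel-side report_size_overflow() implementation (which logs the event and kills the process) is outside this hunk, and only its signature is taken from size_overflow_plugin.c below:

	extern void report_size_overflow(const char *loc_file, unsigned int loc_line,
					 const char *current_func, const char *ssa_var);

	static unsigned int checked_mul(unsigned int a, unsigned int b)
	{
		/* recompute with double precision (SImode operands, DImode result) */
		unsigned long long wide = (unsigned long long)a * b;

		/* compare against TYPE_MAX of the original 32-bit unsigned type */
		if (wide > 0xffffffffULL)
			report_size_overflow(__FILE__, __LINE__, __func__, "wide");
		return (unsigned int)wide;
	}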
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
+new file mode 100644
+index 0000000..4378111
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
+@@ -0,0 +1,116 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++
++static unsigned int dump_functions(void)
++{
++ struct cgraph_node *node;
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++ basic_block bb;
++
++ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
++ current_function_decl = NODE_DECL(node);
++
++ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator si;
++
++ fprintf(stderr, "<bb %u>:\n", bb->index);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ debug_gimple_stmt(gsi_stmt(si));
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ debug_gimple_stmt(gsi_stmt(si));
++ fprintf(stderr, "\n");
++ }
++
++ fprintf(stderr, "-------------------------------------------------------------------------\n");
++
++ pop_cfun();
++ current_function_decl = NULL_TREE;
++ }
++
++ fprintf(stderr, "###############################################################################\n");
++
++ return 0;
++}
++
++#if BUILDING_GCC_VERSION >= 4009
++static const struct pass_data dump_pass_data = {
++#else
++static struct ipa_opt_pass_d dump_pass = {
++ .pass = {
++#endif
++ .type = SIMPLE_IPA_PASS,
++ .name = "dump",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++#if BUILDING_GCC_VERSION >= 4009
++ .has_gate = false,
++ .has_execute = true,
++#else
++ .gate = NULL,
++ .execute = dump_functions,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++#endif
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0,
++#if BUILDING_GCC_VERSION < 4009
++ },
++ .generate_summary = NULL,
++ .write_summary = NULL,
++ .read_summary = NULL,
++#if BUILDING_GCC_VERSION >= 4006
++ .write_optimization_summary = NULL,
++ .read_optimization_summary = NULL,
++#endif
++ .stmt_fixup = NULL,
++ .function_transform_todo_flags_start = 0,
++ .function_transform = NULL,
++ .variable_transform = NULL,
++#endif
++};
++
++#if BUILDING_GCC_VERSION >= 4009
++namespace {
++class dump_pass : public ipa_opt_pass_d {
++public:
++ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++ unsigned int execute() { return dump_functions(); }
++};
++}
++#endif
++
++struct opt_pass *make_dump_pass(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new dump_pass();
++#else
++ return &dump_pass.pass;
++#endif
++}
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
index 0000000..41777a8
--- /dev/null
-+++ b/tools/gcc/size_overflow_hash.data
++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
@@ -0,0 +1,5934 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
@@ -115845,11 +119884,11 @@ index 0000000..41777a8
+lookup_inline_extent_backref_65493 lookup_inline_extent_backref 9 65493 NULL
+nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
+tree_mod_log_eb_copy_65535 tree_mod_log_eb_copy 6 65535 NULL
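The column layout of these data lines is not documented in this hunk; inferring from struct size_overflow_hash in size_overflow.h and the "+func+argnum+hash+" format printed by is_missing_function() further down, the last line above reads roughly as:

	tree_mod_log_eb_copy_65535   tree_mod_log_eb_copy   6        65535   NULL
	entry id (name + hash)       checked function       argnum   hash    next (no chained entry)

Treat the per-column interpretation as an inference rather than a specification; the generator script that turns this file into size_overflow_hash.h is not part of this hunk.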
-diff --git a/tools/gcc/size_overflow_hash_aux.data b/tools/gcc/size_overflow_hash_aux.data
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data
new file mode 100644
index 0000000..560cd7b
--- /dev/null
-+++ b/tools/gcc/size_overflow_hash_aux.data
++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data
@@ -0,0 +1,92 @@
+spa_set_aux_vdevs_746 spa_set_aux_vdevs 3 746 NULL
+zfs_lookup_2144 zfs_lookup 0 2144 NULL
@@ -115943,12 +119982,12 @@ index 0000000..560cd7b
+proc_copyin_string_62019 proc_copyin_string 4 62019 NULL
+random_get_pseudo_bytes_64611 random_get_pseudo_bytes 2 64611 NULL
+zpios_read_64734 zpios_read 3 64734 NULL
-diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..948ec25
+index 0000000..0e9dcee
--- /dev/null
-+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,4169 @@
++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
+@@ -0,0 +1,259 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -115964,100 +120003,27 @@ index 0000000..948ec25
+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
+ *
+ * Usage:
-+ * $ # for 4.5/4.6/C based 4.7
-+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu99 -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
-+ * $ # for C++ based 4.7/4.8+
-+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu++98 -fno-rtti -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
-+ *
-+ * $ gcc -fplugin=./size_overflow_plugin.so test.c -O2
++ * $ make
++ * $ make run
+ */
+
+#include "gcc-common.h"
++#include "size_overflow.h"
+
+int plugin_is_GPL_compatible;
+
-+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140407",
-+ .help = "no-size-overflow\tturn off size overflow checking\n",
-+};
++tree report_size_overflow_decl;
+
-+#define BEFORE_STMT true
-+#define AFTER_STMT false
-+#define CREATE_NEW_VAR NULL_TREE
-+#define CODES_LIMIT 32
-+#define MAX_PARAM 31
-+#define VEC_LEN 128
-+#define RET_CHECK NULL_TREE
-+#define CANNOT_FIND_ARG 32
-+#define WRONG_NODE 32
-+#define NOT_INTENTIONAL_ASM NULL
-+#define MIN_CHECK true
-+#define MAX_CHECK false
-+
-+#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
-+#define YES_ASM_STR "# size_overflow MARK_YES "
-+#define OK_ASM_STR "# size_overflow "
-+
-+struct size_overflow_hash {
-+ const struct size_overflow_hash * const next;
-+ const char * const name;
-+ const unsigned int param;
-+};
-+
-+#include "size_overflow_hash.h"
-+#include "size_overflow_hash_aux.h"
-+
-+enum mark {
-+ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
-+};
++tree size_overflow_type_HI;
++tree size_overflow_type_SI;
++tree size_overflow_type_DI;
++tree size_overflow_type_TI;
+
-+static unsigned int call_count;
-+
-+enum stmt_flags {
-+ MY_STMT, NO_CAST_CHECK, VISITED_STMT, NO_FLAGS
-+};
-+
-+struct visited {
-+ struct visited *next;
-+ const_tree fndecl;
-+ unsigned int num;
-+};
-+
-+struct next_cgraph_node {
-+ struct next_cgraph_node *next;
-+ struct cgraph_node *current_function;
-+ tree callee_fndecl;
-+ unsigned int num;
-+};
-+
-+struct interesting_node {
-+ struct interesting_node *next;
-+ gimple first_stmt;
-+ const_tree fndecl;
-+ tree node;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *last_nodes;
-+#else
-+ vec<tree, va_gc> *last_nodes;
-+#endif
-+ unsigned int num;
-+ enum mark intentional_attr_decl;
-+ enum mark intentional_attr_cur_fndecl;
-+ gimple intentional_mark_from_gimple;
++static struct plugin_info size_overflow_plugin_info = {
++ .version = "20140515",
++ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
-+static tree report_size_overflow_decl;
-+
-+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
-+
-+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
-+static tree get_size_overflow_type(gimple stmt, const_tree node);
-+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
-+
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
+ unsigned int arg_count;
@@ -116153,117 +120119,166 @@ index 0000000..948ec25
+ register_attribute(&intentional_overflow_attr);
+}
+
-+static enum stmt_flags get_stmt_flag(gimple stmt)
++static tree create_typedef(tree type, const char* ident)
+{
-+ bool bit_1, bit_2;
++ tree new_type, decl;
+
-+ bit_1 = gimple_plf(stmt, GF_PLF_1);
-+ bit_2 = gimple_plf(stmt, GF_PLF_2);
-+
-+ if (!bit_1 && !bit_2)
-+ return NO_FLAGS;
-+ if (bit_1 && bit_2)
-+ return MY_STMT;
-+ if (!bit_1 && bit_2)
-+ return VISITED_STMT;
-+ return NO_CAST_CHECK;
++ new_type = build_variant_type_copy(type);
++ decl = build_decl(BUILTINS_LOCATION, TYPE_DECL, get_identifier(ident), new_type);
++ DECL_ORIGINAL_TYPE(decl) = type;
++ TYPE_NAME(new_type) = decl;
++ return new_type;
+}
+
-+static void set_stmt_flag(gimple stmt, enum stmt_flags new_flag)
++// Create the noreturn report_size_overflow() function decl.
++static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
+{
-+ bool bit_1, bit_2;
++ tree const_char_ptr_type_node;
++ tree fntype;
+
-+ switch (new_flag) {
-+ case NO_FLAGS:
-+ bit_1 = bit_2 = false;
-+ break;
-+ case MY_STMT:
-+ bit_1 = bit_2 = true;
-+ break;
-+ case VISITED_STMT:
-+ bit_1 = false;
-+ bit_2 = true;
-+ break;
-+ case NO_CAST_CHECK:
-+ bit_1 = true;
-+ bit_2 = false;
-+ break;
-+ default:
-+ gcc_unreachable();
-+ }
++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
++
++ size_overflow_type_HI = create_typedef(intHI_type_node, "size_overflow_type_HI");
++ size_overflow_type_SI = create_typedef(intSI_type_node, "size_overflow_type_SI");
++ size_overflow_type_DI = create_typedef(intDI_type_node, "size_overflow_type_DI");
++ size_overflow_type_TI = create_typedef(intTI_type_node, "size_overflow_type_TI");
+
-+ gimple_set_plf(stmt, GF_PLF_1, bit_1);
-+ gimple_set_plf(stmt, GF_PLF_2, bit_2);
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
++ fntype = build_function_type_list(void_type_node,
++ const_char_ptr_type_node,
++ unsigned_type_node,
++ const_char_ptr_type_node,
++ const_char_ptr_type_node,
++ NULL_TREE);
++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
++
++ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
++ TREE_PUBLIC(report_size_overflow_decl) = 1;
++ DECL_EXTERNAL(report_size_overflow_decl) = 1;
++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+}
+
-+static bool is_bool(const_tree node)
-+{
-+ const_tree type;
+
-+ if (node == NULL_TREE)
-+ return false;
++extern struct gimple_opt_pass pass_dce;
+
-+ type = TREE_TYPE(node);
-+ if (!INTEGRAL_TYPE_P(type))
-+ return false;
-+ if (TREE_CODE(type) == BOOLEAN_TYPE)
-+ return true;
-+ if (TYPE_PRECISION(type) == 1)
-+ return true;
-+ return false;
++static struct opt_pass *make_dce_pass(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return make_pass_dce(g);
++#else
++ return &pass_dce.pass;
++#endif
+}
+
-+static bool skip_types(const_tree var)
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
-+ tree type;
-+ enum tree_code code;
++ int i;
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ bool enable = true;
++ struct register_pass_info insert_size_overflow_asm_pass_info;
++ struct register_pass_info __unused dump_before_pass_info;
++ struct register_pass_info __unused dump_after_pass_info;
++ struct register_pass_info insert_size_overflow_check_info;
++ struct register_pass_info dce_pass_info;
++ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
++ {
++ .base = &report_size_overflow_decl,
++ .nelt = 1,
++ .stride = sizeof(report_size_overflow_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
-+ if (is_gimple_constant(var))
-+ return true;
++ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
++ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
++ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
++ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
+
-+ switch (TREE_CODE(var)) {
-+ case ADDR_EXPR:
-+#if BUILDING_GCC_VERSION >= 4006
-+ case MEM_REF:
-+#endif
-+ case ARRAY_REF:
-+ case BIT_FIELD_REF:
-+ case INDIRECT_REF:
-+ case TARGET_MEM_REF:
-+ case COMPONENT_REF:
-+ case VAR_DECL:
-+ case VIEW_CONVERT_EXPR:
-+ return true;
-+ default:
-+ break;
++ dump_before_pass_info.pass = make_dump_pass();
++ dump_before_pass_info.reference_pass_name = "increase_alignment";
++ dump_before_pass_info.ref_pass_instance_number = 1;
++ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
++
++ insert_size_overflow_check_info.pass = make_insert_size_overflow_check();
++ insert_size_overflow_check_info.reference_pass_name = "increase_alignment";
++ insert_size_overflow_check_info.ref_pass_instance_number = 1;
++ insert_size_overflow_check_info.pos_op = PASS_POS_INSERT_BEFORE;
++
++ dump_after_pass_info.pass = make_dump_pass();
++ dump_after_pass_info.reference_pass_name = "increase_alignment";
++ dump_after_pass_info.ref_pass_instance_number = 1;
++ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
++
++ dce_pass_info.pass = make_dce_pass();
++ dce_pass_info.reference_pass_name = "vrp";
++ dce_pass_info.ref_pass_instance_number = 1;
++ dce_pass_info.pos_op = PASS_POS_INSERT_AFTER;
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
+ }
+
-+ code = TREE_CODE(var);
-+ gcc_assert(code == SSA_NAME || code == PARM_DECL);
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "no-size-overflow")) {
++ enable = false;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
+
-+ type = TREE_TYPE(var);
-+ switch (TREE_CODE(type)) {
-+ case INTEGER_TYPE:
-+ case ENUMERAL_TYPE:
-+ return false;
-+ case BOOLEAN_TYPE:
-+ return is_bool(var);
-+ default:
-+ return true;
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
++ if (enable) {
++ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_check_info);
++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info);
+ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
+}
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
+new file mode 100644
+index 0000000..0888f6c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
+@@ -0,0 +1,364 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
+
-+static inline gimple get_def_stmt(const_tree node)
-+{
-+ gcc_assert(node != NULL_TREE);
++#include "gcc-common.h"
++#include "size_overflow.h"
+
-+ if (skip_types(node))
-+ return NULL;
++#include "size_overflow_hash.h"
++#include "size_overflow_hash_aux.h"
+
-+ if (TREE_CODE(node) != SSA_NAME)
-+ return NULL;
-+ return SSA_NAME_DEF_STMT(node);
-+}
++#define CODES_LIMIT 32
+
+static unsigned char get_tree_code(const_tree type)
+{
@@ -116407,7 +120422,7 @@ index 0000000..948ec25
+ return NULL;
+}
+
-+static const struct size_overflow_hash *get_function_hash(const_tree fndecl)
++const struct size_overflow_hash *get_function_hash(const_tree fndecl)
+{
+ const struct size_overflow_hash *entry;
+ struct function_hash fn_hash_data;
@@ -116452,7 +120467,7 @@ index 0000000..948ec25
+ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
+}
+
-+static unsigned int find_arg_number_tree(const_tree arg, const_tree func)
++unsigned int find_arg_number_tree(const_tree arg, const_tree func)
+{
+ tree var;
+ unsigned int argnum = 1;
@@ -116470,1296 +120485,65 @@ index 0000000..948ec25
+ return CANNOT_FIND_ARG;
+}
+
-+static tree create_new_var(tree type)
-+{
-+ tree new_var = create_tmp_var(type, "cicus");
-+
-+ add_referenced_var(new_var);
-+ return new_var;
-+}
-+
-+static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree type = TREE_TYPE(rhs1);
-+ tree lhs = create_new_var(type);
-+
-+ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
-+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
-+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
-+
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+ set_stmt_flag(assign, MY_STMT);
-+ return assign;
-+}
-+
-+static tree cast_a_tree(tree type, tree var)
-+{
-+ gcc_assert(type != NULL_TREE);
-+ gcc_assert(var != NULL_TREE);
-+ gcc_assert(fold_convertible_p(type, var));
-+
-+ return fold_convert(type, var);
-+}
-+
-+static tree get_lhs(const_gimple stmt)
-+{
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ return gimple_get_lhs(stmt);
-+ case GIMPLE_PHI:
-+ return gimple_phi_result(stmt);
-+ default:
-+ return NULL_TREE;
-+ }
-+}
-+
-+static bool skip_cast(tree dst_type, const_tree rhs, bool force)
-+{
-+ const_gimple def_stmt = get_def_stmt(rhs);
-+
-+ if (force)
-+ return false;
-+
-+ if (is_gimple_constant(rhs))
-+ return false;
-+
-+ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+ return false;
-+
-+ if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
-+ return false;
-+
-+ // DI type can be on 32 bit (from create_assign) but overflow type stays DI
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ return false;
-+
-+ return true;
-+}
-+
-+static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
-+{
-+ gimple assign, def_stmt;
-+
-+ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
-+ if (gsi_end_p(*gsi) && before == AFTER_STMT)
-+ gcc_unreachable();
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && get_stmt_flag(def_stmt) == MY_STMT)
-+ return def_stmt;
-+
-+ if (lhs == CREATE_NEW_VAR)
-+ lhs = create_new_var(dst_type);
-+
-+ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
-+
-+ if (!gsi_end_p(*gsi)) {
-+ location_t loc = gimple_location(gsi_stmt(*gsi));
-+ gimple_set_location(assign, loc);
-+ }
-+
-+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
-+
-+ if (before)
-+ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
-+ else
-+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+ return assign;
-+}
-+
-+static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
-+{
-+ gimple_stmt_iterator gsi;
-+ tree lhs;
-+ gimple new_stmt;
-+
-+ if (rhs == NULL_TREE)
-+ return NULL_TREE;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
-+ set_stmt_flag(new_stmt, MY_STMT);
-+
-+ lhs = get_lhs(new_stmt);
-+ gcc_assert(lhs != NULL_TREE);
-+ return lhs;
-+}
-+
-+static tree cast_to_TI_type(gimple stmt, tree node)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple cast_stmt;
-+ tree type = TREE_TYPE(node);
-+
-+ if (types_compatible_p(type, intTI_type_node))
-+ return node;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(cast_stmt, MY_STMT);
-+ return gimple_assign_lhs(cast_stmt);
-+}
-+
-+static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
-+{
-+ tree lhs, new_lhs;
-+ gimple_stmt_iterator gsi;
-+
-+ if (rhs1 == NULL_TREE) {
-+ debug_gimple_stmt(oldstmt);
-+ error("%s: rhs1 is NULL_TREE", __func__);
-+ gcc_unreachable();
-+ }
-+
-+ switch (gimple_code(oldstmt)) {
-+ case GIMPLE_ASM:
-+ lhs = rhs1;
-+ break;
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASSIGN:
-+ lhs = gimple_get_lhs(oldstmt);
-+ break;
-+ default:
-+ debug_gimple_stmt(oldstmt);
-+ gcc_unreachable();
-+ }
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ pointer_set_insert(visited, oldstmt);
-+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
-+ basic_block next_bb, cur_bb;
-+ const_edge e;
-+
-+ gcc_assert(before == false);
-+ gcc_assert(stmt_can_throw_internal(oldstmt));
-+ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
-+ gcc_assert(!gsi_end_p(gsi));
-+
-+ cur_bb = gimple_bb(oldstmt);
-+ next_bb = cur_bb->next_bb;
-+ e = find_edge(cur_bb, next_bb);
-+ gcc_assert(e != NULL);
-+ gcc_assert(e->flags & EDGE_FALLTHRU);
-+
-+ gsi = gsi_after_labels(next_bb);
-+ gcc_assert(!gsi_end_p(gsi));
-+
-+ before = true;
-+ oldstmt = gsi_stmt(gsi);
-+ }
-+
-+ new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
-+ return new_lhs;
-+}
-+
-+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
-+{
-+ gimple stmt;
-+ gimple_stmt_iterator gsi;
-+ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
-+
-+ if (get_stmt_flag(oldstmt) == MY_STMT)
-+ return lhs;
-+
-+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
-+ rhs1 = gimple_assign_rhs1(oldstmt);
-+ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
-+ }
-+ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
-+ rhs2 = gimple_assign_rhs2(oldstmt);
-+ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
-+ }
-+
-+ stmt = gimple_copy(oldstmt);
-+ gimple_set_location(stmt, gimple_location(oldstmt));
-+ set_stmt_flag(stmt, MY_STMT);
-+
-+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
-+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
-+
-+ size_overflow_type = get_size_overflow_type(oldstmt, node);
-+
-+ new_var = create_new_var(size_overflow_type);
-+ new_var = make_ssa_name(new_var, stmt);
-+ gimple_assign_set_lhs(stmt, new_var);
-+
-+ if (rhs1 != NULL_TREE)
-+ gimple_assign_set_rhs1(stmt, rhs1);
-+
-+ if (rhs2 != NULL_TREE)
-+ gimple_assign_set_rhs2(stmt, rhs2);
-+#if BUILDING_GCC_VERSION >= 4006
-+ if (rhs3 != NULL_TREE)
-+ gimple_assign_set_rhs3(stmt, rhs3);
-+#endif
-+ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
-+ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
-+ update_stmt(stmt);
-+ pointer_set_insert(visited, oldstmt);
-+ return gimple_assign_lhs(stmt);
-+}
-+
-+static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi;
-+ basic_block first_bb;
-+
-+ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
-+
-+ if (bb->index == 0) {
-+ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
-+ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
-+ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
-+ bb = first_bb;
-+ }
-+
-+ gsi = gsi_after_labels(bb);
-+ assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign, def_stmt = get_def_stmt(new_arg);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI) {
-+ gsi = gsi_after_labels(gimple_bb(def_stmt));
-+ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
-+ } else {
-+ gsi = gsi_for_stmt(def_stmt);
-+ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
-+ }
-+
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type)
-+{
-+ basic_block bb;
-+ gimple_stmt_iterator gsi;
-+ const_gimple def_stmt;
-+ gimple assign;
-+
-+ def_stmt = get_def_stmt(arg);
-+ bb = gimple_bb(def_stmt);
-+ gcc_assert(bb->index != 0);
-+ gsi = gsi_after_labels(bb);
-+
-+ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
-+{
-+ tree size_overflow_type;
-+ tree arg;
-+ const_gimple def_stmt;
-+
-+ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
-+ return new_arg;
-+
-+ arg = gimple_phi_arg_def(oldstmt, i);
-+ def_stmt = get_def_stmt(arg);
-+ gcc_assert(def_stmt != NULL);
-+ size_overflow_type = get_size_overflow_type(oldstmt, arg);
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_PHI:
-+ return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type);
-+ case GIMPLE_NOP: {
-+ basic_block bb;
-+
-+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
-+ return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb);
-+ }
-+ case GIMPLE_ASM: {
-+ gimple_stmt_iterator gsi;
-+ gimple assign, stmt = get_def_stmt(arg);
-+
-+ gsi = gsi_for_stmt(stmt);
-+ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
-+ }
-+ default:
-+ gcc_assert(new_arg != NULL_TREE);
-+ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
-+ return use_phi_ssa_name(ssa_name_var, new_arg);
-+ }
-+}
-+
-+static gimple overflow_create_phi_node(gimple oldstmt, tree result)
-+{
-+ basic_block bb;
-+ gimple phi;
-+ gimple_seq seq;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
-+
-+ bb = gsi_bb(gsi);
-+
-+ if (result == NULL_TREE) {
-+ tree old_result = gimple_phi_result(oldstmt);
-+ tree size_overflow_type = get_size_overflow_type(oldstmt, old_result);
-+
-+ result = create_new_var(size_overflow_type);
-+ }
-+
-+ phi = create_phi_node(result, bb);
-+ gimple_phi_set_result(phi, make_ssa_name(result, phi));
-+ seq = phi_nodes(bb);
-+ gsi = gsi_last(seq);
-+ gsi_remove(&gsi, false);
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
-+ gimple_set_bb(phi, bb);
-+ set_stmt_flag(phi, MY_STMT);
-+ return phi;
-+}
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+static tree create_new_phi_node(VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
-+#else
-+static tree create_new_phi_node(vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
-+#endif
-+{
-+ gimple new_phi;
-+ unsigned int i;
-+ tree arg, result;
-+ location_t loc = gimple_location(oldstmt);
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(!VEC_empty(tree, *args));
-+#else
-+ gcc_assert(!args->is_empty());
-+#endif
-+
-+ new_phi = overflow_create_phi_node(oldstmt, ssa_name_var);
-+ result = gimple_phi_result(new_phi);
-+ ssa_name_var = SSA_NAME_VAR(result);
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, *args, i, arg) {
-+#else
-+ FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
-+#endif
-+ arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i);
-+ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_free(tree, heap, *args);
-+#else
-+ vec_free(args);
-+#endif
-+ update_stmt(new_phi);
-+ return result;
-+}
-+
-+static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result)
-+{
-+ tree ssa_name_var = NULL_TREE;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, heap) *args = NULL;
-+#else
-+ vec<tree, va_heap, vl_embed> *args = NULL;
-+#endif
-+ gimple oldstmt = get_def_stmt(orig_result);
-+ unsigned int i, len = gimple_phi_num_args(oldstmt);
-+
-+ pointer_set_insert(visited, oldstmt);
-+ for (i = 0; i < len; i++) {
-+ tree arg, new_arg;
-+
-+ arg = gimple_phi_arg_def(oldstmt, i);
-+ new_arg = expand(visited, caller_node, arg);
-+
-+ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
-+ ssa_name_var = SSA_NAME_VAR(new_arg);
-+
-+ if (is_gimple_constant(arg)) {
-+ tree size_overflow_type = get_size_overflow_type(oldstmt, arg);
-+
-+ new_arg = cast_a_tree(size_overflow_type, arg);
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, heap, args, new_arg);
-+#else
-+ vec_safe_push(args, new_arg);
-+#endif
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ return create_new_phi_node(&args, ssa_name_var, oldstmt);
-+#else
-+ return create_new_phi_node(args, ssa_name_var, oldstmt);
-+#endif
-+}
-+
-+static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
++static const char *get_asm_string(const_gimple stmt)
+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree origtype = TREE_TYPE(orig_rhs);
-+
-+ gcc_assert(is_gimple_assign(stmt));
++ if (!stmt)
++ return NULL;
++ if (gimple_code(stmt) != GIMPLE_ASM)
++ return NULL;
+
-+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
++ return gimple_asm_string(stmt);
+}
+
-+static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
++bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
+{
-+ const_tree rhs1, lhs, rhs1_type, lhs_type;
-+ enum machine_mode lhs_mode, rhs_mode;
-+ gimple def_stmt = get_def_stmt(no_const_rhs);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
++ const char *str;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ lhs = gimple_assign_lhs(def_stmt);
-+ rhs1_type = TREE_TYPE(rhs1);
-+ lhs_type = TREE_TYPE(lhs);
-+ rhs_mode = TYPE_MODE(rhs1_type);
-+ lhs_mode = TYPE_MODE(lhs_type);
-+ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ return true;
-+}
-+
-+static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
-+{
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree lhs = gimple_assign_lhs(stmt);
-+ const_tree rhs1_type = TREE_TYPE(rhs1);
-+ const_tree lhs_type = TREE_TYPE(lhs);
-+
-+ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ return create_assign(visited, stmt, rhs1, AFTER_STMT);
++ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
+}
+
-+static bool no_uses(tree node)
++bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
+{
-+ imm_use_iterator imm_iter;
-+ use_operand_p use_p;
-+
-+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
-+ const_gimple use_stmt = USE_STMT(use_p);
++ const char *str;
+
-+ if (use_stmt == NULL)
-+ return true;
-+ if (is_gimple_debug(use_stmt))
-+ continue;
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+ }
-+ return true;
++ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
+}
+
-+// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
-+static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
++bool is_size_overflow_asm(const_gimple stmt)
+{
-+ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs_type = TREE_TYPE(rhs1);
-+ lhs_type = TREE_TYPE(lhs);
-+ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
-+ return false;
-+ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
-+ return false;
-+
-+ def_stmt = get_def_stmt(rhs1);
-+ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
-+ return false;
-+
-+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
-+ return false;
++ const char *str;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ if (is_gimple_constant(rhs2))
-+ not_const_rhs = rhs1;
-+ else
-+ not_const_rhs = rhs2;
-+
-+ return no_uses(not_const_rhs);
++ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
+}
+
-+static bool skip_lhs_cast_check(const_gimple stmt)
++bool is_a_return_check(const_tree node)
+{
-+ const_tree rhs = gimple_assign_rhs1(stmt);
-+ const_gimple def_stmt = get_def_stmt(rhs);
-+
-+ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
-+ if (gimple_code(def_stmt) == GIMPLE_ASM)
-+ return true;
-+
-+ if (is_const_plus_unsigned_signed_truncation(rhs))
++ if (TREE_CODE(node) == FUNCTION_DECL)
+ return true;
+
++ gcc_assert(TREE_CODE(node) == PARM_DECL);
+ return false;
+}
+
-+static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
-+{
-+ bool cast_lhs, cast_rhs;
-+ tree lhs = gimple_assign_lhs(stmt);
-+ tree rhs = gimple_assign_rhs1(stmt);
-+ const_tree lhs_type = TREE_TYPE(lhs);
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
-+ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
-+ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
-+ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
-+
-+ static bool check_lhs[3][4] = {
-+ // ss su us uu
-+ { false, true, true, false }, // lhs > rhs
-+ { false, false, false, false }, // lhs = rhs
-+ { true, true, true, true }, // lhs < rhs
-+ };
-+
-+ static bool check_rhs[3][4] = {
-+ // ss su us uu
-+ { true, false, true, true }, // lhs > rhs
-+ { true, false, true, true }, // lhs = rhs
-+ { true, false, true, true }, // lhs < rhs
-+ };
-+
-+ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
-+ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (lhs_size > rhs_size) {
-+ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ } else if (lhs_size == rhs_size) {
-+ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ } else {
-+ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ }
-+
-+ if (!cast_lhs && !cast_rhs)
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ if (cast_lhs && !skip_lhs_cast_check(stmt))
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
-+
-+ if (cast_rhs)
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
-+
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+}
-+
-+static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
-+{
-+ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
-+
-+ if (get_stmt_flag(stmt) == MY_STMT)
-+ return lhs;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+
-+ if (new_rhs1 == NULL_TREE)
-+ return create_cast_assign(visited, stmt);
-+
-+ if (get_stmt_flag(stmt) == NO_CAST_CHECK)
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
-+ tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
-+
-+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
-+ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+ }
-+
-+ if (!gimple_assign_cast_p(stmt))
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
-+}
-+
-+static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
-+{
-+ tree rhs1, lhs = gimple_assign_lhs(stmt);
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+
-+ if (is_gimple_constant(rhs1))
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+
-+ switch (TREE_CODE(rhs1)) {
-+ case SSA_NAME:
-+ return handle_unary_rhs(visited, caller_node, def_stmt);
-+ case ARRAY_REF:
-+ case BIT_FIELD_REF:
-+ case ADDR_EXPR:
-+ case COMPONENT_REF:
-+ case INDIRECT_REF:
-+#if BUILDING_GCC_VERSION >= 4006
-+ case MEM_REF:
-+#endif
-+ case TARGET_MEM_REF:
-+ case VIEW_CONVERT_EXPR:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ case PARM_DECL:
-+ case VAR_DECL:
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ debug_tree(rhs1);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
-+{
-+ gimple cond_stmt;
-+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
-+
-+ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
-+ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
-+ update_stmt(cond_stmt);
-+}
-+
-+static tree create_string_param(tree string)
-+{
-+ tree i_type, a_type;
-+ const int length = TREE_STRING_LENGTH(string);
-+
-+ gcc_assert(length > 0);
-+
-+ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
-+ a_type = build_array_type(char_type_node, i_type);
-+
-+ TREE_TYPE(string) = a_type;
-+ TREE_CONSTANT(string) = 1;
-+ TREE_READONLY(string) = 1;
-+
-+ return build1(ADDR_EXPR, ptr_type_node, string);
-+}
-+
-+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
-+{
-+ gimple func_stmt;
-+ const_gimple def_stmt;
-+ const_tree loc_line;
-+ tree loc_file, ssa_name, current_func;
-+ expanded_location xloc;
-+ char *ssa_name_buf;
-+ int len;
-+ struct cgraph_edge *edge;
-+ struct cgraph_node *callee_node;
-+ int frequency;
-+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
-+
-+ def_stmt = get_def_stmt(arg);
-+ xloc = expand_location(gimple_location(def_stmt));
-+
-+ if (!gimple_has_location(def_stmt)) {
-+ xloc = expand_location(gimple_location(stmt));
-+ if (!gimple_has_location(stmt))
-+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
-+ }
-+
-+ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
-+
-+ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
-+ loc_file = create_string_param(loc_file);
-+
-+ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
-+ current_func = create_string_param(current_func);
-+
-+ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
-+ call_count++;
-+ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
-+ gcc_assert(len > 0);
-+ ssa_name = build_string(len + 1, ssa_name_buf);
-+ free(ssa_name_buf);
-+ ssa_name = create_string_param(ssa_name);
-+
-+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
-+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
-+
-+ callee_node = cgraph_get_create_node(report_size_overflow_decl);
-+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
-+
-+ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
-+ gcc_assert(edge != NULL);
-+}
-+
-+static void __unused print_the_code_insertions(const_gimple stmt)
-+{
-+ location_t loc = gimple_location(stmt);
-+
-+ inform(loc, "Integer size_overflow check applied here.");
-+}
-+
-+static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
++// Get the argnum of a function decl, if node is a return then the argnum is 0
++unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
+{
-+ basic_block cond_bb, join_bb, bb_true;
-+ edge e;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ cond_bb = gimple_bb(stmt);
-+ if (before)
-+ gsi_prev(&gsi);
-+ if (gsi_end_p(gsi))
-+ e = split_block_after_labels(cond_bb);
++ if (is_a_return_check(node))
++ return 0;
+ else
-+ e = split_block(cond_bb, gsi_stmt(gsi));
-+ cond_bb = e->src;
-+ join_bb = e->dest;
-+ e->flags = EDGE_FALSE_VALUE;
-+ e->probability = REG_BR_PROB_BASE;
-+
-+ bb_true = create_empty_bb(cond_bb);
-+ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
-+ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
-+ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
-+
-+ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
-+ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
-+ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
-+
-+ if (current_loops != NULL) {
-+ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
-+ add_bb_to_loop(bb_true, cond_bb->loop_father);
-+ }
-+
-+ insert_cond(cond_bb, arg, cond_code, type_value);
-+ insert_cond_result(caller_node, bb_true, stmt, arg, min);
-+
-+// print_the_code_insertions(stmt);
-+}
-+
-+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
-+{
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
-+
-+ gcc_assert(rhs_type != NULL_TREE);
-+ if (TREE_CODE(rhs_type) == POINTER_TYPE)
-+ return;
-+
-+ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
-+
-+ if (is_const_plus_unsigned_signed_truncation(rhs))
-+ return;
-+
-+ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+ // typemax (-1) < typemin (0)
-+ if (TREE_OVERFLOW(type_max))
-+ return;
-+
-+ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
-+
-+ cast_rhs_type = TREE_TYPE(cast_rhs);
-+ type_max_type = TREE_TYPE(type_max);
-+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
-+
-+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
-+
-+ // special case: get_size_overflow_type(), 32, u64->s
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
-+ return;
-+
-+ type_min_type = TREE_TYPE(type_min);
-+ gcc_assert(types_compatible_p(type_max_type, type_min_type));
-+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
-+}
-+
-+static bool is_lt_signed_type_max(const_tree rhs)
-+{
-+ const_tree new_type, type_max, type = TREE_TYPE(rhs);
-+
-+ if (!TYPE_UNSIGNED(type))
-+ return true;
-+
-+ switch (TYPE_MODE(type)) {
-+ case QImode:
-+ new_type = intQI_type_node;
-+ break;
-+ case HImode:
-+ new_type = intHI_type_node;
-+ break;
-+ case SImode:
-+ new_type = intSI_type_node;
-+ break;
-+ case DImode:
-+ new_type = intDI_type_node;
-+ break;
-+ default:
-+ debug_tree((tree)type);
-+ gcc_unreachable();
-+ }
-+
-+ type_max = TYPE_MAX_VALUE(new_type);
-+ if (!tree_int_cst_lt(type_max, rhs))
-+ return true;
-+
-+ return false;
-+}
-+
-+static bool is_gt_zero(const_tree rhs)
-+{
-+ const_tree type = TREE_TYPE(rhs);
-+
-+ if (TYPE_UNSIGNED(type))
-+ return true;
-+
-+ if (!tree_int_cst_lt(rhs, integer_zero_node))
-+ return true;
-+
-+ return false;
-+}
-+
-+static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
-+{
-+ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
-+ return false;
-+ if (!is_gimple_constant(rhs))
-+ return false;
-+
-+ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow
-+// if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs))
-+// return false;
-+
-+ return true;
-+}
-+
-+static tree get_def_stmt_rhs(const_tree var)
-+{
-+ tree rhs1, def_stmt_rhs1;
-+ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
-+
-+ def_stmt = get_def_stmt(var);
-+ if (!gimple_assign_cast_p(def_stmt))
-+ return NULL_TREE;
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && get_stmt_flag(def_stmt) == MY_STMT && gimple_assign_cast_p(def_stmt));
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt))
-+ return rhs1;
-+
-+ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
-+ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
-+
-+ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
-+ case GIMPLE_CALL:
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ case GIMPLE_PHI:
-+ return def_stmt_rhs1;
-+ case GIMPLE_ASSIGN:
-+ return rhs1;
-+ default:
-+ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
-+{
-+ tree new_rhs, orig_rhs;
-+ void (*gimple_assign_set_rhs)(gimple, tree);
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree rhs2 = gimple_assign_rhs2(stmt);
-+ tree lhs = gimple_assign_lhs(stmt);
-+
-+ if (!check_overflow)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (change_rhs == NULL_TREE)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (new_rhs2 == NULL_TREE) {
-+ orig_rhs = rhs1;
-+ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
-+ } else {
-+ orig_rhs = rhs2;
-+ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
-+ }
-+
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
-+
-+ new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
-+ gimple_assign_set_rhs(stmt, new_rhs);
-+ update_stmt(stmt);
-+
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+}
-+
-+static bool is_subtraction_special(const_gimple stmt)
-+{
-+ gimple rhs1_def_stmt, rhs2_def_stmt;
-+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
-+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
-+ const_tree rhs1 = gimple_assign_rhs1(stmt);
-+ const_tree rhs2 = gimple_assign_rhs2(stmt);
-+
-+ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
-+ return false;
-+
-+ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ rhs2_def_stmt = get_def_stmt(rhs2);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
-+ return false;
-+
-+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
-+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
-+ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
-+ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
-+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
-+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
-+ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
-+ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
-+ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
-+ return false;
-+ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
-+ return false;
-+
-+ set_stmt_flag(rhs1_def_stmt, NO_CAST_CHECK);
-+ set_stmt_flag(rhs2_def_stmt, NO_CAST_CHECK);
-+ return true;
-+}
-+
-+static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs)
-+{
-+ tree new_rhs1, new_rhs2;
-+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
-+ gimple assign, stmt = get_def_stmt(lhs);
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree rhs2 = gimple_assign_rhs2(stmt);
-+
-+ if (!is_subtraction_special(stmt))
-+ return NULL_TREE;
-+
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+ new_rhs2 = expand(visited, caller_node, rhs2);
-+
-+ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
-+ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
-+
-+ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
-+ return NULL_TREE;
-+
-+ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
-+ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
-+ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
-+ }
-+
-+ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
-+ new_lhs = gimple_assign_lhs(assign);
-+ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
-+
-+ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+}
-+
-+static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(rhs) != SSA_NAME)
-+ return false;
-+
-+ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
-+ return false;
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
-+ return false;
-+
-+ return true;
-+}
-+
-+static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ tree rhs1, rhs2, new_lhs;
-+ gimple def_stmt = get_def_stmt(lhs);
-+ tree new_rhs1 = NULL_TREE;
-+ tree new_rhs2 = NULL_TREE;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ /* no DImode/TImode division in the 32/64 bit kernel */
-+ switch (gimple_assign_rhs_code(def_stmt)) {
-+ case RDIV_EXPR:
-+ case TRUNC_DIV_EXPR:
-+ case CEIL_DIV_EXPR:
-+ case FLOOR_DIV_EXPR:
-+ case ROUND_DIV_EXPR:
-+ case TRUNC_MOD_EXPR:
-+ case CEIL_MOD_EXPR:
-+ case FLOOR_MOD_EXPR:
-+ case ROUND_MOD_EXPR:
-+ case EXACT_DIV_EXPR:
-+ case POINTER_PLUS_EXPR:
-+ case BIT_AND_EXPR:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ default:
-+ break;
-+ }
-+
-+ new_lhs = handle_integer_truncation(visited, caller_node, lhs);
-+ if (new_lhs != NULL_TREE)
-+ return new_lhs;
-+
-+ if (TREE_CODE(rhs1) == SSA_NAME)
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+ if (TREE_CODE(rhs2) == SSA_NAME)
-+ new_rhs2 = expand(visited, caller_node, rhs2);
-+
-+ if (is_a_neg_overflow(def_stmt, rhs2))
-+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
-+ if (is_a_neg_overflow(def_stmt, rhs1))
-+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
-+
-+
-+ if (is_a_constant_overflow(def_stmt, rhs2))
-+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
-+ if (is_a_constant_overflow(def_stmt, rhs1))
-+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
-+
-+ // the const is between 0 and (signed) MAX
-+ if (is_gimple_constant(rhs1))
-+ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT);
-+ if (is_gimple_constant(rhs2))
-+ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT);
-+
-+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4006
-+static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
-+{
-+ if (is_gimple_constant(rhs))
-+ return cast_a_tree(size_overflow_type, rhs);
-+ if (TREE_CODE(rhs) != SSA_NAME)
-+ return NULL_TREE;
-+ return expand(visited, caller_node, rhs);
-+}
-+
-+static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+ rhs3 = gimple_assign_rhs3(def_stmt);
-+ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
-+ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
-+ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
-+
-+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
-+}
-+#endif
-+
-+static tree get_size_overflow_type(gimple stmt, const_tree node)
-+{
-+ const_tree type;
-+ tree new_type;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ type = TREE_TYPE(node);
-+
-+ if (get_stmt_flag(stmt) == MY_STMT)
-+ return TREE_TYPE(node);
-+
-+ switch (TYPE_MODE(type)) {
-+ case QImode:
-+ new_type = intHI_type_node;
-+ break;
-+ case HImode:
-+ new_type = intSI_type_node;
-+ break;
-+ case SImode:
-+ new_type = intDI_type_node;
-+ break;
-+ case DImode:
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
-+ else
-+ new_type = intTI_type_node;
-+ break;
-+ case TImode:
-+ gcc_assert(!TYPE_UNSIGNED(type));
-+ new_type = intTI_type_node;
-+ break;
-+ default:
-+ debug_tree((tree)node);
-+ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
-+ gcc_unreachable();
-+ }
-+
-+ if (TYPE_QUALS(type) != 0)
-+ return build_qualified_type(new_type, TYPE_QUALS(type));
-+ return new_type;
-+}
-+
-+static tree expand_visited(gimple def_stmt)
-+{
-+ const_gimple next_stmt;
-+ gimple_stmt_iterator gsi;
-+ enum gimple_code code = gimple_code(def_stmt);
-+
-+ if (code == GIMPLE_ASM)
-+ return NULL_TREE;
-+
-+ gsi = gsi_for_stmt(def_stmt);
-+ gsi_next(&gsi);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
-+ return NULL_TREE;
-+ gcc_assert(!gsi_end_p(gsi));
-+ next_stmt = gsi_stmt(gsi);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI && get_stmt_flag((gimple)next_stmt) != MY_STMT)
-+ return NULL_TREE;
-+ gcc_assert(get_stmt_flag((gimple)next_stmt) == MY_STMT);
-+
-+ return get_lhs(next_stmt);
-+}
-+
-+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(lhs);
-+
-+ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+ return NULL_TREE;
-+
-+ if (get_stmt_flag(def_stmt) == MY_STMT)
-+ return lhs;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return expand_visited(def_stmt);
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_PHI:
-+ return handle_phi(visited, caller_node, lhs);
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASM:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return handle_unary_ops(visited, caller_node, def_stmt);
-+ case 3:
-+ return handle_binary_ops(visited, caller_node, lhs);
-+#if BUILDING_GCC_VERSION >= 4006
-+ case 4:
-+ return handle_ternary_ops(visited, caller_node, lhs);
-+#endif
-+ }
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
-+{
-+ const_gimple assign;
-+ tree orig_type = TREE_TYPE(orig_node);
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static void change_orig_node(struct interesting_node *cur_node, tree new_node)
-+{
-+ void (*set_rhs)(gimple, tree);
-+ gimple stmt = cur_node->first_stmt;
-+ const_tree orig_node = cur_node->node;
-+
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_RETURN:
-+ gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
-+ break;
-+ case GIMPLE_CALL:
-+ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
-+ break;
-+ case GIMPLE_ASSIGN:
-+ switch (cur_node->num) {
-+ case 1:
-+ set_rhs = &gimple_assign_set_rhs1;
-+ break;
-+ case 2:
-+ set_rhs = &gimple_assign_set_rhs2;
-+ break;
-+#if BUILDING_GCC_VERSION >= 4006
-+ case 3:
-+ set_rhs = &gimple_assign_set_rhs3;
-+ break;
-+#endif
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
-+ break;
-+ default:
-+ debug_gimple_stmt(stmt);
-+ gcc_unreachable();
-+ }
-+
-+ update_stmt(stmt);
++ return find_arg_number_tree(node, orig_fndecl);
+}
+
-+static unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
++unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
+{
+ const struct size_overflow_hash *hash;
+ unsigned int new_argnum;
@@ -117792,81 +120576,6 @@ index 0000000..948ec25
+ return CANNOT_FIND_ARG;
+}
+
-+// Don't want to duplicate entries in next_cgraph_node
-+static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
-+{
-+ const_tree new_callee_fndecl;
-+ struct next_cgraph_node *cur_node;
-+
-+ if (fndecl == RET_CHECK)
-+ new_callee_fndecl = NODE_DECL(node);
-+ else
-+ new_callee_fndecl = fndecl;
-+
-+ for (cur_node = head; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
-+ continue;
-+ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
-+ continue;
-+ if (num == cur_node->num)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Add a next_cgraph_node into the list for handle_function().
-+ * handle_function() iterates over all the next cgraph nodes and
-+ * starts the overflow check insertion process.
-+ */
-+static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
-+{
-+ struct next_cgraph_node *new_node;
-+
-+ if (is_in_next_cgraph_node(head, node, fndecl, num))
-+ return head;
-+
-+ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
-+ new_node->current_function = node;
-+ new_node->next = NULL;
-+ new_node->num = num;
-+ if (fndecl == RET_CHECK)
-+ new_node->callee_fndecl = NODE_DECL(node);
-+ else
-+ new_node->callee_fndecl = fndecl;
-+
-+ if (!head)
-+ return new_node;
-+
-+ new_node->next = head;
-+ return new_node;
-+}
-+
-+static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
-+{
-+ struct cgraph_edge *e;
-+
-+ if (num == 0)
-+ return create_new_next_cgraph_node(head, node, RET_CHECK, num);
-+
-+ for (e = node->callers; e; e = e->next_caller) {
-+ tree fndecl = gimple_call_fndecl(e->call_stmt);
-+
-+ gcc_assert(fndecl != NULL_TREE);
-+ head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
-+ }
-+
-+ return head;
-+}
-+
-+static bool is_a_return_check(const_tree node)
-+{
-+ if (TREE_CODE(node) == FUNCTION_DECL)
-+ return true;
-+
-+ gcc_assert(TREE_CODE(node) == PARM_DECL);
-+ return false;
-+}
-+
+static bool is_in_hash_table(const_tree fndecl, unsigned int num)
+{
+ const struct size_overflow_hash *hash;
@@ -117877,37 +120586,10 @@ index 0000000..948ec25
+ return false;
+}
+
-+struct missing_functions {
-+ struct missing_functions *next;
-+ const_tree node;
-+ tree fndecl;
-+};
-+
-+static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
-+{
-+ struct missing_functions *new_function;
-+
-+ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
-+ new_function->node = node;
-+ new_function->next = NULL;
-+
-+ if (TREE_CODE(node) == FUNCTION_DECL)
-+ new_function->fndecl = node;
-+ else
-+ new_function->fndecl = current_function_decl;
-+ gcc_assert(new_function->fndecl);
-+
-+ if (!missing_fn_head)
-+ return new_function;
-+
-+ new_function->next = missing_fn_head;
-+ return new_function;
-+}
-+
+/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table.
+ * If the function is missing everywhere then print the missing message into stderr.
+ */
-+static bool is_missing_function(const_tree orig_fndecl, unsigned int num)
++bool is_missing_function(const_tree orig_fndecl, unsigned int num)
+{
+ switch (DECL_FUNCTION_CODE(orig_fndecl)) {
+#if BUILDING_GCC_VERSION >= 4008
@@ -117935,2189 +120617,6 @@ index 0000000..948ec25
+ return true;
+}
+
-+// Get the argnum of a function decl, if node is a return then the argnum is 0
-+static unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
-+{
-+ if (is_a_return_check(node))
-+ return 0;
-+ else
-+ return find_arg_number_tree(node, orig_fndecl);
-+}
-+
-+/* If the function is missing from the hash table and it is a static function
-+ * then create a next_cgraph_node from it for handle_function()
-+ */
-+static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
-+{
-+ unsigned int num;
-+ const_tree orig_fndecl;
-+ struct cgraph_node *next_node = NULL;
-+
-+ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
-+
-+ num = get_function_num(missing_fn_head->node, orig_fndecl);
-+ if (num == CANNOT_FIND_ARG)
-+ return cnodes;
-+
-+ if (!is_missing_function(orig_fndecl, num))
-+ return cnodes;
-+
-+ next_node = cgraph_get_node(missing_fn_head->fndecl);
-+ if (next_node && next_node->local.local)
-+ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
-+ return cnodes;
-+}
-+
-+/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
-+ * into the next_cgraph_node list. They will be the next interesting returns or callees.
-+ */
-+static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
-+{
-+ unsigned int i;
-+ tree node;
-+ struct missing_functions *cur, *missing_fn_head = NULL;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
-+#endif
-+ switch (TREE_CODE(node)) {
-+ case PARM_DECL:
-+ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
-+ break;
-+ case FUNCTION_DECL:
-+ missing_fn_head = create_new_missing_function(missing_fn_head, node);
-+ break;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ while (missing_fn_head) {
-+ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
-+
-+ cur = missing_fn_head->next;
-+ free(missing_fn_head);
-+ missing_fn_head = cur;
-+ }
-+
-+ return cnodes;
-+}
-+
-+static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ const_tree arg = gimple_phi_arg_def(phi, i);
-+
-+ set_conditions(visited, interesting_conditions, arg);
-+ }
-+}
-+
-+enum conditions {
-+ FROM_CONST, NOT_UNARY, CAST
-+};
-+
-+// Search for constants, cast assignments and binary/ternary assignments
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
-+{
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ if (is_gimple_constant(lhs)) {
-+ interesting_conditions[FROM_CONST] = true;
-+ return;
-+ }
-+
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASM:
-+ return;
-+ case GIMPLE_PHI:
-+ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
-+ case GIMPLE_ASSIGN:
-+ if (gimple_num_ops(def_stmt) == 2) {
-+ const_tree rhs = gimple_assign_rhs1(def_stmt);
-+
-+ if (gimple_assign_cast_p(def_stmt))
-+ interesting_conditions[CAST] = true;
-+
-+ return set_conditions(visited, interesting_conditions, rhs);
-+ } else {
-+ interesting_conditions[NOT_UNARY] = true;
-+ return;
-+ }
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// determine whether duplication will be necessary or not.
-+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
-+{
-+ struct pointer_set_t *visited;
-+
-+ if (gimple_assign_cast_p(cur_node->first_stmt))
-+ interesting_conditions[CAST] = true;
-+ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
-+ interesting_conditions[NOT_UNARY] = true;
-+
-+ visited = pointer_set_create();
-+ set_conditions(visited, interesting_conditions, cur_node->node);
-+ pointer_set_destroy(visited);
-+}
-+
-+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
-+static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi;
-+
-+ // already removed
-+ if (gimple_bb(asm_stmt) == NULL)
-+ return;
-+ gsi = gsi_for_stmt(asm_stmt);
-+
-+ assign = gimple_build_assign(lhs, rhs);
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ SSA_NAME_DEF_STMT(lhs) = assign;
-+
-+ gsi_remove(&gsi, true);
-+}
-+
-+// Get the field decl of a component ref for intentional_overflow checking
-+static const_tree search_field_decl(const_tree comp_ref)
-+{
-+ const_tree field = NULL_TREE;
-+ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
-+
-+ for (i = 0; i < len; i++) {
-+ field = TREE_OPERAND(comp_ref, i);
-+ if (TREE_CODE(field) == FIELD_DECL)
-+ break;
-+ }
-+ gcc_assert(TREE_CODE(field) == FIELD_DECL);
-+ return field;
-+}
-+
-+/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting
-+ * stmt is a return otherwise it is the callee function.
-+ */
-+static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
-+{
-+ const_tree fndecl;
-+
-+ if (argnum == 0)
-+ fndecl = current_function_decl;
-+ else
-+ fndecl = gimple_call_fndecl(stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return NULL_TREE;
-+
-+ return DECL_ORIGIN(fndecl);
-+}
-+
-+/* Get the param of the intentional_overflow attribute.
-+ * * 0: MARK_NOT_INTENTIONAL
-+ * * 1..MAX_PARAM: MARK_YES
-+ * * -1: MARK_TURN_OFF
-+ */
-+static tree get_attribute_param(const_tree decl)
-+{
-+ const_tree attr;
-+
-+ if (decl == NULL_TREE)
-+ return NULL_TREE;
-+
-+ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
-+ if (!attr || !TREE_VALUE(attr))
-+ return NULL_TREE;
-+
-+ return TREE_VALUE(attr);
-+}
-+
-+// MARK_TURN_OFF
-+static bool is_turn_off_intentional_attr(const_tree decl)
-+{
-+ const_tree param_head;
-+
-+ param_head = get_attribute_param(decl);
-+ if (param_head == NULL_TREE)
-+ return false;
-+
-+ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
-+ return true;
-+ return false;
-+}
-+
-+// MARK_NOT_INTENTIONAL
-+static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
-+{
-+ const_tree param_head;
-+
-+ if (argnum == 0)
-+ return false;
-+
-+ param_head = get_attribute_param(decl);
-+ if (param_head == NULL_TREE)
-+ return false;
-+
-+ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
-+ return true;
-+ return false;
-+}
-+
-+// MARK_YES
-+static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
-+{
-+ tree param, param_head;
-+
-+ if (argnum == 0)
-+ return false;
-+
-+ param_head = get_attribute_param(decl);
-+ for (param = param_head; param; param = TREE_CHAIN(param))
-+ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
-+ return true;
-+ return false;
-+}
-+
-+static const char *get_asm_string(const_gimple stmt)
-+{
-+ if (!stmt)
-+ return NULL;
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ return NULL;
-+
-+ return gimple_asm_string(stmt);
-+}
-+
-+static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
-+{
-+ const char *str;
-+
-+ str = get_asm_string(stmt);
-+ if (!str)
-+ return false;
-+ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
-+}
-+
-+static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
-+{
-+ const char *str;
-+
-+ str = get_asm_string(stmt);
-+ if (!str)
-+ return false;
-+ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
-+}
-+
-+static bool is_size_overflow_asm(const_gimple stmt)
-+{
-+ const char *str;
-+
-+ str = get_asm_string(stmt);
-+ if (!str)
-+ return false;
-+ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
-+}
-+
-+static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
-+{
-+ location_t loc;
-+
-+ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
-+ return;
-+
-+ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
-+ return;
-+
-+ loc = DECL_SOURCE_LOCATION(decl);
-+ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
-+}
-+
-+/* Get the type of the intentional_overflow attribute of a node
-+ * * MARK_TURN_OFF
-+ * * MARK_YES
-+ * * MARK_NO
-+ * * MARK_NOT_INTENTIONAL
-+ */
-+static enum mark get_intentional_attr_type(const_tree node)
-+{
-+ const_tree cur_decl;
-+
-+ if (node == NULL_TREE)
-+ return MARK_NO;
-+
-+ switch (TREE_CODE(node)) {
-+ case COMPONENT_REF:
-+ cur_decl = search_field_decl(node);
-+ if (is_turn_off_intentional_attr(cur_decl))
-+ return MARK_TURN_OFF;
-+ if (is_end_intentional_intentional_attr(cur_decl, 1))
-+ return MARK_YES;
-+ break;
-+ case PARM_DECL: {
-+ unsigned int argnum;
-+
-+ cur_decl = DECL_ORIGIN(current_function_decl);
-+ argnum = find_arg_number_tree(node, cur_decl);
-+ if (argnum == CANNOT_FIND_ARG)
-+ return MARK_NO;
-+ if (is_yes_intentional_attr(cur_decl, argnum))
-+ return MARK_YES;
-+ if (is_end_intentional_intentional_attr(cur_decl, argnum))
-+ return MARK_NOT_INTENTIONAL;
-+ break;
-+ }
-+ case FUNCTION_DECL:
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
-+ return MARK_TURN_OFF;
-+ break;
-+ default:
-+ break;
-+ }
-+ return MARK_NO;
-+}
-+
-+// Search for the intentional_overflow attribute on the last nodes
-+static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
-+{
-+ unsigned int i;
-+ tree last_node;
-+ enum mark mark = MARK_NO;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
-+#endif
-+ mark = get_intentional_attr_type(last_node);
-+ if (mark != MARK_NO)
-+ break;
-+ }
-+ return mark;
-+}
-+
-+/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
-+ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
-+ */
-+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
-+{
-+ if (!cur_node->intentional_mark_from_gimple)
-+ return false;
-+
-+ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
-+ cur_node->intentional_attr_cur_fndecl = MARK_YES;
-+ else
-+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
-+
-+ // skip param decls
-+ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
-+ return true;
-+ return true;
-+}
-+
-+/* Search intentional_overflow attribute on caller and on callee too.
-+ * > 0 / MARK_YES: no dup, search for the size_overflow and intentional_overflow attributes
-+ * 0 / MARK_NOT_INTENTIONAL: no dup, search for the size_overflow attribute (int)
-+ * -1 / MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
-+ */
-+static void check_intentional_attribute_ipa(struct interesting_node *cur_node)
-+{
-+ const_tree fndecl;
-+
-+ if (is_intentional_attribute_from_gimple(cur_node))
-+ return;
-+
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
-+ return;
-+ }
-+
-+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
-+ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
-+ return;
-+ }
-+
-+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
-+ return;
-+
-+ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
-+ if (is_turn_off_intentional_attr(fndecl)) {
-+ cur_node->intentional_attr_decl = MARK_TURN_OFF;
-+ return;
-+ }
-+
-+ if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
-+ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
-+ else if (is_yes_intentional_attr(fndecl, cur_node->num))
-+ cur_node->intentional_attr_decl = MARK_YES;
-+
-+ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
-+ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
-+}
-+
-+// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
-+static bool skip_asm(const_tree arg)
-+{
-+ gimple def_stmt = get_def_stmt(arg);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
-+ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
-+}
-+
-+static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ walk_use_def(visited, cur_node, arg);
-+ }
-+}
-+
-+static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
-+{
-+ gimple def_stmt = get_def_stmt(lhs);
-+ tree rhs1, rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ walk_use_def(visited, cur_node, rhs1);
-+ walk_use_def(visited, cur_node, rhs2);
-+}
-+
-+static void insert_last_node(struct interesting_node *cur_node, tree node)
-+{
-+ unsigned int i;
-+ tree element;
-+ enum tree_code code;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ if (is_gimple_constant(node))
-+ return;
-+
-+ code = TREE_CODE(node);
-+ if (code == VAR_DECL) {
-+ node = DECL_ORIGIN(node);
-+ code = TREE_CODE(node);
-+ }
-+
-+ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
-+ return;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
-+#endif
-+ if (operand_equal_p(node, element, 0))
-+ return;
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
-+ VEC_safe_push(tree, gc, cur_node->last_nodes, node);
-+#else
-+ gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
-+ vec_safe_push(cur_node->last_nodes, node);
-+#endif
-+}
-+
-+// a size_overflow asm stmt in the control flow doesn't stop the recursion
-+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
-+{
-+ if (!is_size_overflow_asm(stmt))
-+ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+}
-+
-+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
-+ * and component refs (for checking the intentional_overflow attribute).
-+ */
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) != SSA_NAME) {
-+ insert_last_node(cur_node, lhs);
-+ return;
-+ }
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
-+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return;
-+ insert_last_node(cur_node, fndecl);
-+ return;
-+ }
-+ case GIMPLE_PHI:
-+ return walk_use_def_phi(visited, cur_node, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return walk_use_def_binary(visited, cur_node, lhs);
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
-+static void set_last_nodes(struct interesting_node *cur_node)
-+{
-+ struct pointer_set_t *visited;
-+
-+ visited = pointer_set_create();
-+ walk_use_def(visited, cur_node, cur_node->node);
-+ pointer_set_destroy(visited);
-+}
-+
-+enum precond {
-+ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
-+};
-+
-+/* If there is a mark_turn_off intentional attribute on the caller or the callee then no duplication and no missing size_overflow attribute check is done anywhere.
-+ * The missing size_overflow attribute check is only done if the intentional_overflow attribute is of the mark_no type.
-+ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
-+ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume that it is some kind of error code.
-+ */
-+static enum precond check_preconditions(struct interesting_node *cur_node)
-+{
-+ bool interesting_conditions[3] = {false, false, false};
-+
-+ set_last_nodes(cur_node);
-+
-+ check_intentional_attribute_ipa(cur_node);
-+ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ search_interesting_conditions(cur_node, interesting_conditions);
-+
-+ // error code
-+ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ // unnecessary overflow check
-+ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
-+ return NO_CHECK_INSERT;
-+
-+ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
-+ return NO_CHECK_INSERT;
-+
-+ return NONE;
-+}
-+
-+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and the asm stmts,
-+ * decides whether the duplication is necessary, and searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
-+ * in the original stmt (first stmt) and inserts the overflow check for the arg of the callee or for the return value.
-+ */
-+static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
-+{
-+ enum precond ret;
-+ struct pointer_set_t *visited;
-+ tree new_node, orig_node = cur_node->node;
-+
-+ ret = check_preconditions(cur_node);
-+ if (ret == NO_ATTRIBUTE_SEARCH)
-+ return cnodes;
-+
-+ cnodes = search_overflow_attribute(cnodes, cur_node);
-+
-+ if (ret == NO_CHECK_INSERT)
-+ return cnodes;
-+
-+ visited = pointer_set_create();
-+ new_node = expand(visited, caller_node, orig_node);
-+ pointer_set_destroy(visited);
-+
-+ if (new_node == NULL_TREE)
-+ return cnodes;
-+
-+ change_orig_node(cur_node, new_node);
-+ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
-+
-+ return cnodes;
-+}
-+
-+// Check visited interesting nodes.
-+static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
-+{
-+ struct interesting_node *cur;
-+
-+ for (cur = head; cur; cur = cur->next) {
-+ if (!operand_equal_p(node, cur->node, 0))
-+ continue;
-+ if (num != cur->num)
-+ continue;
-+ if (first_stmt == cur->first_stmt)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
-+ first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this
-+   last_nodes: they are the last stmts in the recursion (they don't have a def_stmt). They are useful in the missing size_overflow attribute check and
-+ the intentional_overflow attribute check. They are collected by set_last_nodes().
-+ num: arg count of a call stmt or 0 when it is a ret
-+ node: the recursion starts from here, it is a call arg or a return value
-+   fndecl: the fndecl of the interesting node. When the node is a call arg it is the fndecl of the callee function, otherwise it is the fndecl of the caller (current_function_decl).
-+ intentional_attr_decl: intentional_overflow attribute of the callee function
-+ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
-+ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
-+ */
-+static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
-+{
-+ struct interesting_node *new_node;
-+ tree fndecl;
-+ enum gimple_code code;
-+
-+ gcc_assert(node != NULL_TREE);
-+ code = gimple_code(first_stmt);
-+ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
-+
-+ if (num == CANNOT_FIND_ARG)
-+ return head;
-+
-+ if (skip_types(node))
-+ return head;
-+
-+ if (skip_asm(node))
-+ return head;
-+
-+ if (is_gimple_call(first_stmt))
-+ fndecl = gimple_call_fndecl(first_stmt);
-+ else
-+ fndecl = current_function_decl;
-+
-+ if (fndecl == NULL_TREE)
-+ return head;
-+
-+ if (is_in_interesting_node(head, first_stmt, node, num))
-+ return head;
-+
-+ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
-+
-+ new_node->next = NULL;
-+ new_node->first_stmt = first_stmt;
-+#if BUILDING_GCC_VERSION <= 4007
-+ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
-+#else
-+ vec_alloc(new_node->last_nodes, VEC_LEN);
-+#endif
-+ new_node->num = num;
-+ new_node->node = node;
-+ new_node->fndecl = fndecl;
-+ new_node->intentional_attr_decl = MARK_NO;
-+ new_node->intentional_attr_cur_fndecl = MARK_NO;
-+ new_node->intentional_mark_from_gimple = asm_stmt;
-+
-+ if (!head)
-+ return new_node;
-+
-+ new_node->next = head;
-+ return new_node;
-+}
-+
-+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
-+ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
-+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
-+{
-+ struct next_cgraph_node *cur_node;
-+ tree ret = gimple_return_retval(stmt);
-+
-+ if (ret == NULL_TREE)
-+ return head;
-+
-+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
-+ continue;
-+ if (cur_node->num == 0)
-+ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
-+ }
-+
-+ return head;
-+}
-+
-+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
-+ * If the call stmt is in the next cgraph node list then it's an interesting call.
-+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
-+{
-+ unsigned int argnum;
-+ tree arg;
-+ const_tree fndecl;
-+ struct next_cgraph_node *cur_node;
-+
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ return head;
-+
-+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
-+ continue;
-+ argnum = get_correct_arg_count(cur_node->num, fndecl);
-+ gcc_assert(argnum != CANNOT_FIND_ARG);
-+ if (argnum == 0)
-+ continue;
-+
-+ arg = gimple_call_arg(stmt, argnum - 1);
-+ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
-+ }
-+
-+ return head;
-+}
-+
-+static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
-+{
-+ if (!operand_equal_p(orig_node, node, 0))
-+ return WRONG_NODE;
-+ if (skip_types(node))
-+ return WRONG_NODE;
-+ return ret_count;
-+}
-+
-+// Get the index of the rhs node in an assignment
-+static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
-+{
-+ const_tree rhs1, rhs2;
-+ unsigned int ret;
-+
-+ gcc_assert(stmt);
-+ gcc_assert(is_gimple_assign(stmt));
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ gcc_assert(rhs1 != NULL_TREE);
-+
-+ switch (gimple_num_ops(stmt)) {
-+ case 2:
-+ return check_ops(node, rhs1, 1);
-+ case 3:
-+ ret = check_ops(node, rhs1, 1);
-+ if (ret != WRONG_NODE)
-+ return ret;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ gcc_assert(rhs2 != NULL_TREE);
-+ return check_ops(node, rhs2, 2);
-+ default:
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
-+static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
-+{
-+ unsigned int i;
-+
-+ if (gimple_call_fndecl(stmt) == NULL_TREE)
-+ return CANNOT_FIND_ARG;
-+
-+ for (i = 0; i < gimple_call_num_args(stmt); i++) {
-+ tree node;
-+
-+ node = gimple_call_arg(stmt, i);
-+ if (!operand_equal_p(arg, node, 0))
-+ continue;
-+ if (!skip_types(node))
-+ return i + 1;
-+ }
-+
-+ return CANNOT_FIND_ARG;
-+}
-+
-+/* starting from the size_overflow asm stmt collect interesting stmts. They can be
-+ * any of return, call or assignment stmts (because of inlining).
-+ */
-+static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
-+{
-+ use_operand_p use_p;
-+ imm_use_iterator imm_iter;
-+ unsigned int argnum;
-+
-+ gcc_assert(TREE_CODE(node) == SSA_NAME);
-+
-+ if (pointer_set_insert(visited, node))
-+ return head;
-+
-+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
-+ gimple stmt = USE_STMT(use_p);
-+
-+ if (stmt == NULL)
-+ return head;
-+ if (is_gimple_debug(stmt))
-+ continue;
-+
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_CALL:
-+ argnum = find_arg_number_gimple(node, stmt);
-+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
-+ break;
-+ case GIMPLE_RETURN:
-+ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
-+ break;
-+ case GIMPLE_ASSIGN:
-+ argnum = get_assign_ops_count(stmt, node);
-+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
-+ break;
-+ case GIMPLE_PHI: {
-+ tree result = gimple_phi_result(stmt);
-+ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
-+ break;
-+ }
-+ case GIMPLE_ASM:
-+ if (gimple_asm_noutputs(stmt) != 0)
-+ break;
-+ if (!is_size_overflow_asm(stmt))
-+ break;
-+ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
-+ break;
-+ case GIMPLE_COND:
-+ case GIMPLE_SWITCH:
-+ break;
-+ default:
-+ debug_gimple_stmt(stmt);
-+ gcc_unreachable();
-+ break;
-+ }
-+ }
-+ return head;
-+}
-+
-+static void remove_size_overflow_asm(gimple stmt)
-+{
-+ gimple_stmt_iterator gsi;
-+ tree input, output;
-+
-+ if (!is_size_overflow_asm(stmt))
-+ return;
-+
-+ if (gimple_asm_noutputs(stmt) == 0) {
-+ gsi = gsi_for_stmt(stmt);
-+ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
-+ gsi_remove(&gsi, true);
-+ return;
-+ }
-+
-+ input = gimple_asm_input_op(stmt, 0);
-+ output = gimple_asm_output_op(stmt, 0);
-+ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
-+}
-+
-+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
-+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
-+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
-+ */
-+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
-+{
-+ const_tree output;
-+ struct pointer_set_t *visited;
-+ gimple intentional_asm = NOT_INTENTIONAL_ASM;
-+
-+ if (!is_size_overflow_asm(stmt))
-+ return head;
-+
-+ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
-+ intentional_asm = stmt;
-+
-+ gcc_assert(gimple_asm_ninputs(stmt) == 1);
-+
-+ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
-+ return head;
-+
-+ if (gimple_asm_noutputs(stmt) == 0) {
-+ const_tree input;
-+
-+ if (!is_size_overflow_intentional_asm_turn_off(stmt))
-+ return head;
-+
-+ input = gimple_asm_input_op(stmt, 0);
-+ remove_size_overflow_asm(stmt);
-+ if (is_gimple_constant(TREE_VALUE(input)))
-+ return head;
-+ visited = pointer_set_create();
-+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
-+ pointer_set_destroy(visited);
-+ return head;
-+ }
-+
-+ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
-+ remove_size_overflow_asm(stmt);
-+
-+ visited = pointer_set_create();
-+ output = gimple_asm_output_op(stmt, 0);
-+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
-+ pointer_set_destroy(visited);
-+ return head;
-+}
-+
-+/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
-+ * or a call stmt or a return stmt and store them in the interesting_node list
-+ */
-+static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
-+{
-+ basic_block bb;
-+ struct interesting_node *head = NULL;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ enum gimple_code code;
-+ gimple stmt = gsi_stmt(gsi);
-+
-+ code = gimple_code(stmt);
-+
-+ if (code == GIMPLE_ASM)
-+ head = handle_stmt_by_size_overflow_asm(stmt, head);
-+
-+ if (!next_node)
-+ continue;
-+ if (code == GIMPLE_CALL)
-+ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
-+ if (code == GIMPLE_RETURN)
-+ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
-+ }
-+ }
-+ return head;
-+}
-+
-+static void set_current_function_decl(tree fndecl)
-+{
-+ gcc_assert(fndecl != NULL_TREE);
-+
-+ push_cfun(DECL_STRUCT_FUNCTION(fndecl));
-+ calculate_dominance_info(CDI_DOMINATORS);
-+ current_function_decl = fndecl;
-+}
-+
-+static void unset_current_function_decl(void)
-+{
-+ free_dominance_info(CDI_DOMINATORS);
-+ pop_cfun();
-+ current_function_decl = NULL_TREE;
-+}
-+
-+static void free_interesting_node(struct interesting_node *head)
-+{
-+ struct interesting_node *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_free(tree, gc, head->last_nodes);
-+#else
-+ vec_free(head->last_nodes);
-+#endif
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node)
-+{
-+ struct visited *new_visited;
-+
-+ new_visited = (struct visited *)xmalloc(sizeof(*new_visited));
-+ new_visited->fndecl = cur_node->fndecl;
-+ new_visited->num = cur_node->num;
-+ new_visited->next = NULL;
-+
-+ if (!head)
-+ return new_visited;
-+
-+ new_visited->next = head;
-+ return new_visited;
-+}
-+
-+/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
-+ * it is a visited function.
-+ */
-+static bool is_visited_function(struct visited *head, struct interesting_node *cur_node)
-+{
-+ struct visited *cur;
-+
-+ if (!head)
-+ return false;
-+
-+ if (get_stmt_flag(cur_node->first_stmt) != VISITED_STMT)
-+ return false;
-+
-+ for (cur = head; cur; cur = cur->next) {
-+ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
-+ continue;
-+ if (cur_node->num == cur->num)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static void free_next_cgraph_node(struct next_cgraph_node *head)
-+{
-+ struct next_cgraph_node *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+static void remove_all_size_overflow_asm(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ remove_size_overflow_asm(gsi_stmt(si));
-+ }
-+}
-+
-+/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
-+ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
-+ * the newly collected interesting functions (they are interesting if there is control flow between
-+ * the interesting stmts and them).
-+ */
-+static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited)
-+{
-+ struct interesting_node *head, *cur_node;
-+ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
-+
-+ set_current_function_decl(NODE_DECL(node));
-+ call_count = 0;
-+
-+ head = collect_interesting_stmts(next_node);
-+
-+ for (cur_node = head; cur_node; cur_node = cur_node->next) {
-+ if (is_visited_function(visited, cur_node))
-+ continue;
-+ cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node);
-+ set_stmt_flag(cur_node->first_stmt, VISITED_STMT);
-+ visited = insert_visited_function(visited, cur_node);
-+ }
-+
-+ free_interesting_node(head);
-+ remove_all_size_overflow_asm();
-+ unset_current_function_decl();
-+
-+ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
-+ visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited);
-+
-+ free_next_cgraph_node(cnodes_head);
-+ return visited;
-+}
-+
-+static void free_visited(struct visited *head)
-+{
-+ struct visited *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+// erase the local flag
-+static void set_plf_false(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ set_stmt_flag(gsi_stmt(si), NO_FLAGS);
-+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
-+ set_stmt_flag(gsi_stmt(si), NO_FLAGS);
-+ }
-+}
-+
-+// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
-+static unsigned int search_function(void)
-+{
-+ struct cgraph_node *node;
-+ struct visited *visited = NULL;
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ set_current_function_decl(NODE_DECL(node));
-+ set_plf_false();
-+ unset_current_function_decl();
-+ }
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ gcc_assert(cgraph_function_flags_ready);
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(node->reachable);
-+#endif
-+
-+ visited = handle_function(node, NULL, visited);
-+ }
-+
-+ free_visited(visited);
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data ipa_pass_data = {
-+#else
-+static struct ipa_opt_pass_d ipa_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "size_overflow",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = search_function,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class ipa_pass : public ipa_opt_pass_d {
-+public:
-+ ipa_pass() : ipa_opt_pass_d(ipa_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+ unsigned int execute() { return search_function(); }
-+};
-+}
-+
-+static opt_pass *make_ipa_pass(void)
-+{
-+ return new ipa_pass();
-+}
-+#else
-+static struct opt_pass *make_ipa_pass(void)
-+{
-+ return &ipa_pass.pass;
-+}
-+#endif
-+
-+// data for the size_overflow asm stmt
-+struct asm_data {
-+ gimple def_stmt;
-+ tree input;
-+ tree output;
-+};
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
-+#else
-+static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
-+#endif
-+{
-+ tree list;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *vec_list = NULL;
-+#else
-+ vec<tree, va_gc> *vec_list = NULL;
-+#endif
-+
-+ list = build_tree_list(NULL_TREE, string);
-+ list = chainon(NULL_TREE, build_tree_list(list, io));
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, gc, vec_list, list);
-+#else
-+ vec_safe_push(vec_list, list);
-+#endif
-+ return vec_list;
-+}
-+
-+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
-+{
-+ gimple asm_stmt;
-+ gimple_stmt_iterator gsi;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *input, *output = NULL;
-+#else
-+ vec<tree, va_gc> *input, *output = NULL;
-+#endif
-+
-+ input = create_asm_io_list(str_input, asm_data->input);
-+
-+ if (asm_data->output)
-+ output = create_asm_io_list(str_output, asm_data->output);
-+
-+ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
-+ gsi = gsi_for_stmt(asm_data->def_stmt);
-+ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
-+
-+ if (asm_data->output)
-+ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
-+}
-+
-+static void replace_call_lhs(const struct asm_data *asm_data)
-+{
-+ gimple_set_lhs(asm_data->def_stmt, asm_data->input);
-+ update_stmt(asm_data->def_stmt);
-+ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
-+}
-+
-+static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
-+{
-+ enum mark cur_fndecl_attr;
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ cur_fndecl_attr = search_intentional(visited, arg);
-+ if (cur_fndecl_attr != MARK_NO)
-+ return cur_fndecl_attr;
-+ }
-+ return MARK_NO;
-+}
-+
-+static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ enum mark cur_fndecl_attr;
-+ const_tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ cur_fndecl_attr = search_intentional(visited, rhs1);
-+ if (cur_fndecl_attr != MARK_NO)
-+ return cur_fndecl_attr;
-+ return search_intentional(visited, rhs2);
-+}
-+
-+// Look up the intentional_overflow attribute on the caller and the callee functions.
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) != SSA_NAME)
-+ return get_intentional_attr_type(lhs);
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return MARK_NO;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return MARK_NO;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return search_intentional(visited, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
-+ return MARK_TURN_OFF;
-+ return MARK_NO;
-+ case GIMPLE_CALL:
-+ return MARK_NO;
-+ case GIMPLE_PHI:
-+ return search_intentional_phi(visited, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return search_intentional_binary(visited, lhs);
-+ }
-+ case GIMPLE_RETURN:
-+ return MARK_NO;
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
-+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
-+{
-+ const_tree fndecl;
-+ struct pointer_set_t *visited;
-+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
-+
-+ fndecl = get_interesting_orig_fndecl(stmt, argnum);
-+ if (is_end_intentional_intentional_attr(fndecl, argnum))
-+ decl_attr = MARK_NOT_INTENTIONAL;
-+ else if (is_yes_intentional_attr(fndecl, argnum))
-+ decl_attr = MARK_YES;
-+ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ return MARK_TURN_OFF;
-+ }
-+
-+ visited = pointer_set_create();
-+ cur_fndecl_attr = search_intentional(visited, arg);
-+ pointer_set_destroy(visited);
-+
-+ switch (cur_fndecl_attr) {
-+ case MARK_NO:
-+ case MARK_TURN_OFF:
-+ return cur_fndecl_attr;
-+ default:
-+ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
-+ return MARK_YES;
-+ }
-+}
-+
-+static void check_missing_size_overflow_attribute(tree var)
-+{
-+ tree orig_fndecl;
-+ unsigned int num;
-+
-+ if (is_a_return_check(var))
-+ orig_fndecl = DECL_ORIGIN(var);
-+ else
-+ orig_fndecl = DECL_ORIGIN(current_function_decl);
-+
-+ num = get_function_num(var, orig_fndecl);
-+ if (num == CANNOT_FIND_ARG)
-+ return;
-+
-+ is_missing_function(orig_fndecl, num);
-+}
-+
-+static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ search_size_overflow_attribute(visited, arg);
-+ }
-+}
-+
-+static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ const_gimple def_stmt = get_def_stmt(lhs);
-+ tree rhs1, rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ search_size_overflow_attribute(visited, rhs1);
-+ search_size_overflow_attribute(visited, rhs2);
-+}
-+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) == PARM_DECL) {
-+ check_missing_size_overflow_attribute(lhs);
-+ return;
-+ }
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ return;
-+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return;
-+ check_missing_size_overflow_attribute(fndecl);
-+ return;
-+ }
-+ case GIMPLE_PHI:
-+ return search_size_overflow_attribute_phi(visited, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return search_size_overflow_attribute_binary(visited, lhs);
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Search missing entries in the hash table (invoked from the gimple pass)
-+static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
-+{
-+ tree fndecl = NULL_TREE;
-+ tree lhs;
-+ struct pointer_set_t *visited;
-+
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
-+ return;
-+
-+ if (num == 0) {
-+ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
-+ lhs = gimple_return_retval(stmt);
-+ } else {
-+ gcc_assert(is_gimple_call(stmt));
-+ lhs = gimple_call_arg(stmt, num - 1);
-+ fndecl = gimple_call_fndecl(stmt);
-+ }
-+
-+ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
-+ return;
-+
-+ visited = pointer_set_create();
-+ search_size_overflow_attribute(visited, lhs);
-+ pointer_set_destroy(visited);
-+}
-+
-+static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign;
-+
-+ assign = gimple_build_assign(asm_data->input, asm_data->output);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ asm_data->def_stmt = assign;
-+
-+ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
-+ asm_data->output = make_ssa_name(asm_data->output, stmt);
-+ if (gimple_code(stmt) == GIMPLE_RETURN)
-+ gimple_return_set_retval(stmt, asm_data->output);
-+ else
-+ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
-+ update_stmt(stmt);
-+}
-+
-+static char *create_asm_comment(unsigned int argnum, const_gimple stmt, const char *mark_str)
-+{
-+ const char *fn_name;
-+ char *asm_comment;
-+ unsigned int len;
-+
-+ if (argnum == 0)
-+ fn_name = DECL_NAME_POINTER(current_function_decl);
-+ else
-+ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
-+
-+ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
-+ gcc_assert(len > 0);
-+
-+ return asm_comment;
-+}
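-+
-+/* For example (hypothetical callee name): with mark_str "# size_overflow MARK_YES",
-+ * a callee named "some_func" and argnum 2, the comment string built above is
-+ * "# size_overflow MARK_YES some_func 2".
-+ */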
-+
-+static const char *convert_mark_to_str(enum mark mark)
-+{
-+ switch (mark) {
-+ case MARK_NO:
-+ return OK_ASM_STR;
-+ case MARK_YES:
-+ case MARK_NOT_INTENTIONAL:
-+ return YES_ASM_STR;
-+ case MARK_TURN_OFF:
-+ return TURN_OFF_ASM_STR;
-+ }
-+
-+ gcc_unreachable();
-+}
-+
-+/* Create the input of the size_overflow asm stmt.
-+ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
-+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion;
-+ * otherwise create the input (for a phi stmt the output too) of the asm stmt.
-+ */
-+static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+ if (!asm_data->def_stmt) {
-+ asm_data->input = NULL_TREE;
-+ return;
-+ }
-+
-+ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
-+ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
-+
-+ switch (gimple_code(asm_data->def_stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ replace_call_lhs(asm_data);
-+ break;
-+ case GIMPLE_PHI:
-+ create_output_from_phi(stmt, argnum, asm_data);
-+ break;
-+ case GIMPLE_NOP: {
-+ enum mark mark;
-+ const char *mark_str;
-+ char *asm_comment;
-+
-+ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
-+
-+ asm_data->input = asm_data->output;
-+ asm_data->output = NULL;
-+ asm_data->def_stmt = stmt;
-+
-+ mark_str = convert_mark_to_str(mark);
-+ asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+
-+ create_asm_stmt(asm_comment, build_string(2, "rm"), NULL, asm_data);
-+ free(asm_comment);
-+ asm_data->input = NULL_TREE;
-+ break;
-+ }
-+ case GIMPLE_ASM:
-+ if (is_size_overflow_asm(asm_data->def_stmt)) {
-+ asm_data->input = NULL_TREE;
-+ break;
-+ }
-+ default:
-+ debug_gimple_stmt(asm_data->def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
-+ * is of the right kind create the appropriate size_overflow asm stmts:
-+ * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
-+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ */
-+static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
-+{
-+ struct asm_data asm_data;
-+ const char *mark_str;
-+ char *asm_comment;
-+ enum mark mark;
-+
-+ if (is_gimple_constant(output_node))
-+ return;
-+
-+ asm_data.output = output_node;
-+ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
-+ if (mark != MARK_TURN_OFF)
-+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
-+
-+ asm_data.def_stmt = get_def_stmt(asm_data.output);
-+ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
-+ return;
-+
-+ create_asm_input(stmt, argnum, &asm_data);
-+ if (asm_data.input == NULL_TREE)
-+ return;
-+
-+ mark_str = convert_mark_to_str(mark);
-+ asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+ create_asm_stmt(asm_comment, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
-+ free(asm_comment);
-+}
-+
-+// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
-+static bool create_mark_asm(gimple stmt, enum mark mark)
-+{
-+ struct asm_data asm_data;
-+ const char *asm_str;
-+
-+ switch (mark) {
-+ case MARK_TURN_OFF:
-+ asm_str = TURN_OFF_ASM_STR;
-+ break;
-+ case MARK_NOT_INTENTIONAL:
-+ case MARK_YES:
-+ asm_str = YES_ASM_STR;
-+ break;
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ asm_data.def_stmt = stmt;
-+ asm_data.output = gimple_call_lhs(stmt);
-+
-+ if (asm_data.output == NULL_TREE) {
-+ asm_data.input = gimple_call_arg(stmt, 0);
-+ if (is_gimple_constant(asm_data.input))
-+ return false;
-+ asm_data.output = NULL;
-+ create_asm_stmt(asm_str, build_string(2, "rm"), NULL, &asm_data);
-+ return true;
-+ }
-+
-+ create_asm_input(stmt, 0, &asm_data);
-+ gcc_assert(asm_data.input != NULL_TREE);
-+
-+ create_asm_stmt(asm_str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
-+ return true;
-+}
-+
-+static bool is_from_cast(const_tree node)
-+{
-+ gimple def_stmt = get_def_stmt(node);
-+
-+ if (!def_stmt)
-+ return false;
-+
-+ if (gimple_assign_cast_p(def_stmt))
-+ return true;
-+
-+ return false;
-+}
-+
-+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool skip_ptr_minus(gimple stmt)
-+{
-+ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (!is_from_cast(rhs1))
-+ return false;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ if (!is_from_cast(rhs2))
-+ return false;
-+
-+ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
-+ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
-+
-+ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
-+ return false;
-+
-+ create_mark_asm(stmt, MARK_YES);
-+ return true;
-+}
-+
-+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ case GIMPLE_CALL:
-+ break;
-+ case GIMPLE_PHI: {
-+ unsigned int i, n = gimple_phi_num_args(def_stmt);
-+
-+ pointer_set_insert(visited, def_stmt);
-+
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(def_stmt, i);
-+
-+ walk_use_def_ptr(visited, arg);
-+ }
-+ }
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+ return;
-+ case 3:
-+ if (skip_ptr_minus(def_stmt))
-+ return;
-+
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
-+ return;
-+ default:
-+ return;
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
-+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
-+{
-+ struct pointer_set_t *visited;
-+
-+ visited = pointer_set_create();
-+ walk_use_def_ptr(visited, arg);
-+ pointer_set_destroy(visited);
-+}
-+
-+// Determine the return value and insert the asm stmt to mark the return stmt.
-+static void insert_asm_ret(gimple stmt)
-+{
-+ tree ret;
-+
-+ ret = gimple_return_retval(stmt);
-+ create_size_overflow_asm(stmt, ret, 0);
-+}
-+
-+// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
-+static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
-+{
-+ tree arg;
-+ unsigned int argnum;
-+
-+ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
-+ gcc_assert(argnum != 0);
-+ if (argnum == CANNOT_FIND_ARG)
-+ return;
-+
-+ arg = gimple_call_arg(stmt, argnum - 1);
-+ gcc_assert(arg != NULL_TREE);
-+
-+ // skip all ptr - ptr expressions
-+ insert_mark_not_intentional_asm_at_ptr(arg);
-+
-+ create_size_overflow_asm(stmt, arg, argnum);
-+}
-+
-+// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
-+static void set_argnum_attribute(const_tree attr, bool *argnums)
-+{
-+ unsigned int argnum;
-+ tree attr_value;
-+
-+ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
-+ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
-+ argnums[argnum] = true;
-+ }
-+}
-+
-+// If a function arg or the return value is in the hash table then set its index in the array.
-+static void set_argnum_hash(tree fndecl, bool *argnums)
-+{
-+ unsigned int num;
-+ const struct size_overflow_hash *hash;
-+
-+ hash = get_function_hash(DECL_ORIGIN(fndecl));
-+ if (!hash)
-+ return;
-+
-+ for (num = 0; num <= MAX_PARAM; num++) {
-+ if (!(hash->param & (1U << num)))
-+ continue;
-+
-+ argnums[num] = true;
-+ }
-+}
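-+
-+/* Example with illustrative values: if hash->param is 0x05 (bits 0 and 2 set),
-+ * then argnums[0] (the return value, see handle_interesting_ret() below) and
-+ * argnums[2] (the second argument) are marked as interesting.
-+ */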
-+
-+static bool is_all_the_argnums_empty(bool *argnums)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i <= MAX_PARAM; i++)
-+ if (argnums[i])
-+ return false;
-+ return true;
-+}
-+
-+// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
-+static void search_interesting_args(tree fndecl, bool *argnums)
-+{
-+ const_tree attr;
-+
-+ set_argnum_hash(fndecl, argnums);
-+ if (!is_all_the_argnums_empty(argnums))
-+ return;
-+
-+ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
-+ if (attr && TREE_VALUE(attr))
-+ set_argnum_attribute(attr, argnums);
-+}
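-+
-+/* For illustration only (hypothetical declaration, not from the kernel tree):
-+ *   void *alloc_buf(unsigned long len, int flags) __attribute__((size_overflow(1)));
-+ * would make set_argnum_attribute() set argnums[1] for the first argument.
-+ */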
-+
-+/*
-+ * Look up the intentional_overflow attribute that turns off ipa based duplication
-+ * on the callee function.
-+ */
-+static bool is_mark_turn_off_attribute(gimple stmt)
-+{
-+ enum mark mark;
-+ const_tree fndecl = gimple_call_fndecl(stmt);
-+
-+ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
-+ if (mark == MARK_TURN_OFF)
-+ return true;
-+ return false;
-+}
-+
-+// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
-+static void handle_interesting_function(gimple stmt)
-+{
-+ unsigned int argnum;
-+ tree fndecl;
-+ bool orig_argnums[MAX_PARAM + 1] = {false};
-+
-+ if (gimple_call_num_args(stmt) == 0)
-+ return;
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ return;
-+ fndecl = DECL_ORIGIN(fndecl);
-+
-+ if (is_mark_turn_off_attribute(stmt)) {
-+ create_mark_asm(stmt, MARK_TURN_OFF);
-+ return;
-+ }
-+
-+ search_interesting_args(fndecl, orig_argnums);
-+
-+ for (argnum = 1; argnum < MAX_PARAM; argnum++)
-+ if (orig_argnums[argnum])
-+ insert_asm_arg(stmt, argnum);
-+}
-+
-+// If the return value of the caller function is in the hash table (its index is 0) then mark the return stmt with an asm stmt
-+static void handle_interesting_ret(gimple stmt)
-+{
-+ bool orig_argnums[MAX_PARAM + 1] = {false};
-+
-+ search_interesting_args(current_function_decl, orig_argnums);
-+
-+ if (orig_argnums[0])
-+ insert_asm_ret(stmt);
-+}
-+
-+// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
-+static unsigned int search_interesting_functions(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ gimple stmt = gsi_stmt(gsi);
-+
-+ if (is_size_overflow_asm(stmt))
-+ continue;
-+
-+ if (is_gimple_call(stmt))
-+ handle_interesting_function(stmt);
-+ else if (gimple_code(stmt) == GIMPLE_RETURN)
-+ handle_interesting_ret(stmt);
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
-+ * this pass inserts asm stmts to mark the interesting args
-+ * that the ipa pass will detect and insert the size overflow checks for.
-+ */
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data insert_size_overflow_asm_pass_data = {
-+#else
-+static struct gimple_opt_pass insert_size_overflow_asm_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "insert_size_overflow_asm",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = search_interesting_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class insert_size_overflow_asm_pass : public gimple_opt_pass {
-+public:
-+ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
-+ unsigned int execute() { return search_interesting_functions(); }
-+};
-+}
-+
-+static opt_pass *make_insert_size_overflow_asm_pass(void)
-+{
-+ return new insert_size_overflow_asm_pass();
-+}
-+#else
-+static struct opt_pass *make_insert_size_overflow_asm_pass(void)
-+{
-+ return &insert_size_overflow_asm_pass.pass;
-+}
-+#endif
-+
-+// Create the noreturn report_size_overflow() function decl.
-+static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
-+{
-+ tree const_char_ptr_type_node;
-+ tree fntype;
-+
-+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
-+
-+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
-+ fntype = build_function_type_list(void_type_node,
-+ const_char_ptr_type_node,
-+ unsigned_type_node,
-+ const_char_ptr_type_node,
-+ const_char_ptr_type_node,
-+ NULL_TREE);
-+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
-+
-+ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
-+ TREE_PUBLIC(report_size_overflow_decl) = 1;
-+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
-+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
-+ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
-+}
-+
-+static unsigned int dump_functions(void)
-+{
-+ struct cgraph_node *node;
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ basic_block bb;
-+
-+ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
-+ current_function_decl = NODE_DECL(node);
-+
-+ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ fprintf(stderr, "<bb %u>:\n", bb->index);
-+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
-+ debug_gimple_stmt(gsi_stmt(si));
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ debug_gimple_stmt(gsi_stmt(si));
-+ fprintf(stderr, "\n");
-+ }
-+
-+ fprintf(stderr, "-------------------------------------------------------------------------\n");
-+
-+ pop_cfun();
-+ current_function_decl = NULL_TREE;
-+ }
-+
-+ fprintf(stderr, "###############################################################################\n");
-+
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data dump_pass_data = {
-+#else
-+static struct ipa_opt_pass_d dump_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "dump",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = dump_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class dump_pass : public ipa_opt_pass_d {
-+public:
-+ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+ unsigned int execute() { return dump_functions(); }
-+};
-+}
-+
-+static opt_pass *make_dump_pass(void)
-+{
-+ return new dump_pass();
-+}
-+#else
-+static struct opt_pass *make_dump_pass(void)
-+{
-+ return &dump_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ int i;
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ bool enable = true;
-+ struct register_pass_info insert_size_overflow_asm_pass_info;
-+ struct register_pass_info __unused dump_before_pass_info;
-+ struct register_pass_info __unused dump_after_pass_info;
-+ struct register_pass_info ipa_pass_info;
-+ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
-+ {
-+ .base = &report_size_overflow_decl,
-+ .nelt = 1,
-+ .stride = sizeof(report_size_overflow_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ LAST_GGC_ROOT_TAB
-+ };
-+
-+ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
-+ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
-+ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
-+ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ dump_before_pass_info.pass = make_dump_pass();
-+ dump_before_pass_info.reference_pass_name = "increase_alignment";
-+ dump_before_pass_info.ref_pass_instance_number = 1;
-+ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ ipa_pass_info.pass = make_ipa_pass();
-+ ipa_pass_info.reference_pass_name = "increase_alignment";
-+ ipa_pass_info.ref_pass_instance_number = 1;
-+ ipa_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ dump_after_pass_info.pass = make_dump_pass();
-+ dump_after_pass_info.reference_pass_name = "increase_alignment";
-+ dump_after_pass_info.ref_pass_instance_number = 1;
-+ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "no-size-overflow")) {
-+ enable = false;
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
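-+
-+	/* e.g. (hypothetical invocation, assuming the plugin base name is
-+	 * "size_overflow_plugin"): passing
-+	 *   -fplugin-arg-size_overflow_plugin-no-size-overflow
-+	 * on the gcc command line sets enable to false and skips the pass
-+	 * registrations below.
-+	 */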
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
-+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
index 0000000..dd94983
@@ -120822,19 +121321,6 @@ index 6789d78..4afd019e 100644
+ .endm
+
#endif
-diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
-index ce9ed99..8c805a0 100644
---- a/virt/kvm/ioapic.c
-+++ b/virt/kvm/ioapic.c
-@@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
- BUG_ON(ioapic->rtc_status.pending_eoi != 0);
- ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
- ioapic->rtc_status.dest_map);
-- ioapic->rtc_status.pending_eoi = ret;
-+ ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
- } else
- ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
-
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 03a0381..8b31923 100644
--- a/virt/kvm/kvm_main.c
diff --git a/3.14.3/4425_grsec_remove_EI_PAX.patch b/3.14.3/4425_grsec_remove_EI_PAX.patch
index fc51f79..23631d1 100644
--- a/3.14.3/4425_grsec_remove_EI_PAX.patch
+++ b/3.14.3/4425_grsec_remove_EI_PAX.patch
@@ -8,7 +8,7 @@ X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
--- linux-3.7.1-hardened.orig/security/Kconfig 2012-12-26 08:39:29.000000000 -0500
+++ linux-3.7.1-hardened/security/Kconfig 2012-12-26 09:05:44.000000000 -0500
-@@ -268,7 +268,7 @@
+@@ -269,7 +269,7 @@
config PAX_EI_PAX
bool 'Use legacy ELF header marking'
diff --git a/3.14.3/4450_grsec-kconfig-default-gids.patch b/3.14.3/4450_grsec-kconfig-default-gids.patch
index 8857c39..ed2968f 100644
--- a/3.14.3/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.3/4450_grsec-kconfig-default-gids.patch
@@ -73,7 +73,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Nuar a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/security/Kconfig 2012-10-13 09:52:59.000000000 -0400
-@@ -196,7 +196,7 @@
+@@ -197,7 +197,7 @@
config GRKERNSEC_PROC_GID
int "GID exempted from /proc restrictions"
@@ -82,7 +82,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group will be exempted from
grsecurity's /proc restrictions, allowing users of the specified
-@@ -207,7 +207,7 @@
+@@ -208,7 +208,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -91,7 +91,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group untrusted users should
be added to. These users will be placed under grsecurity's Trusted Path
-@@ -219,7 +219,7 @@
+@@ -220,7 +220,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -100,7 +100,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -228,7 +228,7 @@
+@@ -229,7 +229,7 @@
config GRKERNSEC_SYMLINKOWN_GID
int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
depends on GRKERNSEC_CONFIG_SERVER
diff --git a/3.14.3/4475_emutramp_default_on.patch b/3.14.3/4475_emutramp_default_on.patch
index a453a5b..2c704b9 100644
--- a/3.14.3/4475_emutramp_default_on.patch
+++ b/3.14.3/4475_emutramp_default_on.patch
@@ -10,7 +10,7 @@ See bug:
diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/security/Kconfig
--- linux-3.9.2-hardened.orig/security/Kconfig 2013-05-18 08:53:41.000000000 -0400
+++ linux-3.9.2-hardened/security/Kconfig 2013-05-18 09:17:57.000000000 -0400
-@@ -428,7 +428,7 @@
+@@ -429,7 +429,7 @@
config PAX_EMUTRAMP
bool "Emulate trampolines"